In the Search APIs there is a section called Statistical Groups, which says:
A search can be associated with stats groups, which maintains a statistics aggregation per group. It can later be retrieved using the indices stats API specifically. For example, here is a search body request that associates the request with two different groups:
{
  "query" : {
    "match_all" : {}
  },
  "stats" : ["group1", "group2"]
}
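The "retrieved using the indices stats API" part of that quote refers to the groups query parameter of the indices stats API. Purely as an illustration (not part of the quoted docs), a minimal sketch in Python using the requests library, assuming a node on localhost:9200 and the two group names from the example above:

import requests

# Ask the indices stats API for search stats broken down by the two groups
# the search body above was tagged with.
resp = requests.get(
    "http://localhost:9200/_stats/search",
    params={"groups": "group1,group2"},
)
resp.raise_for_status()
print(resp.json()["_all"]["total"]["search"].get("groups", {}))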
My question is: what are stats groups, how do we create them, and where are they used?
Edit 1:
It seems that these groups are related to _stats. As @evanv said, there is some more explanation of them under Index stats, but that documentation does not explain how to create the groups. Also, I could not find a way to use them with the _search API. However, I would like to use them under the search section of _stats, with:
GET /_stats/search?groups=search,indexing
So my question still stands: how can this feature be used with the _search API?
Edit 2:
It appears that you create these groups simply by including the stats parameter in an operation. For example, if I submit this query 5 times:
GET /twitter/tweet/_search
{
  "query": {
    "match_all": {}
  },
  "stats": [
    "makes_no_sense"
  ]
}
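For completeness, the same experiment can be scripted. A rough sketch (Python with the requests library against a local node; the index, type, and group names simply mirror the example above and are not from the original post):

import requests

SEARCH_URL = "http://localhost:9200/twitter/tweet/_search"
BODY = {
    "query": {"match_all": {}},
    # Tag the request with an arbitrary stats group name.
    "stats": ["makes_no_sense"],
}

# Submit the tagged search five times so the group's counters tick up by 5.
for _ in range(5):
    requests.post(SEARCH_URL, json=BODY).raise_for_status()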
This creates a new group called "makes_no_sense" (if it does not already exist) and attributes the operation to that group. Then, when I get the stats for my indices with:
GET /_stats/search?groups=makes_no_sense
the response includes makes_no_sense as a group under search:
{
"_shards": {
"total": 43,
"successful": 22,
"failed": 0
},
"_all": {
"primaries": {
"search": {
"open_contexts": 0,
"query_total": 37983,
"query_time_in_millis": 2695,
"query_current": 0,
"fetch_total": 37796,
"fetch_time_in_millis": 1472,
"fetch_current": 0,
"scroll_total": 5,
"scroll_time_in_millis": 266,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0,
"groups": {
"makes_no_sense": {
"query_total": 5,
"query_time_in_millis": 0,
"query_current": 0,
"fetch_total": 5,
"fetch_time_in_millis": 0,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
}
},
"total": {
"search": {
"open_contexts": 0,
"query_total": 37983,
"query_time_in_millis": 2695,
"query_current": 0,
"fetch_total": 37796,
"fetch_time_in_millis": 1472,
"fetch_current": 0,
"scroll_total": 5,
"scroll_time_in_millis": 266,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0,
"groups": {
"makes_no_sense": {
"query_total": 5,
"query_time_in_millis": 0,
"query_current": 0,
"fetch_total": 5,
"fetch_time_in_millis": 0,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
}
}
},
"indices": {
"bank": {
"primaries": {
"search": {
"open_contexts": 0,
"query_total": 180,
"query_time_in_millis": 369,
"query_current": 0,
"fetch_total": 71,
"fetch_time_in_millis": 35,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
},
"total": {
"search": {
"open_contexts": 0,
"query_total": 180,
"query_time_in_millis": 369,
"query_current": 0,
"fetch_total": 71,
"fetch_time_in_millis": 35,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
},
"twitter": {
"primaries": {
"search": {
"open_contexts": 0,
"query_total": 19,
"query_time_in_millis": 1,
"query_current": 0,
"fetch_total": 19,
"fetch_time_in_millis": 0,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0,
"groups": {
"makes_no_sense": {
"query_total": 5,
"query_time_in_millis": 0,
"query_current": 0,
"fetch_total": 5,
"fetch_time_in_millis": 0,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
}
},
"total": {
"search": {
"open_contexts": 0,
"query_total": 19,
"query_time_in_millis": 1,
"query_current": 0,
"fetch_total": 19,
"fetch_time_in_millis": 0,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0,
"groups": {
"makes_no_sense": {
"query_total": 5,
"query_time_in_millis": 0,
"query_current": 0,
"fetch_total": 5,
"fetch_time_in_millis": 0,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
}
}
},
"test": {
"primaries": {
"search": {
"open_contexts": 0,
"query_total": 45,
"query_time_in_millis": 6,
"query_current": 0,
"fetch_total": 10,
"fetch_time_in_millis": 1,
"fetch_current": 0,
"scroll_total": 5,
"scroll_time_in_millis": 266,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
},
"total": {
"search": {
"open_contexts": 0,
"query_total": 45,
"query_time_in_millis": 6,
"query_current": 0,
"fetch_total": 10,
"fetch_time_in_millis": 1,
"fetch_current": 0,
"scroll_total": 5,
"scroll_time_in_millis": 266,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
},
".kibana": {
"primaries": {
"search": {
"open_contexts": 0,
"query_total": 37689,
"query_time_in_millis": 2303,
"query_current": 0,
"fetch_total": 37688,
"fetch_time_in_millis": 1386,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
},
"total": {
"search": {
"open_contexts": 0,
"query_total": 37689,
"query_time_in_millis": 2303,
"query_current": 0,
"fetch_total": 37688,
"fetch_time_in_millis": 1386,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
},
"blogs": {
"primaries": {
"search": {
"open_contexts": 0,
"query_total": 40,
"query_time_in_millis": 11,
"query_current": 0,
"fetch_total": 6,
"fetch_time_in_millis": 1,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
},
"total": {
"search": {
"open_contexts": 0,
"query_total": 40,
"query_time_in_millis": 11,
"query_current": 0,
"fetch_total": 6,
"fetch_time_in_millis": 1,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
},
"customer": {
"primaries": {
"search": {
"open_contexts": 0,
"query_total": 10,
"query_time_in_millis": 5,
"query_current": 0,
"fetch_total": 2,
"fetch_time_in_millis": 49,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
},
"total": {
"search": {
"open_contexts": 0,
"query_total": 10,
"query_time_in_millis": 5,
"query_current": 0,
"fetch_total": 2,
"fetch_time_in_millis": 49,
"fetch_current": 0,
"scroll_total": 0,
"scroll_time_in_millis": 0,
"scroll_current": 0,
"suggest_total": 0,
"suggest_time_in_millis": 0,
"suggest_current": 0
}
}
}
}
}
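To pull the per-group counters out of a response shaped like the one above programmatically, you can walk the nested structure directly. A rough sketch (again Python with requests, assuming the same local node and group name; the paths follow the response layout shown above):

import requests

GROUP = "makes_no_sense"

resp = requests.get(
    "http://localhost:9200/_stats/search",
    params={"groups": GROUP},
)
resp.raise_for_status()
stats = resp.json()

# Cluster-wide totals for the group: _all -> total -> search -> groups.
cluster_group = stats["_all"]["total"]["search"].get("groups", {}).get(GROUP, {})
print("cluster-wide query_total:", cluster_group.get("query_total"))

# Per-index breakdown: only indices that served tagged requests carry the group.
for index_name, index_stats in stats.get("indices", {}).items():
    groups = index_stats["total"]["search"].get("groups", {})
    if GROUP in groups:
        print(index_name, "query_total:", groups[GROUP]["query_total"])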
Now my question is:
Answer (score: 0):
They are a mix of counters and metadata maintained at the per-index level. If you have an index "foo" and you go to localhost:9200/foo/_stats?pretty&human, you will see a bunch of information about how big the index is, how many search requests have been issued against it, how many fetch requests there were, how much data the index is caching, and so on. To create a stats group, you simply include
"stats" : ["stat_1", "stat_2", .... "stat_n"]
in your request.
Then when you go to localhost:9200/foo/_stats?pretty&human, you will see stats for the stats groups you have defined.
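Put together, a minimal end-to-end sketch of those two steps (tag a search with a group, then read the groups back from the index stats), assuming a local node and the hypothetical index "foo" and group names from this answer:

import requests

BASE = "http://localhost:9200"
INDEX = "foo"  # hypothetical index name from the answer

# 1. Tag a search request with one or more stats group names.
requests.post(
    f"{BASE}/{INDEX}/_search",
    json={"query": {"match_all": {}}, "stats": ["stat_1", "stat_2"]},
).raise_for_status()

# 2. Read the index stats; the tagged groups appear under search -> groups.
stats = requests.get(f"{BASE}/{INDEX}/_stats", params={"human": "true"}).json()
print(stats["_all"]["total"]["search"].get("groups", {}))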
You can read more about the metrics stored there here: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html