ElasticSearch双嵌套排序

时间:2016-06-12 03:38:39

标签: sorting elasticsearch

我的文档看起来像这样(这里是示例):

{
"user": "xyz",
"state": "FINISHED",
"finishedTime": 1465566467161,
"jobCounters": {
    "counterGroup": [
        {
            "counterGroupName": "org.apache.hadoop.mapreduce.FileSystemCounter",
            "counter": [
                {
                    "name": "FILE_BYTES_READ",
                    "mapCounterValue": 206509212380,
                    "totalCounterValue": 423273933523,
                    "reduceCounterValue": 216764721143
                },
                {
                    "name": "FILE_BYTES_WRITTEN",
                    "mapCounterValue": 442799895522,
                    "totalCounterValue": 659742824735,
                    "reduceCounterValue": 216942929213
                },
                {
                    "name": "HDFS_BYTES_READ",
                    "mapCounterValue": 207913352565,
                    "totalCounterValue": 207913352565,
                    "reduceCounterValue": 0
                },
                {
                    "name": "HDFS_BYTES_WRITTEN",
                    "mapCounterValue": 0,
                    "totalCounterValue": 89846725044,
                    "reduceCounterValue": 89846725044
                }
            ]
        },
        {
            "counterGroupName": "org.apache.hadoop.mapreduce.JobCounter",
            "counter": [
                {
                    "name": "TOTAL_LAUNCHED_MAPS",
                    "mapCounterValue": 0,
                    "totalCounterValue": 13394,
                    "reduceCounterValue": 0
                },
                {
                    "name": "TOTAL_LAUNCHED_REDUCES",
                    "mapCounterValue": 0,
                    "totalCounterValue": 720,
                    "reduceCounterValue": 0
                }
            ]
        }
    ]
}

}

现在我希望对这些数据进行排序(sort),在 counter.name 为 FILE_BYTES_READ 的条件下,按 totalCounterValue 获得 TOP 15 文档。我已尝试对此进行嵌套排序,但无论我在查询中写入哪个 counter.name 键值,它总是按 HDFS_BYTES_READ 的值进行排序。我的查询如下,任何人都可以帮我解决这个问题吗?

{
  "_source": true,
  "size": 15,
  "query": {
    "bool": {
      "must": [
        { "term": { "state": { "value": "FINISHED" } } },
        { "range": { "startedTime": { "gte": "now - 4d", "lte": "now" } } }
      ]
    }
  },
  "sort": [
    {
      "jobCounters.counterGroup.counter.totalCounterValue": {
        "order": "desc",
        "nested_path": "jobCounters.counterGroup",
        "nested_filter": {
          "nested": {
            "path": "jobCounters.counterGroup.counter",
            "filter": {
              "term": {
                "jobCounters.counterGroup.counter.name": "file_bytes_read"
              }
            }
          }
        }
      }
    }
  ]
}

这是我们为 jobCounters 创建的映射(mapping):

"jobCounters": {
  "type": "nested",
  "include_in_parent": true,
  "properties": {
    "counterGroup": {
      "type": "nested",
      "include_in_parent": true,
      "properties": {
        "counterGroupName": {
          "type": "string",
          "fields": {
            "raw": { "type": "string", "index": "not_analyzed" }
          }
        },
        "counter": {
          "type": "nested",
          "include_in_parent": true,
          "properties": {
            "reduceCounterValue": { "type": "long" },
            "name": {
              "type": "string",
              "analyzer": "english",
              "fields": {
                "raw": { "type": "string", "index": "not_analyzed" }
              }
            },
            "totalCounterValue": { "type": "long" },
            "mapCounterValue": { "type": "long" }
          }
        }
      }
    }
  }
}

我是按照 ElasticSearch 的嵌套排序文档写出上面这个查询的,但我不知道为什么它总是按 HDFS_BYTES_READ 的 totalCounterValue 进行排序,而不管 jobCounters.counterGroup.counter.name 的值是什么。

1 个答案:

答案 0 :(得分:0)

你可以尝试这样的事情,

curl -XGET 'http://localhost:9200/index/jobCounters/_search' -d '
{
  "size": 15,
  "query": {
    "nested": {
      "path": "jobCounters.counterGroup.counter",
      "filter": {
        "term": {
          "jobCounters.counterGroup.counter.name": "file_bytes_read"
        }
      }
    }
  },
  "sort": [
    {
      "jobCounters.counterGroup.counter.totalCounterValue": {
        "order": "desc",
        "nested_path": "jobCounters.counterGroup",
        "nested_filter": {
          "nested": {
            "path": "jobCounters.counterGroup.counter",
            "filter": {
              "term": {
                "jobCounters.counterGroup.counter.name": "file_bytes_read"
              }
            }
          }
        }
      }
    }
  ]
}
'

请阅读嵌套排序(nested sorting)官方文档的末尾部分。它解释了为什么我们必须在 nested_filter 中重复与查询中相同的过滤条件。