Tuning an aggregation query over multiple shingle filters

Time: 2016-09-09 21:13:44

Tags: performance elasticsearch lucene aggregate-functions n-gram

I have 13,000 web pages whose body text has been indexed. The goal is to get the top 200 phrase frequencies for one-word, two-word, three-word phrases... all the way up to eight-word phrases.

In total these pages contain over 150 million words that need to be tokenized.

The problem is that the query takes about 15 minutes, after which it runs out of heap space and never completes.

I'm testing on an Ubuntu server with 4 CPU cores, 8 GB of RAM, and an SSD. 6 GB of RAM is allocated to the heap. Swap is disabled.

Right now I can make this work by splitting it across 8 separate indices, with a settings/mappings/query combination tailored to a single phrase length each. That is, I can run one-word phrases, two-word phrases, and so on individually and get the results I expect (although each run still takes around 5 minutes; see the sketch after the query below). What I'd like to know is whether there is a way to tune this combined aggregation so that a single index and a single query will work on my hardware.

Settings and mappings:

{
   "settings":{
      "index":{
         "number_of_shards" : 1,
         "number_of_replicas" : 0,
         "analysis":{
            "analyzer":{
               "analyzer_shingle_2":{
                  "tokenizer":"standard",
                  "filter":["standard", "lowercase", "filter_shingle_2"]
               },
               "analyzer_shingle_3":{
                  "tokenizer":"standard",
                  "filter":["standard", "lowercase", "filter_shingle_3"]
               },
               "analyzer_shingle_4":{
                  "tokenizer":"standard",
                  "filter":["standard", "lowercase", "filter_shingle_4"]
               },
               "analyzer_shingle_5":{
                  "tokenizer":"standard",
                  "filter":["standard", "lowercase", "filter_shingle_5"]
               },
               "analyzer_shingle_6":{
                  "tokenizer":"standard",
                  "filter":["standard", "lowercase", "filter_shingle_6"]
               },
               "analyzer_shingle_7":{
                  "tokenizer":"standard",
                  "filter":["standard", "lowercase", "filter_shingle_7"]
               },
               "analyzer_shingle_8":{
                  "tokenizer":"standard",
                  "filter":["standard", "lowercase", "filter_shingle_8"]
               }
            },
            "filter":{
               "filter_shingle_2":{
                  "type":"shingle",
                  "max_shingle_size":2,
                  "min_shingle_size":2,
                  "output_unigrams":"false"
               },
               "filter_shingle_3":{
                  "type":"shingle",
                  "max_shingle_size":3,
                  "min_shingle_size":3,
                  "output_unigrams":"false"
               },
               "filter_shingle_4":{
                  "type":"shingle",
                  "max_shingle_size":4,
                  "min_shingle_size":4,
                  "output_unigrams":"false"
               },
               "filter_shingle_5":{
                  "type":"shingle",
                  "max_shingle_size":5,
                  "min_shingle_size":5,
                  "output_unigrams":"false"
               },
               "filter_shingle_6":{
                  "type":"shingle",
                  "max_shingle_size":6,
                  "min_shingle_size":6,
                  "output_unigrams":"false"
               },
               "filter_shingle_7":{
                  "type":"shingle",
                  "max_shingle_size":7,
                  "min_shingle_size":7,
                  "output_unigrams":"false"
               },
               "filter_shingle_8":{
                  "type":"shingle",
                  "max_shingle_size":8,
                  "min_shingle_size":8,
                  "output_unigrams":"false"
               }
            }
         }
      }
   },
   "mappings":{
      "items":{
         "properties":{
            "body":{
               "type": "multi_field",
               "fields": {
                  "two-word-phrases": {
                     "analyzer":"analyzer_shingle_2",
                     "type":"string"
                  },
                  "three-word-phrases": {
                     "analyzer":"analyzer_shingle_3",
                     "type":"string"
                  },
                  "four-word-phrases": {
                     "analyzer":"analyzer_shingle_4",
                     "type":"string"
                  },
                  "five-word-phrases": {
                     "analyzer":"analyzer_shingle_5",
                     "type":"string"
                  },
                  "six-word-phrases": {
                     "analyzer":"analyzer_shingle_6",
                     "type":"string"
                  },
                  "seven-word-phrases": {
                     "analyzer":"analyzer_shingle_7",
                     "type":"string"
                  },
                  "eight-word-phrases": {
                     "analyzer":"analyzer_shingle_8",
                     "type":"string"
                  }
               }
            }
         }
      }
   }
}
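
For reference, a minimal sketch of how this settings/mappings document could be applied at index-creation time, assuming the elasticsearch-py client, a hypothetical index name "pages", and the JSON above saved to a file called settings_and_mappings.json:

# Sketch only: create the index with the settings and mappings shown above.
# "pages" is a hypothetical index name; adjust the host and index to your setup.
import json
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")

with open("settings_and_mappings.json", encoding="utf-8") as f:
    index_definition = json.load(f)

es.indices.create(index="pages", body=index_definition)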

Query:

{
  "size" : 0,
  "aggs" : {
    "one-word-phrases" : {
      "terms" : {
        "field" : "body",
        "size"  : 200
      }
    },
    "two-word-phrases" : {
      "terms" : {
        "field" : "body.two-word-phrases",
        "size"  : 200
      }
    },
    "three-word-phrases" : {
      "terms" : {
        "field" : "body.three-word-phrases",
        "size"  : 200
      }
    },
    "four-word-phrases" : {
      "terms" : {
        "field" : "body.four-word-phrases",
        "size"  : 200
      }
    },
    "five-word-phrases" : {
      "terms" : {
        "field" : "body.five-word-phrases",
        "size"  : 200
      }
    },
    "six-word-phrases" : {
      "terms" : {
        "field" : "body.six-word-phrases",
        "size"  : 200
      }
    },
    "seven-word-phrases" : {
      "terms" : {
        "field" : "body.seven-word-phrases",
        "size"  : 200
      }
    },
    "eight-word-phrases" : {
      "terms" : {
        "field" : "body.eight-word-phrases",
        "size"  : 200
      }
    }
  }
}
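
As a point of comparison, the per-phrase-length workaround mentioned above can also be driven from this single index by issuing one aggregation request per sub-field instead of the combined query, so only one field's terms have to be materialized at a time. This is only a sketch, assuming the elasticsearch-py client and a hypothetical index name "pages"; the field names come from the mapping above:

# Sketch: run each phrase-length aggregation as its own request.
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")

fields = [
    "body",                      # one-word phrases
    "body.two-word-phrases",
    "body.three-word-phrases",
    "body.four-word-phrases",
    "body.five-word-phrases",
    "body.six-word-phrases",
    "body.seven-word-phrases",
    "body.eight-word-phrases",
]

for field in fields:
    request = {
        "size": 0,
        "aggs": {"phrases": {"terms": {"field": field, "size": 200}}}
    }
    response = es.search(index="pages", body=request)   # "pages" is hypothetical
    for bucket in response["aggregations"]["phrases"]["buckets"]:
        print(field, bucket["doc_count"], bucket["key"], sep="\t")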

1 Answer:

Answer 0 (score: 1):

Do you really need your entire collection in memory? Your analysis could be rewritten as a batch pipeline with a fraction of the resource requirements:

  1. Parse each crawled page and write its shingles out to a set of flat files (see, e.g., "n-grams in python, four, five, six grams?").
  2. Sort the shingle output files.
  3. Parse the sorted shingle files and write out shingle-count files.
  4. Parse all of the shingle-count files and write one master, aggregated shingle-count file.
  5. Sort it by count in descending order.
  6. (This kind of thing is usually done as a UNIX pipeline and parallelized; a sketch of step 1 follows the list.)
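
A minimal sketch of step 1 (the other steps reduce to standard external sorting), assuming one plain-text file per crawled page under a hypothetical pages/ directory and a simple lowercase whitespace split instead of Lucene's standard tokenizer:

# Sketch: write every shingle (1- to 8-word) to a flat file per phrase length,
# one shingle per line, so the counting can be done with external tools.
import glob

def shingles(tokens, n):
    # Yield n-word shingles from a token list.
    for i in range(len(tokens) - n + 1):
        yield " ".join(tokens[i:i + n])

outputs = {n: open("shingles_%d.txt" % n, "w", encoding="utf-8") for n in range(1, 9)}

for path in glob.glob("pages/*.txt"):     # hypothetical input layout
    with open(path, encoding="utf-8") as f:
        tokens = f.read().lower().split()
    for n, out in outputs.items():
        for shingle in shingles(tokens, n):
            out.write(shingle + "\n")

for out in outputs.values():
    out.close()

Steps 2-5 then become something like sort shingles_4.txt | uniq -c | sort -rn | head -200 for each file, which streams from disk rather than holding everything in the Elasticsearch heap.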

    Or you could just run it with more memory.