在 Elasticsearch 中高亮显示部分匹配的单词

时间:2014-01-23 14:13:01

标签: autocomplete elasticsearch

我使用 n-gram 分词在 Elasticsearch 中实现了一个自动建议器。现在我想在自动建议列表中高亮显示用户已输入的那部分字符。为此我使用了 Elasticsearch 提供的高亮器（highlighter），代码如下所示。但在输出结果中，被高亮的是完整的词条——我哪里做错了？

{
    "query": {
        "query_string": {
            "query": "soft",
            "default_field": "competency_display_name"
        }
    },
    "highlight": {
        "pre_tags": ["<b>"],
        "post_tags": ["</b>"],
        "fields": {
            "competency_display_name": {}
        }
    }
}

返回的结果是

{
   "took": 8,
   "timed_out": false,
   "_shards": {
      "total": 5,
      "successful": 5,
      "failed": 0
   },
   "hits": {
      "total": 1,
      "max_score": 1,
      "hits": [
         {
            "_index": "competency_auto_suggest",
            "_type": "competency",
            "_id": "4",
            "_score": 1,
            "_source": {
               "review": null,
               "competency_title": "Software Development",
               "id": 4,
               "competency_display_name": "Software Development"
            },
            "highlight": {
               "competency_display_name": [
                  "<b>Software Development</b>"
               ]
            }
         }
      ]
   }
}

映射

"competency":{
    "properties": {
        "competency_display_name":{
            "type":"string",
            "index_analyzer": "index_ngram_analyzer",
            "search_analyzer": "search_term_analyzer"
        }
    }
}

设置

"analysis": {
    "filter": {
        "ngram_tokenizer": {
            "type": "nGram",
            "min_gram": "1",
            "max_gram": "15",
            "token_chars": [ "letter", "digit" ]
        }
    },
    "analyzer": {
        "index_ngram_analyzer": {
            "type": "custom",
            "tokenizer": "keyword",
            "filter": [ "ngram_tokenizer", "lowercase" ]
        },
        "search_term_analyzer": {
            "type": "custom",
            "tokenizer": "keyword",
            "filter": "lowercase" 
        }
    }
}

如何只高亮 "Soft"（匹配的部分），而不是整个 "Software Development"？

1 个答案:

答案 0 :(得分:10)

在这种情况下，要让高亮只作用于匹配的部分，您应该使用 ngram tokenizer（分词器），而不是 ngram token filter（词元过滤器）。另外需要把 term_vector 设置为 with_positions_offsets，以便更快地进行高亮。

以下是可行的设置和映射：

"analysis": {
    "tokenizer": {
        "ngram_tokenizer": {
            "type": "nGram",
            "min_gram": "1",
            "max_gram": "15",
            "token_chars": [ "letter", "digit" ]
        }
    },
    "analyzer": {
        "index_ngram_analyzer": {
            "type": "custom",
            "tokenizer": "ngram_tokenizer",
            "filter": [ "lowercase" ]
        },
        "search_term_analyzer": {
            "type": "custom",
            "tokenizer": "keyword",
            "filter": "lowercase" 
        }
    }
}

映射

"competency":{
    "properties": {
        "competency_display_name":{
            "type":"string",
            "index_analyzer": "index_ngram_analyzer",
            "search_analyzer": "search_term_analyzer",
            "term_vector":"with_positions_offsets" 
        }
    }
}