I am building an index like this:
class BookIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.EdgeNgramField(document=True, use_template=True)
    content_auto = indexes.EdgeNgramField(model_attr='title')
    isbn_13 = indexes.CharField(model_attr='isbn_13')
    validate = indexes.IntegerField(model_attr='validate')
    price = indexes.IntegerField(model_attr='price')
    authors = indexes.EdgeNgramField()
    reviews = indexes.CharField()
    publishers = indexes.EdgeNgramField()
    institutes = indexes.EdgeNgramField()
    sellers = indexes.CharField()
    category = indexes.CharField()
    sub_category = indexes.CharField()
I have even tried Ngram fields, but partial searches do not work.
I am querying it with SearchQuerySet().all().filter(content=query).
I have also tried SearchQuerySet().filter(content__contains=query), but even that does not return partial matches.
Can someone help me?
Answer 0 (score: 1)
Haystack does not handle Elasticsearch very well here: it does not let you set the proper index values (analyzers) out of the box, so you have to provide a custom Elasticsearch backend to enable this:
# in a search_backends.py file
from django.conf import settings
from haystack.backends.elasticsearch_backend import (
    ElasticsearchSearchBackend,
    ElasticsearchSearchEngine
)
from haystack.fields import EdgeNgramField as BaseEdgeNgramField, NgramField as BaseNgramField
from haystack.indexes import CharField

# just an example of which degree of configuration could be possible
CUSTOM_FIELD_TYPE = {
    'completion': {
        'type': 'completion',
        'payloads': True,
        'analyzer': 'suggest_analyzer',
        'preserve_separators': True,
        'preserve_position_increments': False
    },
}
# Custom backend
class CustomElasticBackend(ElasticsearchSearchBackend):
    DEFAULT_ANALYZER = None

    def __init__(self, connection_alias, **connection_options):
        super(CustomElasticBackend, self).__init__(
            connection_alias, **connection_options)
        user_settings = getattr(settings, 'ELASTICSEARCH_INDEX_SETTINGS', None)
        self.DEFAULT_ANALYZER = getattr(settings, 'ELASTICSEARCH_DEFAULT_ANALYZER', "snowball")
        if user_settings:
            setattr(self, 'DEFAULT_SETTINGS', user_settings)

    def build_schema(self, fields):
        content_field_name, mapping = super(CustomElasticBackend,
                                            self).build_schema(fields)

        for field_name, field_class in fields.items():
            field_mapping = mapping[field_class.index_fieldname]

            index_analyzer = getattr(field_class, 'index_analyzer', None)
            search_analyzer = getattr(field_class, 'search_analyzer', None)
            field_analyzer = getattr(field_class, 'analyzer', self.DEFAULT_ANALYZER)

            if field_mapping['type'] == 'string' and field_class.indexed:
                field_mapping["term_vector"] = "with_positions_offsets"
                if not hasattr(field_class, 'facet_for') and field_class.field_type not in ('ngram', 'edge_ngram'):
                    field_mapping['analyzer'] = field_analyzer

            if field_class.field_type in CUSTOM_FIELD_TYPE:
                field_mapping = CUSTOM_FIELD_TYPE.get(field_class.field_type).copy()

            if index_analyzer and search_analyzer:
                field_mapping['index_analyzer'] = index_analyzer
                field_mapping['search_analyzer'] = search_analyzer
                if 'analyzer' in field_mapping:
                    del field_mapping['analyzer']

            mapping.update({field_class.index_fieldname: field_mapping})
        return (content_field_name, mapping)
class CustomElasticSearchEngine(ElasticsearchSearchEngine):
    backend = CustomElasticBackend

# Custom fields, just use the ones you need or create yours
class CustomFieldMixin(object):
    def __init__(self, **kwargs):
        self.analyzer = kwargs.pop('analyzer', None)
        self.index_analyzer = kwargs.pop('index_analyzer', None)
        self.search_analyzer = kwargs.pop('search_analyzer', None)
        super(CustomFieldMixin, self).__init__(**kwargs)

class CustomCharField(CustomFieldMixin, CharField):
    pass

class CustomCompletionField(CustomFieldMixin, CharField):
    field_type = 'completion'

class CustomEdgeNgramField(CustomFieldMixin, BaseEdgeNgramField):
    pass

class CustomNgramField(CustomFieldMixin, BaseNgramField):
    pass
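For instance, the question's BookIndex could declare its fields with these custom field classes and explicit analyzers. A minimal sketch, not part of the original answer: the import path reuses the placeholder "path.to" from the settings below, the Book model is assumed from the question, and the analyzer names refer to those defined in ELASTICSEARCH_INDEX_SETTINGS further down.
# search_indexes.py -- illustrative sketch only
from haystack import indexes
from path.to.search_backends import CustomCharField, CustomEdgeNgramField

from .models import Book  # the question's model, assumed here

class BookIndex(indexes.SearchIndex, indexes.Indexable):
    # document field analyzed with the ngram index analyzer and a plain search analyzer
    text = CustomCharField(document=True, use_template=True,
                           index_analyzer='str_index_analyzer',
                           search_analyzer='str_search_analyzer')
    # autocomplete field, handled as edge_ngram by the backend
    content_auto = CustomEdgeNgramField(model_attr='title')

    def get_model(self):
        return Book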
# settings.py
ELASTICSEARCH_INDEX_SETTINGS = {
    'settings': {
        "analysis": {
            "analyzer": {
                "custom_analyzer": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": ["lowercase", "asciifolding"]
                },
                "str_index_analyzer": {
                    "type": "custom",
                    "tokenizer": "haystack_ngram_tokenizer",
                    "filter": ["stopwords", "asciifolding", "lowercase", "snowball", "elision", "worddelimiter"]
                },
                "str_search_analyzer": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": ["stopwords", "asciifolding", "lowercase", "snowball", "elision", "worddelimiter"]
                },
                "suggest_analyzer": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": [
                        "stopwords",
                        "standard",
                        "lowercase",
                        "asciifolding"
                    ]
                },
            },
            "tokenizer": {
                "haystack_ngram_tokenizer": {
                    "type": "nGram",
                    "min_gram": 2,
                    "max_gram": 20,
                },
            },
            "filter": {
                "elision": {
                    "type": "elision",
                    "articles": ["l", "m", "t", "qu", "n", "s", "j", "d"]
                },
                "stopwords": {
                    "type": "stop",
                    "stopwords": ["_french_", "_english_"],
                    "ignore_case": True
                },
                "worddelimiter": {
                    "type": "word_delimiter"
                }
            }
        }
    }
}
# Haystack settings
HAYSTACK_CONNECTIONS = {
    'default': {
        ...
        'ENGINE': 'path.to.search_backends.CustomElasticSearchEngine',
        ...
    },
}
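The custom backend above also reads an optional ELASTICSEARCH_DEFAULT_ANALYZER setting (falling back to "snowball") for fields that do not declare an analyzer of their own. A minimal sketch, where the chosen analyzer name is only an example taken from the settings above:
# settings.py (optional) -- fallback analyzer used by CustomElasticBackend
# for fields without an explicit analyzer; any analyzer defined in
# ELASTICSEARCH_INDEX_SETTINGS can be used here
ELASTICSEARCH_DEFAULT_ANALYZER = 'str_search_analyzer'
After changing any of these settings, rebuild the index (python manage.py rebuild_index) so the new mapping is actually pushed to Elasticsearch.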
Answer 1 (score: 0)
Using elasticsearch-2.x with django-haystack versions < 2.5 causes this problem. Check whether your versions match this combination.
From elasticsearch-2.x onwards, boost is no longer supported as metadata that haystack passes along. (See the answer https://stackoverflow.com/a/36847352/5108155)
This was fixed in version 2.5 of haystack.
When the index was built (or updated), elasticsearch never received the ngram analyzer you intended to apply to the field. You can verify this by running manually:
curl 'http://<elasticsearch_address>/<index_name>/?pretty'
This will show only the type on the field, without the analyzer property.
Interestingly, haystack does not raise this exception, because of the internal silently_fail attribute on the ElasticsearchSearchBackend class.
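If you want such failures to surface instead of being swallowed, Haystack's SILENTLY_FAIL connection option can be turned off. A minimal sketch using the standard Elasticsearch engine; the URL and index name are placeholders:
# settings.py -- make haystack raise indexing/mapping errors instead of failing silently
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': 'http://127.0.0.1:9200/',  # placeholder address
        'INDEX_NAME': 'haystack',         # placeholder index name
        'SILENTLY_FAIL': False,           # surface errors during indexing
    },
}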