I ran the following Python code without any errors before, but now I am getting this:
TransportError(400, u'MapperParsingException [Analyzer [whitespace_analyzer] not found for field [job style]]'
The code is:
from elasticsearch import Elasticsearch
import xlrd

es = Elasticsearch()
es.indices.create("we_doing", body={
    'settings': {
        "analysis": {
            "tokenizer": {
                "camel": {
                    "type": "pattern",
                    "pattern": "([^\\\\p{L}\\\\d]+)|(?<=\\\\D)(?=\\\\d)|(?<=\\\\d)(?=\\\\D)|(?<=[\\\\p{L}&&[^\\\\p{Lu}]])(?=\\\\p{Lu})|(?<=\\\\p{Lu})(?=\\\\p{Lu}[\\\\p{L}&&[^\\\\p{Lu}]])"
                }
            },
            "analyzer": {
                "nGram_analyzer": {
                    "type": "custom",
                    "tokenizer": ["whitespace", "camel"],
                    "filter": [
                        "lowercase",
                        "asciifolding"
                    ]
                },
                "whitespace_analyzer": {
                    "type": "custom",
                    "tokenizer": ["whitespace", "camel"],
                    "filter": [
                        "lowercase",
                        "asciifolding"
                    ]
                }
            }
        }
    }
}, ignore=400)
mapping = {
    "we_data": {
        "properties": {
            "job id": {"type": "string", "index": "not_analyzed"},
            "job style": {
                "type": "string",
                "index_analyzer": "nGram_analyzer",
                "search_analyzer": "whitespace_analyzer",
            },
            "location": {"type": "string", "index": "not_analyzed"},
            "experience": {"type": "string", "index": "not_analyzed"},
            "post date": {"type": "string", "index": "not_analyzed"},
        }
    }
}
es.indices.put_mapping(index="we_doing", doc_type="we_data", body=mapping)
It used to work fine, and now I am stuck. This goes live in 2 days. Please help.
Answer 0 (score: 0)
An analyzer can use only one tokenizer (per the Elasticsearch Analysis reference page). You need to either replace one of them with a token filter (the Word Delimiter Token Filter should do the job nicely) or use a single tokenizer that handles both.
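As a minimal sketch (assuming the same Elasticsearch 1.x-era API as the code above), the settings could keep the whitespace tokenizer as the single tokenizer per analyzer and move the camel-case splitting into a word_delimiter token filter. The filter name camel_split is an arbitrary name chosen for illustration:

from elasticsearch import Elasticsearch

es = Elasticsearch()

# Each custom analyzer names exactly one tokenizer; camel-case and
# letter/digit splitting is handled by a word_delimiter token filter instead.
settings = {
    "settings": {
        "analysis": {
            "filter": {
                "camel_split": {
                    "type": "word_delimiter",
                    "split_on_case_change": True,   # "jobStyle" -> "job", "Style"
                    "split_on_numerics": True       # "abc123" -> "abc", "123"
                }
            },
            "analyzer": {
                "nGram_analyzer": {
                    "type": "custom",
                    "tokenizer": "whitespace",
                    "filter": ["camel_split", "lowercase", "asciifolding"]
                },
                "whitespace_analyzer": {
                    "type": "custom",
                    "tokenizer": "whitespace",
                    "filter": ["camel_split", "lowercase", "asciifolding"]
                }
            }
        }
    }
}

es.indices.create(index="we_doing", body=settings)

Also note that ignore=400 in the original create call silently swallows creation failures (including "index already exists"), so if we_doing already exists without these analyzers, it would need to be deleted and recreated before put_mapping can find whitespace_analyzer.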