I am developing a project with Django (Python). I have installed "oaisearch" to retrieve metadata from different websites, and "django_elasticsearch_dsl" to index it. I have confirmed that "oaisearch" runs without problems.
The problem occurs when I run "python3 manage.py search_index --create -f" to start building the index.
The "documents.py" file containing the configuration for what I want to index is shown below.
from elasticsearch_dsl import analyzer
from django_elasticsearch_dsl import DocType, Index
from oaisearch.models import Digital_Resources

digital_resources = Index('digital_resources')
digital_resources.settings(
    number_of_shards=1,
    number_of_replicas=0
)

html_strip = analyzer(
    'html_strip',
    tokenizer="standard",
    filter=["standard", "lowercase", "stop", "snowball"],
    char_filter=["html_strip"]
)

@digital_resources.doc_type
class Resources(DocType):
    class Meta:
        model = Digital_Resources
        fields = [
            #'oai_server',
            'creator',
            'title',
            'subject',
            'description',
            'identifier',
        ]
After executing the command above, the console shows the following failure message:
Creating index 'digital_resources'
PUT http://localhost:9200/digital_resources [status:400 request:0.014s]
Traceback (most recent call last):
  File "manage.py", line 22, in <module>
    execute_from_command_line(sys.argv)
  File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 363, in execute_from_command_line
    utility.execute()
  File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 355, in execute
    self.fetch_command(subcommand).run_from_argv(self.argv)
  File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 283, in run_from_argv
    self.execute(*args, **cmd_options)
  File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 330, in execute
    output = self.handle(*args, **options)
  File "/usr/local/lib/python3.5/dist-packages/django_elasticsearch_dsl/management/commands/search_index.py", line 128, in handle
    self._create(models, options)
  File "/usr/local/lib/python3.5/dist-packages/django_elasticsearch_dsl/management/commands/search_index.py", line 84, in _create
    index.create()
  File "/usr/local/lib/python3.5/dist-packages/elasticsearch_dsl/index.py", line 203, in create
    self.connection.indices.create(index=self._name, body=self.to_dict(), **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/elasticsearch/client/utils.py", line 76, in _wrapped
    return func(*args, params=params, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/elasticsearch/client/indices.py", line 88, in create
    params=params, body=body)
  File "/usr/local/lib/python3.5/dist-packages/elasticsearch/transport.py", line 318, in perform_request
    status, headers_response, data = connection.perform_request(method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
  File "/usr/local/lib/python3.5/dist-packages/elasticsearch/connection/http_urllib3.py", line 186, in perform_request
    self._raise_error(response.status, raw_data)
  File "/usr/local/lib/python3.5/dist-packages/elasticsearch/connection/base.py", line 125, in _raise_error
    raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)
elasticsearch.exceptions.RequestError: RequestError(400, 'mapper_parsing_exception', 'Root mapping definition has unsupported parameters: [doc : {properties={identifier={type=text}, creator={type=text}, subject={type=text}, description={type=text}, title={type=text}}}]')
Answer 0 (score: 0)
What version of Elasticsearch are you running? It looks like you are running Elasticsearch 7.x, which is not supported by the version of django_elasticsearch_dsl you have installed.
Try looking for an updated version of the library, or use Elasticsearch 6.x instead.
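As a quick check (not part of the original answer), you can confirm which Elasticsearch server version the project is actually talking to by asking the cluster directly with the official Python client. This is a minimal sketch; the host and port are assumed from the PUT request shown in the traceback (localhost:9200).

from elasticsearch import Elasticsearch

# Assumption: Elasticsearch is reachable at localhost:9200, as in the traceback above.
es = Elasticsearch(['http://localhost:9200'])

# The cluster reports its own version; a 7.x number here would explain the
# mapper_parsing_exception, since the DocType/doc_type mapping used above was removed in 7.x.
print(es.info()['version']['number'])

If this prints a 7.x version, either upgrade django_elasticsearch_dsl (and elasticsearch-dsl) to a release that supports Elasticsearch 7, or point the project at a 6.x cluster, as suggested above.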