# Sentiment classifier: stacked 1-D convolutions over a sequence of
# 15 word vectors (512-dim each), then dense layers and a 2-way softmax.
model = Sequential([
    # Four size-3 convolutions extract local n-gram features.
    Conv1D(32, kernel_size=3, activation='elu', padding='same',
           input_shape=(15, 512)),
    Conv1D(32, kernel_size=3, activation='elu', padding='same'),
    Conv1D(32, kernel_size=3, activation='elu', padding='same'),
    Conv1D(32, kernel_size=3, activation='elu', padding='same'),
    Dropout(0.25),
    # Four size-2 convolutions refine pairwise features.
    Conv1D(32, kernel_size=2, activation='elu', padding='same'),
    Conv1D(32, kernel_size=2, activation='elu', padding='same'),
    Conv1D(32, kernel_size=2, activation='elu', padding='same'),
    Conv1D(32, kernel_size=2, activation='elu', padding='same'),
    Dropout(0.25),
    # Flatten to a vector, classify with two dense layers + softmax.
    Flatten(),
    Dense(256, activation='tanh'),
    Dense(256, activation='tanh'),
    Dropout(0.5),
    Dense(2, activation='softmax'),
])
......preprocessing for new input.....
# Predict on the single preprocessed example and report the winning class.
# pred has shape (1, 2); argmax over the flattened array therefore equals
# the column index of the best class.
pred = model.predict(np.array(final))
best = np.argmax(pred)
print("%s sentiment; %f%% confidence" % (labels[best], pred[0][best] * 100))
（假定 final 作为输入。）当我想预测输入的情绪时，我会遇到此错误：
ValueError: Error when checking model input: the list of Numpy arrays that
you are passing to your model is not the size the model expected. Expected to
see 1 array(s), but instead got the following list of 3 arrays: [array([[
0.08031651, 0.05684812, 0.22872323, ..., -0.19047852
抱歉，这可能是一个愚蠢的问题！我知道在 Stack Overflow 上有人问过几次类似的问题，我也尝试了他们的大部分建议，但由于我对 Keras 的了解不多，这些建议对我并没有奏效。
非常感谢
答案 0（得分：0）
删除您的代码,将该代码添加到该链接中存在的代码之后
from nltk import word_tokenize
import numpy as np
vector_size = 512
# Sequence length the model was trained with (input_shape=(15, 512)).
max_len = 15
# Zero vector used to pad sentences shorter than max_len tokens.
padding = np.zeros(vector_size)
sentences = ['im so happy', 'you are beautiful', 'i got scolded today']

# Build one (max_len, vector_size) matrix per sentence so that
# np.array(final) has shape (n_sentences, 15, 512), matching the model input.
final = []
for sentence in sentences:
    temp = []
    for word in word_tokenize(sentence):
        if word in X_vecs:
            # BUG FIX: the original appended X_vecs[words] — indexing the
            # vector store with the whole token LIST instead of one word.
            temp.append(X_vecs[word])
    # Truncate overly long sentences; otherwise rows have different lengths
    # and np.array(final) builds a ragged array the model rejects.
    temp = temp[:max_len]
    # Pad short sentences with zero vectors up to max_len.
    temp.extend([padding] * (max_len - len(temp)))
    final.append(temp)

predictions = model.predict(np.array(final))
for prediction in predictions:
    print("%s sentiment; %f%% confidence"
          % (labels[np.argmax(prediction)],
             prediction[np.argmax(prediction)] * 100))
您能运行一下并报告我吗?如果可行,我将解释代码。