I can't resolve this error. Can anyone help me fix it? I'd be very grateful.
Below I've attached my code and the error it produces:
ValueError Traceback (most recent call last)
<ipython-input-65-ec0e93dabcc7> in <module>
82 # hist = model.fit(X_train, y_train, batch_size=58, nb_epoch=10, validation_split = 0.1, verbose = 1)
83 hist=model.fit(X_train,y_train,batch_size=128,epochs=10,
---> 84 validation_split=0.2,callbacks=[EarlyStopping(monitor='val_loss',min_delta=0.0001)])
85
86 score, acc = model.evaluate(X_test, y_test, batch_size=1)
~\Anaconda3\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
950 sample_weight=sample_weight,
951 class_weight=class_weight,
--> 952 batch_size=batch_size)
953 # Prepare validation data.
954 do_validation = False
~\Anaconda3\lib\site-packages\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
749 feed_input_shapes,
750 check_batch_axis=False, # Don't enforce the batch size.
--> 751 exception_prefix='input')
752
753 if y is not None:
~\Anaconda3\lib\site-packages\keras\engine\training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
100 'Expected to see ' + str(len(names)) + ' array(s), '
101 'but instead got the following list of ' +
--> 102 str(len(data)) + ' arrays: ' + str(data)[:200] + '...')
103 elif len(names) > 1:
104 raise ValueError(
ValueError: Error when checking model input: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 1 array(s), but instead got the following list of 58 arrays: [array([[5.80e+17],
[5.80e+17],
[5.80e+17],
[5.80e+17],
[5.80e+17],
[5.80e+17],
[5.80e+17],
[5.80e+17],
[5.80e+17],
[5.80e+17],
[5...
The main error is:
the list of Numpy arrays that you are passing to your model is not the size the model expected
I've attached a screenshot of the code that causes the error here: https://i.stack.imgur.com/MdGN7.png
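For context, a single-input Keras model expects x to be one NumPy array (or a list containing exactly one array); a plain Python list of arrays is treated as one array per model input, which is where the "58 arrays" in the message comes from. A minimal diagnostic sketch, using the X_train name from the code below, would be:

    import numpy as np

    print(type(X_train))               # a plain list -> Keras treats each element as a separate input
    print(len(X_train))                # 58 here: one entry per DataFrame column
    print(np.asarray(X_train).shape)   # what the data looks like once stacked into a single array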
Answer 0 (score: 0)
# Imports needed by this snippet. `input_file` and `test_train_split` are assumed
# to be defined elsewhere in the script (the latter looks like a train/test split
# helper, e.g. sklearn's train_test_split).
import pandas as pd
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dropout, Dense
from keras.callbacks import EarlyStopping

def load_data(test_split=0.5):
    print('Loading data...')
    df = pd.read_csv(input_file)
    # df['sequence'] = df['sequence'].apply(lambda x: [int(e) for e in x.split()])
    # df = df.reindex(np.random.permutation(df.index))
    # train_size = int(len(df) * (1 - test_split))
    # X_train = df['symbol_count'].values[:train_size]
    # y_train = np.array(df['is_rumor'].values[:train_size])
    # X_test = np.array(df['symbol_count'].values[train_size:])
    # y_test = np.array(df['is_rumor'].values[train_size:])
    y = df['is_rumor']
    X = df.drop("is_rumor", axis=1)
    xTrain, xTest, yTrain, yTest = test_train_split(X, y)
    # print("X_Train")
    # print(xTrain.head())
    # print("X_Test")
    # print(xTest.head())
    # print("Y_Train")
    # print(yTrain.head())
    # print("y_Test")
    # print(yTest.head())
    xTrain = xTrain.values.tolist()
    xTest = xTest.values.tolist()
    yTrain = yTrain.values.tolist()
    yTest = yTest.values.tolist()
    return xTrain, yTrain, xTest, yTest
    # return pad_sequences(X_train), y_train, pad_sequences(X_test), y_test
def load_my_data():
    print('Loading data...')
    df = pd.read_csv(input_file)
    y = df['is_rumor']
    X = df.drop("is_rumor", axis=1)
    xTrain, xTest, yTrain, yTest = test_train_split(X, y)
    x_train = []
    # Iterating over a DataFrame yields its column names, so each pass appends
    # one whole column; x_train ends up as a list of per-column arrays rather
    # than a single 2-D array.
    for i in xTrain:
        one_feature = df[i]
        x_train.append(one_feature)
    return x_train, yTrain, xTest, yTest
def create_model(input_length):
    print('Creating model...')
    model = Sequential()
    model.add(Embedding(input_dim=188, output_dim=50, input_length=input_length))
    model.add(LSTM(output_dim=256, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    model.add(Dropout(0.5))
    model.add(LSTM(output_dim=256, activation='sigmoid', inner_activation='hard_sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    print('Compiling...')
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
X_train, y_train, X_test, y_test = load_my_data()
model = create_model(len(X_train[0]))

print("----------------checking--------------------------------------------")
print('Fitting model...')
# hist = model.fit(X_train, y_train, batch_size=58, nb_epoch=10, validation_split = 0.1, verbose = 1)
hist = model.fit(X_train, y_train, batch_size=128, epochs=10,
                 validation_split=0.2,
                 callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)])

score, acc = model.evaluate(X_test, y_test, batch_size=1)
print('Test score:', score)
print('Test accuracy:', acc)
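The error comes from load_my_data returning x_train as a Python list with one entry per column, while the model above has a single input and expects one 2-D array of shape (num_samples, input_length). One possible fix is to keep the training rows as a single array instead of a per-column list. This is only a sketch under the assumption that every feature column already holds integer token indices below the Embedding's input_dim of 188; load_my_data_fixed is a hypothetical replacement name, not part of the original code.

    import numpy as np

    def load_my_data_fixed():
        # Same loading steps as load_my_data, but the features are kept as one
        # 2-D array (rows = samples, columns = features) instead of a list of columns.
        df = pd.read_csv(input_file)
        y = df['is_rumor']
        X = df.drop("is_rumor", axis=1)
        xTrain, xTest, yTrain, yTest = test_train_split(X, y)
        return (np.asarray(xTrain), np.asarray(yTrain),
                np.asarray(xTest), np.asarray(yTest))

    X_train, y_train, X_test, y_test = load_my_data_fixed()
    model = create_model(X_train.shape[1])   # input_length = number of feature columns
    hist = model.fit(X_train, y_train, batch_size=128, epochs=10,
                     validation_split=0.2,
                     callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)])

Note that the values shown in the traceback (around 5.8e+17) look like raw IDs rather than token indices, so they would overflow an Embedding layer with input_dim=188; in that case the features would likely need to be tokenized or bucketed first, or the Embedding layer replaced with ordinary dense input handling.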