I am trying to train a model with Keras in Python, and I get an error when fitting the model (the fit_generator line).
It says that the tensor for the validation data I pass in has 32 values, but the requested shape has size 1.
I tried running it on the CPU and it works fine, but trying to run it on the GPU shows me this error (not sure whether that is related):
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from keras.layers import Activation, Convolution2D, Dropout, Conv2D
from keras.layers import AveragePooling2D, BatchNormalization
from keras.layers import GlobalAveragePooling2D
from keras.models import Sequential
from keras.layers import Flatten
from keras.models import Model
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import SeparableConv2D
from keras import layers
from keras.regularizers import l2
import pandas as pd
import cv2
import numpy as np
dataset_path = 'fer2013.csv'
image_size=(48,48)
# parameters
batch_size = 32
num_epochs = 110
input_shape = (48, 48, 1)
validation_split = .2
verbose = 1
num_classes = 7
patience = 50
base_path = 'out/'
l2_regularization=0.01
def load_fer2013():
    data = pd.read_csv(dataset_path)
    pixels = data['pixels'].tolist()
    width, height = 48, 48
    faces = []
    for pixel_sequence in pixels:
        face = [int(pixel) for pixel in pixel_sequence.split(' ')]
        face = np.asarray(face).reshape(width, height)
        face = cv2.resize(face.astype('uint8'), image_size)
        faces.append(face.astype('float32'))
    faces = np.asarray(faces)
    faces = np.expand_dims(faces, -1)
    emotions = pd.get_dummies(data['emotion']).values  # .values replaces the deprecated .as_matrix()
    return faces, emotions
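For context on the parsing above: each row of fer2013.csv stores the label in an emotion column and a single space-separated string of 48x48 grayscale values in a pixels column, which is why the loader splits on spaces. A minimal sketch of that step, using a made-up, truncated pixel string:

# Made-up row for illustration; a real fer2013 row has 2304 (48*48) values.
sample_pixels = '70 80 82 72'
parsed = np.asarray([int(p) for p in sample_pixels.split(' ')])
# load_fer2013 reshapes the full 2304-value array to (48, 48) per face.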
def preprocess_input(x, v2=True):
    x = x.astype('float32')
    x = x / 255.0
    if v2:
        x = x - 0.5
        x = x * 2.0
    return x
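With v2=True, preprocess_input maps raw pixel values from [0, 255] to [-1, 1] (divide by 255, shift down by 0.5, double). A quick check:

# Quick check of the v2 scaling: 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0
demo = np.array([0.0, 127.5, 255.0])
print(preprocess_input(demo))  # [-1.  0.  1.]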
# data generator
data_generator = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=.1,
    horizontal_flip=True)
# model parameters/compilation
# model = mini_XCEPTION(input_shape, num_classes)
regularization = l2(l2_regularization)
# base
img_input = Input(input_shape)
x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
           use_bias=False)(img_input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
           use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# module 1
residual = Conv2D(16, (1, 1), strides=(2, 2), padding='same',
                  use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(16, (3, 3), padding='same',
                    kernel_regularizer=regularization, use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(16, (3, 3), padding='same',
                    kernel_regularizer=regularization, use_bias=False)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = layers.add([x, residual])
# (modules 2-4 of mini_XCEPTION, omitted here, repeat the same pattern)
# NOTE: `output` was undefined in the snippet as posted; the usual
# mini_XCEPTION classifier head is reconstructed below, which matches the
# 'predictions' node named in the traceback
x = Conv2D(num_classes, (3, 3), padding='same')(x)
x = GlobalAveragePooling2D()(x)
output = Activation('softmax', name='predictions')(x)
model = Model(img_input, output)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
# callbacks
log_file_path = base_path + '_emotion_training.log'
csv_logger = CSVLogger(log_file_path, append=False)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                              patience=int(patience/4), verbose=1)
trained_models_path = base_path + '_mini_XCEPTION'
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, 'val_loss',
                                   verbose=1, save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]
# loading dataset
faces, emotions = load_fer2013()
faces = preprocess_input(faces)
num_samples, num_classes = emotions.shape
xtrain, xtest, ytrain, ytest = train_test_split(faces, emotions,
                                                test_size=0.2, shuffle=True)
model.fit_generator(data_generator.flow(xtrain, ytrain, batch_size),
                    steps_per_epoch=len(xtrain) / batch_size,
                    epochs=num_epochs, verbose=1, callbacks=callbacks,
                    validation_data=(xtest, ytest))
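As a sanity check (these diagnostic lines are an addition, not part of the script that fails), the array shapes and the step count can be inspected right before the fit_generator call:

# Diagnostic only: the model expects (48, 48, 1) inputs and 7-class one-hot labels.
print(xtrain.shape, ytrain.shape)  # expected: (N, 48, 48, 1) (N, 7)
print(xtest.shape, ytest.shape)
steps = int(np.ceil(len(xtrain) / batch_size))  # an integer step count is safer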
The error:
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-6-20ffaf91fcd2> in <module>
    150                     steps_per_epoch=len(xtrain) / batch_size,
    151                     epochs=num_epochs, verbose=1, callbacks=callbacks,
--> 152                     validation_data=(xtest,ytest))

~\Anaconda3\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

~\Anaconda3\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419
   1420     @interfaces.legacy_generator_methods_support

~\Anaconda3\lib\site-packages\keras\engine\training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
    215                 outs = model.train_on_batch(x, y,
    216                                             sample_weight=sample_weight,
--> 217                                             class_weight=class_weight)
    218
    219                 outs = to_list(outs)

~\Anaconda3\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1215             ins = x + y + sample_weights
   1216         self._make_train_function()
-> 1217         outputs = self.train_function(ins)
   1218         return unpack_singleton(outputs)
   1219

~\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2713                 return self._legacy_call(inputs)
   2714
-> 2715             return self._call(inputs)
   2716         else:
   2717             if py_any(is_tensor(x) for x in inputs):

~\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py in _call(self, inputs)
   2673             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]
   2677

~\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
   1437           ret = tf_session.TF_SessionRunCallable(
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:
   1441           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~\Anaconda3\lib\site-packages\tensorflow\python\framework\errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
    526             None, None,
    527             compat.as_text(c_api.TF_Message(self.status.status)),
--> 528             c_api.TF_GetCode(self.status.status))
    529     # Delete the underlying status object from memory otherwise it stays alive
    530     # as there is a reference to status from this from the traceback due to

InvalidArgumentError: Input to reshape is a tensor with 32 values, but the requested shape has 1
	 [[{{node training_3/Adam/gradients/loss_3/predictions_loss/Mean_grad/Reshape}} = Reshape[T=DT_FLOAT, Tshape=DT_INT32, _class=["loc:@training_3/Adam/gradients/loss_3/predictions_loss/Mean_grad/truediv"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](training_3/Adam/gradients/loss_3/predictions_loss/mul_1_grad/Sum, training_3/Adam/gradients/loss_3/predictions_loss/Mean_grad/DynamicStitch/_2655)]]