I have a dataset with an input size of 32x32x3. I want to classify this dataset using the VGG16 Keras implementation, but during the training phase I get the error below. I would appreciate any help. Thanks...
Error:
Traceback (most recent call last):
  File "VGG16_3.py", line 211, in <module>
    model = DroneVgg()
  File "VGG16_3.py", line 31, in __init__
    self.model = self.train(self.model)
  File "VGG16_3.py", line 204, in train
    validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)
  File "/truba/home/atkorez/anaconda3/envs/CPU_Env/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/truba/home/atkorez/anaconda3/envs/CPU_Env/lib/python3.6/site-packages/keras/engine/training.py", line 1418, in fit_generator
    initial_epoch=initial_epoch)
  File "/truba/home/atkorez/anaconda3/envs/CPU_Env/lib/python3.6/site-packages/keras/engine/training_generator.py", line 144, in fit_generator
    val_x, val_y, val_sample_weight)
  File "/truba/home/atkorez/anaconda3/envs/CPU_Env/lib/python3.6/site-packages/keras/engine/training.py", line 789, in _standardize_user_data
    exception_prefix='target')
  File "/truba/home/atkorez/anaconda3/envs/CPU_Env/lib/python3.6/site-packages/keras/engine/training_utils.py", line 128, in standardize_input_data
    'with shape ' + str(data_shape))
ValueError: Error when checking target: expected activation_15 to have 2 dimensions, but got array with shape (329038, 6, 6)
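From the traceback, the last layer (activation_15, the softmax after Dense(6)) expects 2-D targets of shape (batch_size, 6), but my validation labels arrive with shape (329038, 6, 6). My guess is that the labels stored in the .npy files are already one-hot encoded, so the to_categorical call in train() adds a second class axis. A minimal sketch of that effect, using random placeholder labels instead of my real data:

import numpy as np
from keras.utils import to_categorical

labels = np.random.randint(0, 6, size=10)  # 1-D integer labels, shape (10,)
onehot = to_categorical(labels, 6)         # shape (10, 6) -- what the model expects
double = to_categorical(onehot, 6)         # shape (10, 6, 6) -- same 3-D shape as in my error
print(onehot.shape, double.shape)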
EDIT: I sent two messages because I got too many code errors when it was all in one message.
Here is the Python code:
import numpy as np
import keras
from keras import optimizers, regularizers
from keras.models import Sequential
from keras.layers import (Activation, BatchNormalization, Conv2D, Dense,
                          Dropout, Flatten, MaxPooling2D)
from keras.preprocessing.image import ImageDataGenerator

def veri_yukle():
    # Load the pre-split train/test arrays from disk.
    x_train = np.load('/truba/home/atkorez/CapsNet/x_train.npy')
    x_test = np.load('/truba/home/atkorez/CapsNet/x_test.npy')
    y_train = np.load('/truba/home/atkorez/CapsNet/y_train.npy')
    y_test = np.load('/truba/home/atkorez/CapsNet/y_test.npy')
    return (x_train, y_train), (x_test, y_test)
class DroneVgg:
    def __init__(self, train=True):
        self.num_classes = 6
        self.weight_decay = 0.0005
        self.x_shape = [32, 32, 1]
        self.model = self.build_model()
        if train:
            self.model = self.train(self.model)
        else:
            self.model.load_weights('DroneVgg.h5')
    def build_model(self):
        model = Sequential()
        weight_decay = self.weight_decay

        model.add(Conv2D(64, (3, 3), padding='same',
                         input_shape=self.x_shape, kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))

        model.add(Flatten())
        model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(self.num_classes))
        model.add(Activation('softmax'))
        return model
    def normalize(self, X_train, X_test):
        # Scalar mean/std over all axes of the training set,
        # applied to both splits.
        mean = np.mean(X_train, axis=(0, 1, 2, 3))
        std = np.std(X_train, axis=(0, 1, 2, 3))
        X_train = (X_train - mean) / (std + 1e-7)
        X_test = (X_test - mean) / (std + 1e-7)
        return X_train, X_test
    def train(self, model):
        # training parameters
        batch_size = 128
        maxepoches = 250
        learning_rate = 0.1
        lr_decay = 1e-6
        lr_drop = 20

        # The data, shuffled and split between train and test sets:
        (x_train, y_train), (x_test, y_test) = veri_yukle()
        print('Veriler Yuklendi')
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train, x_test = self.normalize(x_train, x_test)
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
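        # NOTE (my suspicion, added for this question): the .npy label files may
        # already be one-hot encoded; if y_train starts out as (329038, 6), the
        # two to_categorical calls above turn it into (329038, 6, 6), which is
        # exactly the target shape reported in the traceback.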
        def lr_scheduler(epoch):
            return learning_rate * (0.5 ** (epoch // lr_drop))
        reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)

        # data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images horizontally
            vertical_flip=False)  # do not flip images vertically
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)

        # optimization details
        sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

        # training process, with the learning rate halved every 20 epochs.
        historytemp = model.fit_generator(datagen.flow(x_train, y_train,
                                                       batch_size=batch_size),
                                          steps_per_epoch=x_train.shape[0] // batch_size,
                                          epochs=maxepoches,
                                          validation_data=(x_test, y_test),
                                          callbacks=[reduce_lr], verbose=2)
        model.save_weights('DroneVgg.h5')
        return model
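If my suspicion above is right, encoding only when the labels are still 1-D integer class indices would keep the targets at the (N, 6) shape that the final Dense/softmax layer expects. This is the guard I am considering adding inside train(), right after veri_yukle() (a sketch, untested on my data):

# Hypothetical guard: only one-hot encode labels that are not one-hot already.
if y_train.ndim == 1:
    y_train = keras.utils.to_categorical(y_train, self.num_classes)
if y_test.ndim == 1:
    y_test = keras.utils.to_categorical(y_test, self.num_classes)

Is this the right diagnosis, or is something else producing the (329038, 6, 6) target array?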