We are training a Keras VGG16 CNN on the Places365-Standard dataset. We changed the output layer from 1,000 classes to the following 10 classes:

1. botanical_garden
2. cliff
3. creek
4. forest-broadleaf
5. islet
6. mountain
7. ocean
8. pier
9. skyscraper
10. temple-asia

Each class has 5,000 training images, for 50,000 training images in total. The problem is that after 10 epochs over all 50,000 images, the accuracy still cannot even reach 20%. We would greatly appreciate any advice on why the model's accuracy is so low. Thank you very much.
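For context, the label chunks loaded in the training loop below (the labelonehot_*.npy files) are one-hot encoded. Here is a minimal sketch of how such labels could be produced, assuming class indices 0 through 9 follow the order listed above (the variable names are illustrative, not from our actual preprocessing code):

import numpy as np
from keras.utils import to_categorical

# Assumed mapping: 0 = botanical_garden, ..., 9 = temple-asia
class_indices = np.array([0, 3, 9])   # e.g. botanical_garden, forest-broadleaf, temple-asia
one_hot = to_categorical(class_indices, num_classes=10)
print(one_hot.shape)                  # (3, 10): one row per image, a single 1 per row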
The model is as follows:
import numpy as np
from keras.models import Sequential
from keras.layers import ZeroPadding2D, Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout
from keras.optimizers import SGD

model = Sequential()
# Block 1: two 3x3 convs with 64 filters, then a 2x2 max-pool
model.add(ZeroPadding2D((1, 1), input_shape=(224,224,3)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
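# Block 2: two 3x3 convs with 128 filters, then a 2x2 max-pool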
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
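# Block 3: three 3x3 convs with 256 filters, then a 2x2 max-pool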
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
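# Block 4: three 3x3 convs with 512 filters, then a 2x2 max-pool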
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
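# Block 5: three 3x3 convs with 512 filters, then a 2x2 max-pool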
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
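# Classifier head: flatten, two 4096-unit dense layers, a 128-unit dense layer, then a 10-way softmax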
model.add(Flatten())
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, nesterov=True)
model.summary()
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
for i in range(10):
    for j in range(50):
        # Each chunk holds 1,000 images and their one-hot labels
        X = np.load("C:/Users/firzen41616316/Desktop/numpydataKeras_1000x50/imgonehot_" + str((j + 1) * 1000) + ".npy")
        Y = np.load("C:/Users/firzen41616316/Desktop/numpydataKeras_1000x50/labelonehot_" + str((j + 1) * 1000) + ".npy")
        model.fit(x=X, y=Y,
                  validation_split=0.2,
                  epochs=1,
                  verbose=1)
        print('Done training ', (j + 1) * 1000, ' images')
    print('Done training 50000 images, Epoch ', i, ' -------------')
Here is part of the printed output:
Done training 47000 images
Train on 800 samples, validate on 200 samples
Epoch 1/1
800/800 [==============================] - 29s 36ms/step - loss: 2.3021 - acc: 0.1187 - val_loss: 2.3036 - val_acc: 0.1050
Done training 48000 images
Train on 800 samples, validate on 200 samples
Epoch 1/1
800/800 [==============================] - 29s 36ms/step - loss: 2.3036 - acc: 0.1037 - val_loss: 2.3056 - val_acc: 0.1100
Done training 49000 images
Train on 800 samples, validate on 200 samples
Epoch 1/1
800/800 [==============================] - 29s 36ms/step - loss: 2.3028 - acc: 0.1187 - val_loss: 2.3042 - val_acc: 0.1050
Done training 50000 images
Done training 50000 images, Epoch 9 -------------
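For reference, the chunked loop above is roughly what the following single call would do, assuming all 50 chunks fit in memory at once (X_all and Y_all are illustrative names). One difference worth noting: validation_split=0.2 in the loop carves a fresh 200-image validation slice out of every 1,000-image chunk, while the call below holds out one fixed 20% slice of the full array.

import numpy as np

base = "C:/Users/firzen41616316/Desktop/numpydataKeras_1000x50/"
X_all = np.concatenate([np.load(base + "imgonehot_" + str((j + 1) * 1000) + ".npy") for j in range(50)])
Y_all = np.concatenate([np.load(base + "labelonehot_" + str((j + 1) * 1000) + ".npy") for j in range(50)])
# Ten passes over the full dataset, matching the outer loop above
model.fit(x=X_all, y=Y_all, validation_split=0.2, epochs=10, verbose=1)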