I use the following code to convert an RGB color image dataset to grayscale images:

from PIL import Image, ImageEnhance
im = Image.open('../DR datasets/DR-60.20.20/val/level_4/4.png')
im = im.convert('L')                        # convert RGB to single-channel grayscale
enhancer1 = ImageEnhance.Contrast(im)
factorContrast = 100
factorSharpness = 2
output = enhancer1.enhance(factorContrast)  # apply contrast enhancement first
enhancer2 = ImageEnhance.Sharpness(output)  # then sharpen the contrast-enhanced image
output = enhancer2.enhance(factorSharpness)
print("Contrast and Sharpness Changed")
output.save('../DR datasets/greyImage/4/greyimage.png')
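Since the dataset contains many images, here is a minimal sketch of how the same conversion could be looped over a whole directory tree (the src_root/dst_root paths and the mirrored class subfolders are assumptions for illustration, not my actual pipeline):

import os
from PIL import Image, ImageEnhance

# Hypothetical paths -- adjust to the real dataset layout.
src_root = '../DR datasets/DR-60.20.20/val'
dst_root = '../DR datasets/greyImage/val'

for class_name in os.listdir(src_root):
    src_dir = os.path.join(src_root, class_name)
    dst_dir = os.path.join(dst_root, class_name)
    os.makedirs(dst_dir, exist_ok=True)
    for fname in os.listdir(src_dir):
        # Same conversion and enhancement factors as in the single-image snippet above.
        im = Image.open(os.path.join(src_dir, fname)).convert('L')
        im = ImageEnhance.Contrast(im).enhance(100)
        im = ImageEnhance.Sharpness(im).enhance(2)
        im.save(os.path.join(dst_dir, fname))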
Then I feed this dataset into a CNN model:
from tensorflow.keras import layers, models

model = models.Sequential()
model.add(layers.Conv2D(32, 3, padding="same", kernel_initializer='he_uniform', activation='relu', input_shape=(224, 224, 1)))
model.add(layers.MaxPooling2D((2, 2), padding="same"))
model.add(layers.Conv2D(32, 3, padding="same", kernel_initializer='he_uniform', activation='relu'))
model.add(layers.MaxPooling2D((2, 2), padding="same"))
model.add(layers.Conv2D(64, 3, kernel_initializer='he_uniform', padding="same", activation='relu'))
model.add(layers.MaxPooling2D((2, 2), padding="same"))
model.add(layers.Conv2D(64, 3, kernel_initializer='he_uniform', padding="same", activation='relu'))
model.add(layers.MaxPooling2D((2, 2), padding="same"))
model.add(layers.Conv2D(128, 3, kernel_initializer='he_uniform', padding="same", activation='relu'))
model.add(layers.MaxPooling2D((2, 2), padding="same"))
model.add(layers.Conv2D(128, 3, kernel_initializer='he_uniform', padding="same", activation='relu'))
model.add(layers.MaxPooling2D((2, 2), padding="same"))
model.add(layers.Flatten())
model.add(layers.Dense(100, kernel_initializer='he_uniform', activation='relu'))
model.add(layers.Dense(50, kernel_initializer='he_uniform', activation='relu'))
model.add(layers.Dense(25, kernel_initializer='he_uniform', activation='relu'))
model.add(layers.Dense(15, kernel_initializer='he_uniform', activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(5, activation="softmax"))
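To confirm the network really expects single-channel 224x224 input, this is a quick sanity check I can run before training (the random numpy batch is only for illustration):

import numpy as np

model.summary()  # with six stride-2 poolings the final feature map should be 4x4x128

# A random single-channel batch with the expected shape (batch, 224, 224, 1).
dummy = np.random.rand(2, 224, 224, 1).astype('float32')
print(model.predict(dummy).shape)  # expected: (2, 5), i.e. five softmax outputs per image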
from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_height, img_width = 224, 224
# the number of images to load at each iteration
batch_size = 32
# only rescaling
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# these are generators for the train/val/test data that will read pictures
# found in the defined subfolders of 'data/'
print('Total number of images for "training":')
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical",
    color_mode='grayscale',
    shuffle=False
)
#train_generator = train_generator.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
print('Total number of images for "validation":')
val_generator = test_datagen.flow_from_directory(
    val_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical",
    color_mode='grayscale',
    shuffle=False
)
print('Total number of images for "testing":')
test_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical",
    shuffle=False
)
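To see what the generators actually feed the network, I can pull one batch from each and print its shape (just a check for illustration; next() on a Keras directory iterator returns an (images, labels) tuple):

x_batch, y_batch = next(train_generator)
print('train batch:', x_batch.shape)   # expect (32, 224, 224, 1) because color_mode='grayscale'

x_batch, y_batch = next(val_generator)
print('val batch:  ', x_batch.shape)   # expect (32, 224, 224, 1)

x_batch, y_batch = next(test_generator)
print('test batch: ', x_batch.shape)   # no color_mode set here, so this loads as (32, 224, 224, 3)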
Then I compile and train the model with the following code:
import tensorflow
from tensorflow.keras.callbacks import ModelCheckpoint

learning_rate = 1e-4
epochs = 30
checkpoint = ModelCheckpoint("mqmodifiedvgg_classifier.h5", monitor='val_acc', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto', period=1)
model.compile(loss="categorical_crossentropy",
              optimizer=tensorflow.optimizers.Adam(lr=learning_rate),
              metrics=['acc'])
history = model.fit(train_generator,
                    epochs=epochs,
                    shuffle=True,
                    validation_data=val_generator,
                    #steps_per_epoch=100,
                    callbacks=[checkpoint]
                    )
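For completeness, this is roughly how I would load the best checkpoint afterwards and evaluate it on the test split. It is only a sketch: eval_generator is a hypothetical grayscale re-definition of the test generator, added here so the channel count matches the model's (224, 224, 1) input; it is not part of my current code.

from tensorflow.keras.models import load_model

# Load the best checkpoint written by the ModelCheckpoint callback above.
best_model = load_model("mqmodifiedvgg_classifier.h5")

# Re-create the test generator with color_mode='grayscale' so its batches
# have a single channel, matching the model's input shape.
eval_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical",
    color_mode='grayscale',
    shuffle=False
)
test_loss, test_acc = best_model.evaluate(eval_generator, verbose=1)
print('test loss:', test_loss, 'test acc:', test_acc)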
But it does not keep training the model: after the first epoch completes it throws this error:

NotFoundError: No algorithm worked!
    [[node sequence_5/conv2d_25/Conv2D (defined at :4) ]] [Op:__inference_test_function_22208]
Function call stack:
test_function
I checked the converted image with len(image.shape) and it returns 2, which means the image has a single channel (the exact check is reproduced below), but the model still does not train. Thanks in advance.
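A minimal reproduction of that check, using the file saved by the conversion snippet above (numpy is only used to get a .shape attribute):

import numpy as np
from PIL import Image

image = np.array(Image.open('../DR datasets/greyImage/4/greyimage.png'))
print(image.shape, len(image.shape))  # e.g. (height, width) and 2 -> single channel on disk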