模型测试和验证的准确性没有提高

时间:2020-07-23 12:31:04

标签: machine-learning cnn supervised-learning vgg-net

我从USF database下载了乳腺癌。下载数据集后,我将LJPEG文件转换为PNG,执行了CLAHE以提高图像质量,并使用标记的GIF文件作为参考来提取癌症区域(来自网站)。然后,我使用剪辑限制为5、10、15和20的cv2进行了对比度受限的自适应直方图均衡化(CLAHE)。 为了进行模型训练,使用了以下方法:

  1. 具有最后一层SoftMax的VGG19,可将类别从1000减少到2。VGG19模型层设置为不可训练。
  2. 带有最后一层SoftMax的VGG19,将类别从1000减少到2。将前25层VGG19模型层设置为不可训练。
  3. 具有最后一层SoftMax的VGG19,可将类别从1000减少到2。将VGG19模型的前10层设置为不可训练。
  4. 最后一层为relu / tanh / Sigmoid的VGG19,其辍学率为0.25 / 0.5,并且将SoftMax的类别从1000个减少到2个。VGG19模型层设置为不可训练。
  5. 基于Keras的自定义CNN网络。 对于优化程序,我使用了Adam,Adagrad和Adadelta。 我尝试了所有这些方法,但是:
  6. 在某些情况下,模型训练的准确性和损失在2-10个步骤后停止变化。
  7. 在某些情况下,模型训练的准确性和损失在2到10步之后开始下降并增加。
  8. 在某些情况下,模型训练的准确性和损失在2-10个步骤后开始增加和减少,但验证损失/准确性并未改善。

此后,我通过提取癌症部位来改善训练数据,从而使剩余图像变暗,并将癌症斑块恢复到图像上。癌症部位的整体外观得到改善,但结果相同。模型训练的准确性没有增加,模型验证的准确性也没有增加。附加了在此步骤中预处理过的图像。 Image 我使用的基本代码是:

1。

import os
import tensorflow as tf
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.layers import Dense
import cv2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.models import Model
import numpy as np
#import torch
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adagrad

base_model = VGG19(weights='imagenet',include_top = 'False')
x = base_model.output
preds=Dense(2,activation='softmax')(x)
#model = Model(inputs=base_model.input, outputs=base_model.get_layer('flatten').output)
model = Model(inputs=base_model.input, outputs=preds)
def get_features(img_path):
    img = image.load_img(img_path, target_size=(224, 224,3))
    x = image.img_to_array(img)
    return x
for layer in base_model.layers:
    layer.trainable = False

NUM_EPOCHS = 1000
INIT_LR = 1e-3
opt = Adagrad(lr=INIT_LR, decay=INIT_LR / NUM_EPOCHS)
#opt = tf.keras.optimizers.Adam(lr = 0.01)
model.compile(loss="categorical_crossentropy", optimizer=opt,
    metrics=["accuracy"])    
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
X = []
y = []

car_plots = []
bp =  'E:/benign'
cp = 'E:/cancer'
print("Fetching Data")
for (_,_,filenames) in os.walk(bp):
    car_plots.extend(filenames)
    break

for cplot in car_plots:
    X.append(get_features(bp + '/' + cplot))
    y.append(0)

bike_plots = []
for (_,_,filenames) in os.walk(cp):
    bike_plots.extend(filenames)
    break

for cplot in bike_plots:
    X.append(get_features(cp +'/' + cplot))
    y.append(1)
yt = np.array(y)
y_b = to_categorical(yt)

# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "E:/training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)


#filepath = "E:/training_2/cp-{epoch:04d}.ckpt"
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path, 
    verbose=1, 
    save_weights_only=True,
    period=1000)
callbacks_list = [checkpoint]

# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))

Xt = np.array(X)
#Xt = torch.from_numpy(Xt)
print("Starting Training");
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(Xt, y_b, test_size=0.01, random_state=42, stratify=y)
model.fit(X_train,y_train,epochs=100, batch_size=32,callbacks=callbacks_list,validation_data=[X_test,y_test])
model.save('model1tf')
import os
from keras.applications.vgg19 import VGG19
from keras.layers import Dense, Flatten, Dropout
from keras import Sequential
import cv2
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
import numpy as np
import torch
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import SeparableConv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense

import keras
chanDim = -1
base_model = VGG19(weights='imagenet',include_top = 'False', input_shape=(224,224,3))

models = Sequential()

models.add(Flatten(input_shape=base_model.output_shape[1:]))

models.add(SeparableConv2D(32, (3, 3), padding="same"))
models.add(Activation("relu"))
models.add(BatchNormalization(axis=chanDim))
models.add(MaxPooling2D(pool_size=(2, 2)))
models.add(Dropout(0.25))

        # (CONV => RELU => POOL) * 2
models.add(SeparableConv2D(64, (3, 3), padding="same"))
models.add(Activation("relu"))
models.add(BatchNormalization(axis=chanDim))
models.add(SeparableConv2D(64, (3, 3), padding="same"))
models.add(Activation("relu"))
models.add(BatchNormalization(axis=chanDim))
models.add(MaxPooling2D(pool_size=(2, 2)))
models.add(Dropout(0.25))

        # (CONV => RELU => POOL) * 3
models.add(SeparableConv2D(128, (3, 3), padding="same"))
models.add(Activation("relu"))
models.add(BatchNormalization(axis=chanDim))
models.add(SeparableConv2D(128, (3, 3), padding="same"))
models.add(Activation("relu"))
models.add(BatchNormalization(axis=chanDim))
models.add(SeparableConv2D(128, (3, 3), padding="same"))
models.add(Activation("relu"))
models.add(BatchNormalization(axis=chanDim))
models.add(MaxPooling2D(pool_size=(2, 2)))
models.add(Dropout(0.25))

        # first (and only) set of FC => RELU layers
models.add(Flatten())
models.add(Dense(256))
models.add(Activation("relu"))
models.add(BatchNormalization())
models.add(Dropout(0.5))

        # softmax classifier
models.add(Dense(2))
models.add(Activation("softmax"))


#model = Model(inputs=base_model.input, outputs=models)
#model.add(models)
model = Model(inputs=base_model.input, outputs=models(base_model.output))
def get_features(img_path):
    img = image.load_img(img_path, target_size=(224, 224,3))
    x = image.img_to_array(img)
    #x = np.expand_dims(x, axis=0)
    #x = preprocess_input(x)
    #flatten = model.predict(x)
    #print(len(flatten[0]))
    return x
for layer in base_model.layers:
    layer.trainable = False
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
#opt = keras.optimizers.Adam(learning_rate=0.001)
opt =  keras.optimizers.Adadelta( learning_rate=0.01, rho=0.8, epsilon=1e-04)
model.compile(optimizer=opt, loss='mean_absolute_error')
X = []
y = []

car_plots = []
print("Fetching Data")
bp =  'E:/benign'
cp = 'E:/cancer'
print("Fetching Data")

count = 0
for (_,_,filenames) in os.walk(bp):
    car_plots.extend(filenames)
    break

for cplot in car_plots:
    X.append(get_features(bp + '/' + cplot))
    y.append(0)

bike_plots = []
for (_,_,filenames) in os.walk(cp):
    bike_plots.extend(filenames)
    break

for cplot in bike_plots:
    X.append(get_features(cp +'/' + cplot))
    y.append(1)
for Xm in X:
    Xm /=255
yt = np.array(y)
#yt = torch.from_numpy(yt)
#X /=255
y_b = to_categorical(yt)


#model.load_weights('training/cp-5000.ckpt')

# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "E:/training_1/cp1.ckpt" #"training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)


#filepath = "training/cp-{epoch:04d}.ckpt"
checkpoint = ModelCheckpoint(filepath = checkpoint_path, verbose=1, period= 50)
callbacks_list = [checkpoint]

# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))


Xt = np.array(X)
#Xt = torch.from_numpy(Xt)
print("Starting Training");
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(Xt, y_b, test_size=0.20, random_state=42, stratify=y)
model.fit(X_train,y_train,epochs=100, batch_size=16,callbacks=callbacks_list,validation_data=[X_test,y_test])
model.save('model9.hdf')

请指导我做错了什么。

0 个答案:

没有答案