I've been working on this code for a while, and although I get the results I want, such as accuracy or the F1 score, no matter what I try I cannot get a confusion matrix. I tried reusing some code other people posted, but the most common error I run into is that the 'History' object does not have that attribute, so any help would be appreciated. Basically, what the code does is classify images into certain categories.
import random
import cv2
from keras.datasets import cifar10
from keras.utils import to_categorical
from keras import backend as K
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, GaussianNoise
from keras.optimizers import SGD, Adam, Adagrad
from keras.applications.resnet50 import ResNet50
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pickle
import sklearn
from sklearn.model_selection import train_test_split
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
from sklearn.metrics import confusion_matrix
def model(x_train, y_train, x_test, y_test, base_model, optimizer, loss_func, dropout, batch_size, frozen_layers):
    # take the base model's output and add a global average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # add a fully-connected layer
    x = Dense(512, activation='relu')(x)
    x = Dropout(dropout)(x)
    # add the output layer
    predictions = Dense(24, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    # freeze the pre-trained base model's layers
    for layer in base_model.layers:
        layer.trainable = False
    # train only the newly added layers first
    model.compile(optimizer=optimizer, loss=loss_func)
    model.fit(x_train, y_train)
    # choose which layers are updated during fine-tuning
    layer_num = len(model.layers)
    for layer in model.layers[:int(layer_num * frozen_layers)]:
        layer.trainable = False
    for layer in model.layers[int(layer_num * frozen_layers):]:
        layer.trainable = True
    # recompile and fine-tune with the chosen layers unfrozen
    model.compile(optimizer=optimizer, loss=loss_func, metrics=['acc', recall_m, precision_m, f1_m])
    history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10).predict(x_test)
    #history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=10, verbose=1, validation_data=(x_test,y_test))
    return history
res_50_model = ResNet50(weights='imagenet', include_top=False)
#Hyperparameters
learning_rate = 0.0001
optimizer = Adagrad(learning_rate)
loss_func = 'binary_crossentropy' # binary_crossentropy, categorical_hinge or categorical_crossentropy
dropout = 0.4
batch_size = 64
frozen_layers = 0.9
history_res_50_1 = model(x_train_resized, y_train_hot_encoded, x_test_resized, y_test_hot_encoded, res_50_model, optimizer, loss_func, dropout, batch_size, frozen_layers)
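From what I have read, model.fit() returns a History object that only stores the per-epoch loss and metric values, so calling .predict() on it (as I do inside the function) is probably what triggers the "'History' object has no attribute" error. My current guess is that I need something like the sketch below, assuming the function is changed to also return the trained Keras model and that my labels are one-hot encoded (trained_model and the tuple return are assumptions, not what my function currently does):

import numpy as np
from sklearn.metrics import confusion_matrix

# assumption: model() is modified to end with `return history, model`
history_res_50_1, trained_model = model(x_train_resized, y_train_hot_encoded,
                                        x_test_resized, y_test_hot_encoded,
                                        res_50_model, optimizer, loss_func,
                                        dropout, batch_size, frozen_layers)

# softmax probabilities -> predicted class indices
y_pred = np.argmax(trained_model.predict(x_test_resized), axis=1)
# one-hot encoded labels -> true class indices
y_true = np.argmax(y_test_hot_encoded, axis=1)

print(confusion_matrix(y_true, y_pred))

Is that the right direction for getting the confusion matrix, or am I missing something?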