I am doing classification with a convolutional neural network, and I watched a video on YouTube that explains the confusion matrix and how to plot it. Based on the code used in that video, my code is:
`import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix

# Predict the values from the validation dataset
Y_pred = model.predict(X_test)
# Convert the predicted probabilities to class labels
Y_pred_classes = np.argmax(Y_pred, axis=1)
print(Y_pred_classes)
# Convert the one-hot encoded validation labels back to class labels
Y_true = np.argmax(y_test, axis=1)
print(Y_true)
# Compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# Plot the confusion matrix
f, ax = plt.subplots(figsize=(8, 8))
sns.heatmap(confusion_mtx, annot=True, linewidths=0.01, cmap="Greens", linecolor="gray", fmt='.1f', ax=ax)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.title("Confusion Matrix")
plt.show()`
But my output is not a 1-D array; instead it is a kind of matrix:
`[5 0 6 ... 0 2 0]
[5 0 6 ... 0 2 0]
...
[6 0 6 ... 0 3 0]
[6 0 6 ... 0 2 0]
[6 0 6 ... 0 2 0]]
[[4 0 4 ... 0 2 0]
[6 0 6 ... 0 2 0]
[6 0 6 ... 0 2 0]
...
[0 0 0 ... 0 3 0]
[6 0 6 ... 0 2 0]
[6 0 6 ... 0 2 0]]
[[2 0 4 ... 0 1 0]
[6 0 6 ... 0 3 0]
[6 0 6 ... 0 3 0]
...
[0 0 0 ... 0 3 0]
[6 0 6 ... 0 3 0]
[6 0 6 ... 0 2 0]]
...
[[5 0 5 ... 0 3 0]
[6 0 6 ... 0 3 0]
[6 0 6 ... 0 3 0]
...
[0 0 6 ... 0 3 0]
[6 0 6 ... 0 3 0]
[0 0 6 ... 0 3 0]]
[0 0 0 0 0 0 1]`
So what should I do to turn this prediction matrix into a plain array of class labels? I also tried Keras' predict_classes, but that did not work for me either.
Because it should look like this:
`>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
       [0, 0, 1],
       [1, 0, 2]])`
But that is not what I get. Thank you.
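To be explicit about what I am expecting: np.argmax over a (n_samples, n_classes) array of predicted probabilities should collapse it to a 1-D array of class labels that confusion_matrix can consume. A minimal sketch with made-up toy probabilities (the values below are hypothetical, only the shapes matter):
`import numpy as np
from sklearn.metrics import confusion_matrix

# Hypothetical per-class probabilities for 6 samples and 3 classes,
# i.e. the (n_samples, n_classes) shape I expect model.predict() to return
toy_probs = np.array([[0.7, 0.2, 0.1],   # -> class 0
                      [0.6, 0.3, 0.1],   # -> class 0
                      [0.1, 0.2, 0.7],   # -> class 2
                      [0.2, 0.1, 0.7],   # -> class 2
                      [0.8, 0.1, 0.1],   # -> class 0
                      [0.1, 0.2, 0.7]])  # -> class 2

# argmax along axis 1 collapses the probabilities to a 1-D array of labels
y_pred = np.argmax(toy_probs, axis=1)
print(y_pred)                            # [0 0 2 2 0 2]

y_true = [2, 0, 2, 2, 0, 1]
print(confusion_matrix(y_true, y_pred))  # the 3x3 matrix shown above`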
For more context, here is my whole model:
import numpy as np
import os
import time
from keras.preprocessing import image
from keras.layers import GlobalMaxPooling2D, Dense, Dropout,Activation,Flatten
from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
#from sklearn.cross_validation import train_test_split
# Loading the training data
PATH = '/content/drive/My Drive/female'
# Define data path
data_path = PATH + '/eeg2'
data_dir_list = os.listdir(data_path)
img_data_list=[]
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loaded the images of dataset-{}\n'.format(dataset))
    for img in img_list:
        img_path = data_path + '/' + dataset + '/' + img
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        print('Input image shape:', x.shape)
        img_data_list.append(x)
img_data = np.array(img_data_list)
#img_data = img_data.astype('float32')
print (img_data.shape)
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
# Define the number of classes
num_classes = 2
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:23]=0
labels[23:]=1
#labels[96:132]=2
#labels[132:]=3
# names = ['birbucuk','birileucarasi','uciledortarasi','besileyediarasi']
# convert class labels to one-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
print(Y)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)
from keras.applications.resnet50 import ResNet50
from keras.models import Model
import time
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
model = ResNet50(weights='imagenet', include_top=False, input_tensor=None, input_shape=None, pooling=None, classes=2)
model.summary()
last_layer = model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(last_layer)
# add fully-connected & dropout layers
x = Dense(512, activation='relu',name='fc-1')(x)
x = Dropout(0.6)(x)
x = Dense(512, activation='relu',name='fc-2')(x)
x = Dropout(0.5)(x)
# a softmax layer for 2 classes
out = Dense(2, activation='softmax',name='output_layer')(x)
# this is the model we will train
custom_resnet_model2 = Model(inputs=model.input, outputs=out)
custom_resnet_model2.summary()
for layer in custom_resnet_model2.layers[:]:
    layer.trainable = True
from keras import losses
from keras.optimizers import Adam
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
custom_resnet_model2.compile(loss="categorical_crossentropy",optimizer=optimizer,metrics=['accuracy'])
t=time.time()
hist = custom_resnet_model2.fit(X_train, y_train, batch_size=32, epochs=50, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=32, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
# Predict the values from the validation dataset
Y_pred = model.predict(X_test)
# Convert the predicted probabilities to class labels
Y_pred_classes = np.argmax(Y_pred, axis=1)
print(Y_pred_classes)
# Convert the one-hot encoded validation labels back to class labels
Y_true = np.argmax(y_test, axis=1)
print(Y_true)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(confusion_mtx, annot=True, linewidths=0.01,cmap="Greens",linecolor="gray", fmt= '.1f',ax=ax)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.title("Confusion Matrix")
plt.show()