CIFAR-10测试集分类准确性在PyTorch和Keras上有所不同

时间:2019-09-10 03:32:34

标签: machine-learning keras deep-learning conv-neural-network pytorch

我在PyTorch中创建了一个自定义CNN,用于对CIFAR-10数据集中的10个类别进行分类。我在测试数据集上的分类准确度是45.739%,这是非常低的,我认为这是因为我的模型不是很深,但是我在Keras中实现了相同的模型,因此在测试数据集上的分类准确度是78.92%。 Keras没问题,但是我认为PyTorch程序中缺少一些东西。

我在PyTorch和Keras上使用了相同的模型架构,步幅,填充,辍学率,优化器,损失函数,学习率,批处理大小,纪元数,尽管如此,分类准确性的差异仍然很大因此我无法决定如何进一步调试PyTorch程序。

现在我怀疑3件事情:在Keras中,我使用了分类交叉熵损失函数(一个热矢量标签),在PyTorch中,我使用了标准交叉熵损失函数(标量索引标签),这可能是一个问题吗? ,如果不是,那么我怀疑我的训练循环或用于在PyTorch中计算分类准确性的代码。我在下面附上了我的两个程序,感谢您提出任何建议。

我在Keras中的程序

#================Function that defines the CNN model===========
def CNN_model():
    model = Sequential()

    model.add(Conv2D(32,(3,3),activation='relu',padding='same', input_shape=(size,size,channels))) #SAME PADDING
    model.add(Conv2D(32,(3,3),activation='relu')) #VALID PADDING
    model.add(MaxPooling2D(pool_size=(2,2))) #VALID PADDING
    model.add(Dropout(0.25))

    model.add(Conv2D(64,(3,3),activation='relu', padding='same')) #SAME PADDING
    model.add(Conv2D(64,(3,3),activation='relu')) #VALID PADDING
    model.add(MaxPooling2D(pool_size=(2,2))) #VALID PADDING
    model.add(Dropout(0.25))


    model.add(Conv2D(128,(3,3),activation='relu', padding='same')) #SAME PADDING
    model.add(Conv2D(128,(3,3),activation='relu')) #VALID PADDING
    model.add(MaxPooling2D(pool_size=(2,2),name='feature_extractor_layer')) #VALID PADDING
    model.add(Dropout(0.25))


    model.add(Flatten())

    model.add(Dense(512, activation='relu', name='second_last_layer'))
    model.add(Dropout(0.25))
    model.add(Dense(10, activation='softmax', name='softmax_layer')) #10 nodes in the softmax layer
    model.summary()

    return model



#=====Main program starts here========
#get_train_data() and get_test_data() are my own custom functions to get CIFAR-10 dataset
images_train, labels_train, class_train = get_train_data(0,10)
images_test, labels_test, class_test = get_test_data(0,10)

model = CNN_model()


model.compile(loss='categorical_crossentropy', #loss function of the CNN
             optimizer=Adam(lr=1.0e-4), #Optimizer
              metrics=['accuracy'])#'accuracy' metric is to be evaluated

#images_train and images_test contain images and 
#class_train and class_test contains one hot vectors labels 
model.fit(images_train,class_train,
          batch_size=128,
          epochs=50,
          validation_data=(images_test,class_test),
          verbose=1)

scores=model.evaluate(images_test,class_test,verbose=0)
print("Accuracy: "+str(scores[1]*100)+"% \n")

我在PyTorch中的程序:

#========DEFINE THE CNN MODEL=====
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3,1,1)#SAME PADDING
        self.conv2 = nn.Conv2d(32,32,3,1,0)#VALID PADDING
        self.pool1 = nn.MaxPool2d(2,2) #VALID PADDING
        self.drop1 = nn.Dropout2d(0.25) #DROPOUT OF 0.25

        self.conv3 = nn.Conv2d(32,64,3,1,1)#SAME PADDING
        self.conv4 = nn.Conv2d(64,64,3,1,0)#VALID PADDING
        self.pool2 = nn.MaxPool2d(2,2)#VALID PADDING
        self.drop2 = nn.Dropout2d(0.25) #DROPOUT OF 0.25

        self.conv5 = nn.Conv2d(64,128,3,1,1)#SAME PADDING
        self.conv6 = nn.Conv2d(128,128,3,1,0)#VALID PADDING
        self.pool3 = nn.MaxPool2d(2,2)#VALID PADDING
        self.drop3 = nn.Dropout2d(0.25) #DROPOUT OF 0.25

        self.fc1 = nn.Linear(128*2*2, 512)#128*2*2 IS OUTPUT DIMENSION AFTER THE PREVIOUS LAYER 
        self.drop4 = nn.Dropout(0.25) #DROPOUT OF 0.25
        self.fc2 = nn.Linear(512,10) #10 output nodes


    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.pool1(x)
        x = self.drop1(x)


        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = self.pool2(x)
        x = self.drop2(x)

        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = self.pool3(x)
        x = self.drop3(x)


        x = x.view(-1,2*2*128) #FLATTENING OPERATION 2*2*128 IS OUTPUT AFTER THE PREVIOUS LAYER
        x = F.relu(self.fc1(x))
        x = self.drop4(x)
        x = self.fc2(x) #LAST LAYER DOES NOT NEED SOFTMAX BECAUSE THE LOSS FUNCTION WILL TAKE CARE OF IT
        return x



#=======FUNCTION TO CONVERT INPUT AND TARGET TO TORCH TENSORS AND LOADING INTO GPU======
def PrepareInputDataAndTargetData(device,images,labels,batch_size):

    #GET MINI BATCH OF TRAINING IMAGES AND RESHAPE THE TORCH TENSOR FOR CNN PROCESSING
    mini_batch_images = torch.tensor(images)
    mini_batch_images = mini_batch_images.view(batch_size,3,32,32)

    #GET MINI BATCH OF TRAINING LABELS, TARGET SHOULD BE IN LONG FORMAT SO CONVERT THAT TOO
    mini_batch_labels = torch.tensor(labels)
    mini_batch_labels = mini_batch_labels.long()

    #FEED THE INPUT DATA AND TARGET LABELS TO GPU
    mini_batch_images = mini_batch_images.to(device)
    mini_batch_labels = mini_batch_labels.to(device)

    return mini_batch_images,mini_batch_labels

#==========MAIN PROGRAM==========
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#get_train_data() and get_test_data() are my own custom functions to get CIFAR-10 dataset 
Images_train, Labels_train, Class_train = get_train_data(0,10)
Images_test, Labels_test, Class_test = get_test_data(0,10)

net = Net()
net = net.double() #https://discuss.pytorch.org/t/runtimeerror-expected-object-of-scalar-type-double-but-got-scalar-type-float-for-argument-2-weight/38961
print(net)

#MAP THE MODEL ONTO THE GPU
net = net.to(device)

#CROSS ENTROPY LOSS FUNCTION AND ADAM OPTIMIZER
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)

#PREPARE THE DATALOADER
#Images_train contains images and Labels_trains contains indices i.e. 0,1,...,9 
dataset = TensorDataset( Tensor(Images_train), Tensor(Labels_train) )
trainloader = DataLoader(dataset, batch_size= 128, shuffle=True)

#START TRAINING THE CNN MODEL FOR 50 EPOCHS
for epoch in range(0,50):
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs = torch.tensor(inputs).double()
        inputs = inputs.view(len(inputs),3,32,32) #RESHAPE THE IMAGES
        labels = labels.long() #MUST CONVERT LABEL TO LONG FORMAT

        #MAP THE INPUT AND LABELS TO THE GPU
        inputs=inputs.to(device)
        labels=labels.to(device)

        #FORWARD PROP, BACKWARD PROP, PARAMETER UPDATE
        optimizer.zero_grad()
        outputs = net.forward(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()


#CALCULATE CLASSIFICATION ACCURACY ON ALL 10 CLASSES
with torch.no_grad():
    Images_class,Labels_class = PrepareInputDataAndTargetData(device,Images_test,Labels_test,len(Images_test))
    network_outputs = net.forward(Images_class)
    correct = (torch.argmax(network_outputs.data,1) == Labels_class.data).float().sum()
    acc = float(100.0*(correct/len(Images_class)))
    print("Accuracy is: "+str(acc)+"\n")

    del Images_class
    del Labels_class
    del network_outputs
    del correct
    del acc
    torch.cuda.empty_cache()


print("Done\n")

我不完全了解这两个库中实际的核心后端如何工作,但是我认为无论使用哪种库,任何模型的分类精度都应该几乎相同。

0 个答案:

没有答案