Neural network PyTorch

Date: 2018-07-08 16:35:32

Tags: python machine-learning conv-neural-network pytorch

I am very new to PyTorch and have implemented my own network for image classification. I see that for every epoch the training accuracy is very good, but the validation accuracy is 0 until the 5th epoch. I am using the Adam optimizer with a learning rate of .001, and after every epoch the whole dataset is resampled into training and validation sets. Please help me figure out what is going wrong.

Here is my code:

import numpy as np
from torchvision import datasets
from torch.utils.data.sampler import SubsetRandomSampler

### where is the data?
data_dir_train = '/home/sup/PycharmProjects/deep_learning/CNN_Data/training_set'
data_dir_test = '/home/sup/PycharmProjects/deep_learning/CNN_Data/test_set'

# Define your batch_size
batch_size = 64

# transformArr is the transform pipeline (defined elsewhere in the project)
allData = datasets.ImageFolder(root=data_dir_train, transform=transformArr)


# We need to further split our training dataset into training and validation sets.
def split_train_validation():
    # Define the indices
    num_train = len(allData)
    indices = list(range(num_train)) # start with all the indices in training set
    split = int(np.floor(0.2 * num_train)) # define the split size
    #train_idx, valid_idx = indices[split:], indices[:split]

    # Random, non-contiguous split
    validation_idx = np.random.choice(indices, size=split, replace=False)
    train_idx = list(set(indices) - set(validation_idx))
    # define our samplers -- we use a SubsetRandomSampler because it will return
    # a random subset of the split defined by the given indices without replacement
    train_sampler = SubsetRandomSampler(train_idx)
    validation_sampler = SubsetRandomSampler(validation_idx)

    #train_loader = DataLoader(allData,batch_size=batch_size,sampler=train_sampler,shuffle=False,num_workers=4)
    #validation_loader = DataLoader(dataset=allData,batch_size=1, sampler=validation_sampler)

    return (train_sampler,validation_sampler)
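For reference, the transformArr used above is never shown in the question; a typical definition (purely illustrative, not the asker's actual code) would be a torchvision pipeline such as:

from torchvision import transforms

# hypothetical transform pipeline -- the question does not show the real transformArr
transformArr = transforms.Compose([
    transforms.Resize((64, 64)),                            # fixed input size for the CNN
    transforms.ToTensor(),                                  # PIL image -> CHW float tensor in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # shift each channel to roughly [-1, 1]
])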

Training

from torch.optim import Adam
import torch
import createNN
import torch.nn as nn
import loadData as ld
from torch.autograd import  Variable
from torch.utils.data import DataLoader

# check if cuda - GPU support available
cuda = torch.cuda.is_available()

#create model, optimizer and loss function
model = createNN.ConvNet(class_num=2)
optimizer = Adam(model.parameters(),lr=.001,weight_decay=.0001)
loss_func = nn.CrossEntropyLoss()

if cuda:
    model.cuda()

# function to save model
def save_model(epoch):
    torch.save(model.state_dict(),'imageClassifier_{}.model'.format(epoch))  # state_dict(), not load_state_dict()
    print('saved model at epoch',epoch)

def exp_lr_scheduler(epoch, init_lr=args.lr, weight_decay=args.weight_decay, lr_decay_epoch=cf.lr_decay_epoch):
    # halve the learning rate every lr_decay_epoch epochs (note: this helper is never called below)
    lr = init_lr * (0.5 ** (epoch // lr_decay_epoch))
    return lr

def train(num_epochs):
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('\n\nEpoch {}'.format(epoch))
        train_sampler, validation_sampler = ld.split_train_validation()
        train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
        validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)
        model.train()
        acc = 0.0
        loss = 0.0
        total = 0
        # train model with training data
        for i,(images,labels) in enumerate(train_loader):
            # if cuda then move to GPU
            if cuda:
                images = images.cuda()
                labels = labels.cuda()
            # Variable class wraps a tensor and we can calculate grad
            images = Variable(images)
            labels = Variable(labels)
            # reset accumulated gradients for each batch
            optimizer.zero_grad()
            # pass images to model which returns prediction
            output = model(images)
            #calculate the loss based on prediction and actual
            loss = loss_func(output,labels)
            # backpropagate the loss and compute gradient
            loss.backward()
            # update weights as per the computed gradients
            optimizer.step()

            # prediction class
            predVal , predClass = torch.max(output.data, 1)
            acc += torch.sum(predClass == labels.data)
            loss += loss.cpu().data[0]
            total += labels.size(0)
        # print the statistics
        train_acc = acc/total
        train_loss = loss / total
        print('Mean train acc = {} over epoch = {}'.format(epoch,acc))
        print('Mean train loss = {} over epoch = {}'.format(epoch, loss))

        # Validate model with validation data
        model.eval()
        acc = 0.0
        loss = 0.0
        total = 0
        for i,(images,labels) in enumerate(validation_loader):
            # if cuda then move to GPU
            if cuda:
                images = images.cuda()
                labels = labels.cuda()
            # Variable class wraps a tensor and we can calculate grad
            images = Variable(images)
            labels = Variable(labels)
            # reset accumulated gradients for each batch
            optimizer.zero_grad()
            # pass images to model which returns prediction
            output = model(images)
            #calculate the loss based on prediction and actual
            loss = loss_func(output,labels)
            # backpropagate the loss and compute gradient
            loss.backward()
            # update weights as per the computed gradients
            optimizer.step()

            # prediction class
            predVal, predClass = torch.max(output.data, 1)
            acc += torch.sum(predClass == labels.data)
            loss += loss.cpu().data[0]
            total += labels.size(0)
        # print the statistics
        valid_acc = acc / total
        valid_loss = loss / total
        print('Mean train acc = {} over epoch = {}'.format(epoch, valid_acc))
        print('Mean train loss = {} over epoch = {}'.format(epoch, valid_loss))

        if(best_acc<valid_acc):
            best_acc = valid_acc
            save_model(epoch)

        # at 30th epoch we save the model
        if (epoch == 30):
            save_model(epoch)


train(20)

1 Answer:

Answer 0 (score: 2):

I think you are not taking into account that acc += torch.sum(predClass == labels.data) returns a tensor instead of a float value. Depending on the PyTorch version you are using, I think you should change it to:

acc += torch.sum(predClass == labels.data).cpu().data[0] #pytorch 0.3
acc += torch.sum(predClass == labels.data).item() #pytorch 0.4
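This also explains why the validation accuracy shows as 0: with the integer-division semantics of the PyTorch versions current at the time (recent releases perform true division with /), dividing the accumulated integer tensor by total truncates toward zero. A minimal sketch (values hypothetical) of the effect:

import torch

labels = torch.tensor([1, 0, 1, 1])
predClass = torch.tensor([1, 0, 0, 1])

acc = torch.sum(predClass == labels)   # an integer tensor, here tensor(3)
total = labels.size(0)                 # 4
print(acc / total)                     # integer division on PyTorch 0.4 -> tensor(0)
print(acc.item() / total)              # 0.75, the intended accuracy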

Although your code seems to work for older PyTorch versions, I would recommend upgrading to the 0.4 version.

I also noticed some other issues/typos in your code.

You are loading the dataset for every epoch:

for epoch in range(num_epochs):
    print('\n\nEpoch {}'.format(epoch))
    train_sampler, validation_sampler = ld.split_train_validation()
    train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
    validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)
    ...

That should not happen; loading once is enough. SubsetRandomSampler already draws its indices in a fresh random order on every pass, so each epoch still sees the training subset reshuffled:

train_sampler, validation_sampler = ld.split_train_validation()
train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)
for epoch in range(num_epochs):
    print('\n\nEpoch {}'.format(epoch))
    ...

In the training part you have (this does not happen in the validation part):

train_acc = acc/total
train_loss = loss / total
print('Mean train acc = {} over epoch = {}'.format(epoch,acc))
print('Mean train loss = {} over epoch = {}'.format(epoch, loss))

where you are printing acc instead of train_acc (and likewise loss instead of train_loss).

Also, in the validation part, I noticed that where you print print('Mean train acc = {} over epoch = {}'.format(epoch, valid_acc)) it should say something like 'Mean val acc'.
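Note, too, that the format arguments are swapped relative to the placeholders (the epoch value is printed where the accuracy belongs). A corrected version of the four print statements might read:

# training section
print('Mean train acc = {} over epoch = {}'.format(train_acc, epoch))
print('Mean train loss = {} over epoch = {}'.format(train_loss, epoch))

# validation section
print('Mean val acc = {} over epoch = {}'.format(valid_acc, epoch))
print('Mean val loss = {} over epoch = {}'.format(valid_loss, epoch))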

Changing those lines of code, and using a standard model I created together with the CIFAR dataset, the training seems to converge: the accuracy goes up at every epoch and the mean loss goes down.
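Putting these fixes together, a minimal sketch of the corrected epoch loop could look like the following (PyTorch 0.4 style; the names follow the question's code; the torch.no_grad() context is my addition, and the sketch also drops the loss.backward()/optimizer.step() calls that the question's validation loop performs, since those would train on the validation data):

# create the loaders once, outside the epoch loop
train_sampler, validation_sampler = ld.split_train_validation()
train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler)
validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)

for epoch in range(num_epochs):
    # training
    model.train()
    acc, running_loss, total = 0.0, 0.0, 0
    for images, labels in train_loader:
        if cuda:
            images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        output = model(images)
        loss = loss_func(output, labels)
        loss.backward()
        optimizer.step()
        _, predClass = torch.max(output.data, 1)
        acc += torch.sum(predClass == labels.data).item()  # .item() yields a Python number
        running_loss += loss.item()
        total += labels.size(0)
    print('Mean train acc = {} over epoch = {}'.format(acc / total, epoch))
    print('Mean train loss = {} over epoch = {}'.format(running_loss / total, epoch))

    # validation
    model.eval()
    acc, total = 0.0, 0
    with torch.no_grad():  # no gradients needed while evaluating
        for images, labels in validation_loader:
            if cuda:
                images, labels = images.cuda(), labels.cuda()
            output = model(images)
            _, predClass = torch.max(output.data, 1)
            acc += torch.sum(predClass == labels.data).item()
            total += labels.size(0)
    print('Mean val acc = {} over epoch = {}'.format(acc / total, epoch))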

Hope I was able to help you!