How to store train_loss and valid_loss separately from epoch_loss?

Asked: 2019-01-18 16:02:15

Tags: slice pytorch epoch python-3.7

I am trying to store train_loss and valid_loss separately from epoch_loss, because epoch_loss returns two loss values (the first is the train loss, the second is the valid loss). epoch_loss is a float64 object. I tried converting it to a NumPy array and slicing into it, but it again gave me both values. (A sketch of the per-phase bookkeeping I am after follows the code snippet below.)

Here is the code snippet:

import copy
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)  # 1e-3

# Decay the LR by a factor of 0.1 every 2 epochs.
# step_size: period of learning rate decay.
# gamma: multiplicative factor of learning rate decay (default 0.1, should be a float).
scheduler = lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)
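# Each call to scheduler.step() advances the schedule by one epoch; with
# step_size=2 and gamma=0.1 the learning rate is multiplied by 0.1 every
# 2 epochs. The current value can be read from optimizer.param_groups[0]['lr'].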


def train_model(model, criterion, optimizer, scheduler, num_epochs=4):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):  # loop over the dataset multiple times
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 30)

        # Each epoch has a training and a validation phase
        for phase in ['train', 'valid']:
            if phase == 'train':
                # step the LR scheduler once per epoch (pre-1.1 PyTorch convention)
                scheduler.step()
                model.train()  # set model to training mode
            else:
                model.eval()   # set model to evaluation mode

            train_loss = 0.0
            total_train = 0
            correct_train = 0

            # iterate over the data
            for t_image, mask, image_paths, target_paths in dataLoaders[phase]:

                # get the inputs and move them to the device
                t_image = t_image.to(device)
                mask = mask.to(device)

                # zero the gradient buffers of all parameters
                optimizer.zero_grad()

                # forward
                # track history only if in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(t_image)
                    _, predicted = torch.max(outputs.data, 1)
                    loss = criterion(outputs, mask)  # calculate the loss

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()   # back propagation
                        optimizer.step()  # update the parameters

                # statistics
                train_loss += loss.item()
                total_train += mask.nelement()  # number of pixels in the batch
                correct_train += predicted.eq(mask.data).sum().item()  # sum of correctly predicted pixels

            epoch_loss = train_loss / len(dataLoaders[phase].dataset)
            epoch_acc = correct_train / total_train

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    torch.save(model.state_dict(), 'train_valid_exp1.pth')

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
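
To make the goal concrete, here is a minimal, self-contained sketch of the bookkeeping I am after, assuming the phase loop above: each phase appends its own scalar per epoch instead of both phases sharing one variable. history, train_losses and valid_losses are made-up names, and a dummy value stands in for the real epoch_loss computed in train_model.

# Rough sketch; 'history', 'train_losses' and 'valid_losses' are hypothetical
# names, and the dummy epoch_loss stands in for the value from train_model.
history = {'train': [], 'valid': []}

num_epochs = 4
for epoch in range(num_epochs):
    for phase in ['train', 'valid']:
        # ... the real phase loop from train_model would run here ...
        epoch_loss = 1.0 / (epoch + 1)     # dummy stand-in for the computed loss
        history[phase].append(epoch_loss)  # one float per phase per epoch

train_losses = history['train']   # [epoch 0 train loss, epoch 1 train loss, ...]
valid_losses = history['valid']   # [epoch 0 valid loss, epoch 1 valid loss, ...]
print(train_losses)
print(valid_losses)

With this layout, train_losses[i] and valid_losses[i] are single floats for epoch i, so there is nothing left to slice apart.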

0 Answers:

No answers yet.