How do I calculate validation loss? (simple linear regression)

Time: 2020-08-12 03:12:30

Tags: python deep-learning neural-network pytorch linear-regression

I am currently learning how to build neural networks with PyTorch. I have used Keras before, and I would like to do the same things in PyTorch, such as 'model.fit' and plotting a graph that contains both the training loss and the validation loss.

To tell whether the model is underfitting, I have to plot a graph comparing the training loss with the validation loss.

However, I cannot compute the correct validation loss. I know that gradients should only be updated during training, not during testing/validation. Since the gradients do not change then, does that mean the loss does not change either? Sorry, my concepts are still not clear. But I don't think so: the loss should still be computed by comparing the expected output with the prediction using the loss function.

In my code, 80 samples are used for training and 20 for validation. The neural network is learning to predict the formula y = 2x^3 + 7x^2 - 8x + 120. It is easy to compute, so I use it to practice building a neural network with PyTorch.
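For context, a dataset like this could be generated with a short script (a sketch only; the actual contents and column names of test_100.csv are not shown here, so they are assumed):

import numpy as np
import pandas as pd

x = np.linspace(-10, 10, 100)                    # 100 samples
y = 2 * x**3 + 7 * x**2 - 8 * x + 120            # the target formula
pd.DataFrame({'X': x, 'Y': y}).to_csv('test_100.csv', index=False)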

Here is my code:

import torch
import torch.nn as nn    #neural network model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.preprocessing import MinMaxScaler

#Load datasets
dataset = pd.read_csv('test_100.csv')

X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1:].values

X_scaler = MinMaxScaler()
Y_scaler = MinMaxScaler()
print(X_scaler.fit(X))
print(Y_scaler.fit(Y))
X = X_scaler.transform(X)
Y = Y_scaler.transform(Y)

x_temp_train = X[:80]   # first 80 samples for training
y_temp_train = Y[:80]
x_temp_test = X[80:]    # last 20 samples for validation
y_temp_test = Y[80:]

X_train = torch.FloatTensor(x_temp_train)
Y_train = torch.FloatTensor(y_temp_train)
X_test = torch.FloatTensor(x_temp_test)
Y_test = torch.FloatTensor(y_temp_test)

D_in = 1 # D_in is input features
H = 24 # H is hidden dimension
D_out = 1 # D_out is output features.

#Define an artificial neural network model
class Net(nn.Module):
#------------------Two Layers------------------------------
    def __init__(self, D_in, H, D_out):
        super(Net, self).__init__()

        self.linear1 = nn.Linear(D_in, H)  
        self.linear2 = nn.Linear(H, D_out)
        
    def forward(self, x):
        h_relu = self.linear1(x).clamp(min=0)
        prediction = self.linear2(h_relu)
        return prediction
model = Net(D_in, H, D_out)
print(model)

#Define a Loss function and optimizer
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.2) #2e-7

#Training
inputs = Variable(X_train)
outputs = Variable(Y_train)
inputs_val = Variable(X_test)
outputs_val = Variable(Y_test)
loss_values = []
val_values = []
epoch = []
for phase in ['train', 'val']:
    if phase == 'train':
        model.train()  # Set model to training mode
    else:
        optimizer.zero_grad() #zero the parameter gradients
        model.eval()   # Set model to evaluate mode
    for i in range(50):      #epoch=50
        if phase == 'train':
            model.train()
            prediction = model(inputs)
            loss = criterion(prediction, outputs) 
            print('train loss')
            print(loss)
            loss_values.append(loss.detach())
            optimizer.zero_grad() #zero the parameter gradients
            epoch.append(i)
            loss.backward()       #compute gradients(dloss/dx)
            optimizer.step()      #updates the parameters
        elif phase == 'val':
            model.eval()
            prediction_val = model(inputs_val)
            loss_val = criterion(prediction_val, outputs_val) 
            print('validation loss')
            print(loss)
            val_values.append(loss_val.detach())
            optimizer.zero_grad() #zero the parameter gradients
          
plt.plot(epoch,loss_values)
plt.plot(epoch, val_values)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','validation'], loc='upper left')
plt.show()

Here is the result:

train loss
tensor(0.9788, grad_fn=<MseLossBackward>)
tensor(2.0834, grad_fn=<MseLossBackward>)
tensor(3.2902, grad_fn=<MseLossBackward>)
tensor(0.8851, grad_fn=<MseLossBackward>)
tensor(0.0832, grad_fn=<MseLossBackward>)
tensor(0.0402, grad_fn=<MseLossBackward>)
tensor(0.0323, grad_fn=<MseLossBackward>)
tensor(0.0263, grad_fn=<MseLossBackward>)
tensor(0.0217, grad_fn=<MseLossBackward>)
tensor(0.0181, grad_fn=<MseLossBackward>)
tensor(0.0153, grad_fn=<MseLossBackward>)
tensor(0.0132, grad_fn=<MseLossBackward>)
tensor(0.0116, grad_fn=<MseLossBackward>)
tensor(0.0103, grad_fn=<MseLossBackward>)
tensor(0.0094, grad_fn=<MseLossBackward>)
tensor(0.0087, grad_fn=<MseLossBackward>)
tensor(0.0081, grad_fn=<MseLossBackward>)
tensor(0.0077, grad_fn=<MseLossBackward>)
tensor(0.0074, grad_fn=<MseLossBackward>)
tensor(0.0072, grad_fn=<MseLossBackward>)
tensor(0.0070, grad_fn=<MseLossBackward>)
tensor(0.0068, grad_fn=<MseLossBackward>)
tensor(0.0067, grad_fn=<MseLossBackward>)
tensor(0.0067, grad_fn=<MseLossBackward>)
tensor(0.0066, grad_fn=<MseLossBackward>)
tensor(0.0065, grad_fn=<MseLossBackward>)
tensor(0.0065, grad_fn=<MseLossBackward>)
tensor(0.0065, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)

validation loss
tensor(0.0061, grad_fn=<MseLossBackward>)
... (the same line is printed for all 50 iterations)

[Plot: Train Loss vs. Validation Loss]

The validation loss is a flat line. That is not what I want.

2 Answers:

Answer 0 (score: 0):

You should validate after every training epoch in order to "validate" the model's capability. Also, the model parameters do not change during the validation phase, so it is expected that your validation loss stays constant. Your code should be structured like this (a minimal sketch follows the list):

Training epoch 1

Validation

Training epoch 2

Validation

...
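A minimal sketch of that ordering, reusing the model, tensors, and optimizer from the question:

for i in range(50):
    # training phase: this is the only place where parameters are updated
    model.train()
    loss = criterion(model(inputs), outputs)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # validation phase: measure the loss on held-out data, no parameter updates
    model.eval()
    with torch.no_grad():
        loss_val = criterion(model(inputs_val), outputs_val)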

Also, don't forget to use loss.item() instead of the loss tensor itself when accumulating and averaging the loss, because the loss tensor carries a grad_fn rather than a plain float value.
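For example, reusing the names from the question's code:

loss = criterion(prediction, outputs)
loss_values.append(loss.item())                # plain Python float, detached from the graph
avg_loss = sum(loss_values) / len(loss_values) # averaging works on numbers, not tensors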

Answer 1 (score: 0):

The code you wrote first trains the model on the training set for all epochs, and only after training is finished does it compute the validation loss. Since the model is fixed by then, the validation loss you see is a flat line that never changes. What you need to do is swap the order of the two for loops: inside each epoch, first train, then validate. Like this:

for i in range(50):      #epoch=50
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()
            prediction = model(inputs)
            loss = criterion(prediction, outputs)
            print('train loss')
            print(loss.item())
            loss_values.append(loss.item())  # .item() gives a plain float
            epoch.append(i)
            optimizer.zero_grad()            # zero the parameter gradients
            loss.backward()                  # compute gradients (dloss/dx)
            optimizer.step()                 # update the parameters
        elif phase == 'val':
            model.eval()
            with torch.no_grad():            # no gradients needed for validation
                prediction_val = model(inputs_val)
                loss_val = criterion(prediction_val, outputs_val)
            print('validation loss')
            print(loss_val.item())           # print the validation loss, not the training loss
            val_values.append(loss_val.item())
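With this ordering, one training loss and one validation loss are recorded per epoch, so val_values lines up with the epoch list and both curves can be plotted on the same axis by the plotting code from the question.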