I am training a VGG16-based model in PyTorch. The training loss keeps decreasing, but the validation accuracy stays the same.
Epoch [1/5], Val Accuracy: 0.8498, Train Loss: 0.4560558137803147
-------------------
Epoch [2/5], Val Accuracy: 0.8498, Train Loss: 0.4382643524576648
-------------------
Epoch [3/5], Val Accuracy: 0.8498, Train Loss: 0.4410017436635879
-------------------
Epoch [4/5], Val Accuracy: 0.8498, Train Loss: 0.4410021219067724
-------------------
Epoch [5/5], Val Accuracy: 0.8498, Train Loss: 0.4382636753429577
Why is this happening?
What are possible solutions?
My model configuration:
import torch
import torch.nn as nn
from torchvision import models
model = models.vgg16(pretrained=True)
for param in model.parameters():
    param.requires_grad = True
model.classifier[6] = nn.Sequential(
    nn.Linear(4096, 1024),
    nn.Linear(1024, 1024),
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Linear(1024, 2),
    nn.Softmax(dim=1))
learning_rate = 0.01
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
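For reference, a quick sanity check I can run on this configuration (the 1×3×224×224 dummy input is just an assumption matching VGG16's standard input size, not my actual preprocessing):
import torch

# Hypothetical check: the replaced head should produce 2-class outputs
# and all parameters should be trainable, since requires_grad is set to True above.
dummy = torch.randn(1, 3, 224, 224)  # assumed 224x224 RGB input
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expected: torch.Size([1, 2])
print(sum(p.requires_grad for p in model.parameters()), "trainable parameter tensors")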
Training phase:
import numpy as np
from torch.autograd import Variable
model.double().cuda()
epochs = 5
for epoch in range(epochs):
    loss = 0
    correct = 0
    total = len(val_loader)
    losses = []
    model.train()
    # Train the model
    for (observations, labels) in train_loader:
        observations = Variable(observations).permute(0, 3, 1, 2).cuda()
        labels = Variable(labels).long().cuda()
        # Forward pass
        optimizer.zero_grad()
        outputs = model(observations)
        # Backward pass
        loss = criterion(outputs, labels)
        loss.backward()
        # Optimize
        optimizer.step()
        losses.append(loss.data.item())
    model.eval()
    # Test the model on the validation data
    for observations, labels in val_loader:
        observations = Variable(observations).permute(0, 3, 1, 2).cuda()
        labels = labels.long().cuda()
        # Forward pass
        outputs = model(observations)
        _, predicted = torch.max(outputs.data, 1)
        correct = correct + (predicted.long().cuda() == labels.long().cuda()).sum()
    accuracy = correct.item() / 273
    print('Epoch [%2d/%2d] , Val Accuracy: %.4f' % (epoch + 1, epochs, accuracy))
    print(np.mean(losses))
    print('-------------------')
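The hard-coded 273 above is the size of my validation set. Assuming val_loader wraps a standard torch.utils.data.Dataset, I believe the equivalent, more general computation would be:
# Assumption: val_loader is a torch.utils.data.DataLoader over the validation Dataset,
# so len(val_loader.dataset) gives the number of validation samples (273 in my case).
num_val_samples = len(val_loader.dataset)
accuracy = correct.item() / num_val_samples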