I'm trying to build a CNN with PyTorch to predict the number of fingers shown in an image. The network:
import torch
import torch.nn as nn
import torch.optim as optimizers

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Convolutional feature extractor
        self.Layer1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Conv2d(in_channels=256, out_channels=16, kernel_size=(1, 1)),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Conv2d(in_channels=256, out_channels=16, kernel_size=(1, 1)),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Conv2d(in_channels=128, out_channels=16, kernel_size=(1, 1)),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Conv2d(in_channels=128, out_channels=16, kernel_size=(1, 1)),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3)),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3)),
            nn.ReLU(),
        )
        # Classifier head: 6 classes (0-5 fingers)
        self.Layer2 = nn.Sequential(
            nn.Linear(1536, 100),
            nn.Tanh(),
            nn.Linear(100, 6),
            nn.Softmax()
        )
        self.optimizer = optimizers.Adadelta(self.parameters())

    def forward(self, X):
        X = self.Layer1(X)
        print(X.shape)
        # Flatten the feature maps; hard-coded for a batch size of 1
        X = self.Layer2(X.reshape(1, 1536))
        X = X.squeeze()
        return X

    def calc_loss(self, X, num):
        out = self.forward(X).unsqueeze(dim=0)
        print("Output: " + str(out))
        target = torch.tensor([num], dtype=torch.int64).cuda()
        criterion = nn.CrossEntropyLoss()
        loss = criterion(out, target)
        return loss

    def train_step(self, X, Y):
        loss = self.calc_loss(X, Y)
        print(loss)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
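A minimal sketch of how a model like this would be driven during training. The DataLoader, image size, and epoch count are assumptions and not from the original post; note that forward() only accepts one image at a time because of the hard-coded reshape to (1, 1536):

# Hypothetical training loop; train_loader is a placeholder DataLoader with batch_size=1.
# The input image size must be such that Layer1's output flattens to exactly 1536 values.
net = Net().cuda()
for epoch in range(10):
    for image, label in train_loader:
        net.train_step(image.cuda(), label.item())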
However, after training, all predicted values end up almost identical (roughly 0.15 to 0.18).
It looks as if the network just averages the output probabilities to minimize the loss instead of learning the actual classes.
I get the same result whether I use Softmax as the last layer with cross-entropy loss, or Sigmoid with binary cross-entropy, MSE, or SmoothL1Loss.
With the Adam optimizer, the outputs I get are only in the 1e-12 to 1e-14 range.
What am I missing?
Answer 0: (score: 1)
If you are using CrossEntropyLoss, you don't need a Softmax in your forward. It is already included in CrossEntropyLoss, so the loss expects the "raw" (logit) output. If you do need softmax probabilities at inference time, use a LogSoftmax layer together with NLLLoss instead.
You can find more information here.
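A minimal sketch of what that change looks like, assuming the rest of the network stays the same. Only the classifier head and the loss are shown; the 1536/100/6 sizes are taken from the question, and the random feature tensor is just a stand-in for the flattened Layer1 output:

import torch
import torch.nn as nn

# The head outputs raw logits: no nn.Softmax inside the model.
head = nn.Sequential(
    nn.Linear(1536, 100),
    nn.Tanh(),
    nn.Linear(100, 6),                 # raw logits for the 6 classes
)

features = torch.randn(1, 1536)        # stand-in for the flattened conv features
target = torch.tensor([3])             # class index, e.g. 3 fingers

logits = head(features)                          # shape (1, 6)
loss = nn.CrossEntropyLoss()(logits, target)     # applies log-softmax internally
loss.backward()

# At inference time, convert logits to probabilities explicitly:
with torch.no_grad():
    probs = torch.softmax(head(features), dim=1)

The equivalent alternative is to keep nn.LogSoftmax(dim=1) as the last layer and train with nn.NLLLoss(); CrossEntropyLoss simply fuses those two steps, so both options train identically.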