I'm building a CNN and training it on hand-gesture images of the letters A through I (9 classes); every image is 224x224 RGB.
I'm not sure which matrix I need to transpose, or how. I've managed to match the input and output sizes between the layers, but now there's a matrix-multiplication problem and I'm not sure how to fix it.
class LargeNet(nn.Module):
    def __init__(self):
        super(LargeNet, self).__init__()
        self.name = "large"
        self.conv1 = nn.Conv2d(3, 5, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(5, 10, 5)
        self.fc1 = nn.Linear(10 * 53 * 53, 32)
        self.fc2 = nn.Linear(32, 9)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        print('x1')
        x = self.pool(F.relu(self.conv2(x)))
        print('x2')
        x = x.view(-1, 10 * 53 * 53)
        print('x3')
        x = F.relu(self.fc1(x))
        print('x4')
        x = x.view(-1, 1)
        x = self.fc2(x)
        print('x5')
        x = x.squeeze(1)  # Flatten to [batch_size]
        return x
And the training code:
# Loss and optimizer
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model2.parameters(), lr=learning_rate, momentum=0.9)

# Train the model
total_step = len(train_loader)
loss_list = []
acc_list = []
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        print(i, images.size(), labels.size())
        # Run the forward pass
        outputs = model2(images)
        labels = labels.unsqueeze(1)
        labels = labels.float()
        loss = criterion(outputs, labels)
The code prints up to x4 and then raises this error: RuntimeError: size mismatch, m1: [32 x 1], m2: [32 x 9] at C:\w\1\s\tmp_conda_3.7_055457\conda\conda-bld\pytorch_1565416617654\work\aten\src\TH/generic/THTensorMath.cpp:752
Full traceback: https://ibb.co/ykqy5wM
Answer 0 (score: 2):
You don't need x = x.view(-1, 1) and x = x.squeeze(1) in the forward function; remove those two lines. After fc1 the activations have shape (batch_size, 32), and x.view(-1, 1) reshapes them to (32 * batch_size, 1), which can no longer be multiplied by fc2's weight matrix, hence the size mismatch between m1: [32 x 1] and m2: [32 x 9]. With those lines removed, the output shape will be (batch_size, 9).

Also, since you are using nn.BCEWithLogitsLoss, you need to convert labels to one-hot encoding with shape (batch_size, 9).
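For reference (an editorial sketch, not part of the original answer), a dummy forward pass confirms why fc1 expects 10 * 53 * 53 input features for 224x224 images:

import torch
import torch.nn as nn

# Trace a dummy 224x224 RGB image through the conv/pool stack.
x = torch.randn(1, 3, 224, 224)
x = nn.MaxPool2d(2, 2)(nn.Conv2d(3, 5, 5)(x))   # 224 -> 220 (5x5 conv, no padding) -> 110 (pool)
x = nn.MaxPool2d(2, 2)(nn.Conv2d(5, 10, 5)(x))  # 110 -> 106 (5x5 conv, no padding) -> 53 (pool)
print(x.shape)  # torch.Size([1, 10, 53, 53]) => flatten to 10 * 53 * 53 for fc1

The corrected model, with a minimal end-to-end check: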
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class LargeNet(nn.Module):
    def __init__(self):
        super(LargeNet, self).__init__()
        self.name = "large"
        self.conv1 = nn.Conv2d(3, 5, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(5, 10, 5)
        self.fc1 = nn.Linear(10 * 53 * 53, 32)
        self.fc2 = nn.Linear(32, 9)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 10 * 53 * 53)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)  # logits of shape (batch_size, 9)
        return x

model2 = LargeNet()

# Loss and optimizer
criterion = nn.BCEWithLogitsLoss()  # applies the sigmoid internally; nn.BCELoss() would need an explicit sigmoid
optimizer = optim.SGD(model2.parameters(), lr=0.1, momentum=0.9)

images = torch.from_numpy(np.random.randn(2, 3, 224, 224)).float()  # fake images, batch_size is 2
labels = torch.tensor([1, 2]).long()  # fake integer labels

outputs = model2(images)
one_hot_labels = torch.eye(9)[labels]  # convert integer labels to one-hot, shape (2, 9)
loss = criterion(outputs, one_hot_labels)
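As a side note (an editorial addition, not from the original answer): since each image belongs to exactly one of the 9 classes, nn.CrossEntropyLoss is the more conventional choice here; it takes the integer class labels directly, so the one-hot conversion can be dropped:

criterion = nn.CrossEntropyLoss()  # expects raw logits and integer class indices
outputs = model2(images)           # (batch_size, 9) logits
loss = criterion(outputs, labels)  # labels: shape (batch_size,), dtype long, values in 0..8

nn.BCEWithLogitsLoss treats the 9 outputs as independent binary decisions (a multi-label setup), whereas nn.CrossEntropyLoss normalizes across the 9 classes, which matches a single-label classification problem like this one.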