I want to add a torch.nn.ReLU() layer between my fc1 and fc2 layers.
Original code:

Model:
# ...
self.fc1 = nn.Linear(4096, 256)
self.fc2 = nn.Linear(256, 4096)
# ...

def forward(...):
    # ...
    x = x.view(-1, 4096)
    x = self.fc1(x)
    if a7 is not None:
        x = x * a7.squeeze()
    # ...
What I tried:
# ...
x = x.view(-1, 4096)
x = nn.ReLU(self.fc1(x))
if a7 is not None:
    x = x * a7.squeeze()
# ...
This throws an error.
Answer 0 (score: 1)
My answer assumes __init__ was a typo and it should be forward. Let me know if that is not the case and I'll delete this answer.
import torch
from torch import nn

class SimpleModel(nn.Module):
    def __init__(self, with_relu=False):
        super(SimpleModel, self).__init__()
        self.fc1 = nn.Sequential(nn.Linear(3, 10), nn.ReLU(inplace=True)) if with_relu else nn.Linear(3, 10)
        self.fc2 = nn.Linear(10, 3)

    def forward(self, x):
        x = self.fc1(x)
        print(torch.min(x))  # just to show you ReLU is working...
        return self.fc2(x)

# Model without ReLU
net_without_relu = SimpleModel(with_relu=False)
print(net_without_relu)

# Model with ReLU
net_with_relu = SimpleModel(with_relu=True)
print(net_with_relu)

# random input data
x = torch.randn((5, 3))
print(x)

# we expect it to print something < 0
output1 = net_without_relu(x)

# we expect it to print 0.
output2 = net_with_relu(x)
You can check this code running on Colab here: https://colab.research.google.com/drive/1W3Dh4_KPd3iABx5FSzZm3tilm6tnJh0v
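As a side note (this variant is mine, not part of the answer above): instead of wrapping fc1 in nn.Sequential, you can also register the ReLU as its own submodule in __init__ and call it in forward. The class name SimpleModelAlt is just for illustration:

from torch import nn

class SimpleModelAlt(nn.Module):
    def __init__(self):
        super(SimpleModelAlt, self).__init__()
        self.fc1 = nn.Linear(3, 10)
        self.relu = nn.ReLU(inplace=True)  # instantiate the module once here...
        self.fc2 = nn.Linear(10, 3)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)  # ...then call it in forward, between fc1 and fc2
        return self.fc2(x)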
To do what you tried with:

x = nn.ReLU(self.fc1(x))
you can use the functional API:
from torch.nn import functional as F
# ...
x = F.relu(self.fc1(x))
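For context (my note, not part of the original answer): nn.ReLU is a module class, so nn.ReLU(self.fc1(x)) constructs a ReLU module with your tensor as its constructor argument instead of applying the activation, and the resulting object breaks later operations. With the functional form, the asker's model could look roughly like this; the class name Net and the forward signature are assumptions, since the question elides them:

import torch
from torch import nn
from torch.nn import functional as F

class Net(nn.Module):
    # "Net" and the forward signature are assumed for illustration.
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4096, 256)
        self.fc2 = nn.Linear(256, 4096)

    def forward(self, x, a7=None):
        x = x.view(-1, 4096)
        x = F.relu(self.fc1(x))  # ReLU applied between fc1 and fc2
        if a7 is not None:
            x = x * a7.squeeze()
        return self.fc2(x)

# quick smoke test
net = Net()
out = net(torch.randn(2, 4096))
print(out.shape)  # torch.Size([2, 4096])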
Answer 1 (score: 0)
You shouldn't put any view calls in __init__. The initialization should only hold your structure.
For example, this is from AlexNet's __init__:
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
Your forward method, however, may contain reshaping, computations, and functional calls.
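For instance, a minimal sketch of that split (the layer names and sizes here are illustrative, not taken from the question):

from torch import nn
from torch.nn import functional as F

class TinyNet(nn.Module):
    def __init__(self):
        super(TinyNet, self).__init__()
        # __init__ only declares structure
        self.fc1 = nn.Linear(16, 8)
        self.fc2 = nn.Linear(8, 4)

    def forward(self, x):
        # forward does the reshaping, computation, and functional calls
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)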
nn.Sequential should be part of __init__, as in AlexNet:
class AlexNet(nn.Module):
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        return x
Then in forward you can use the class attributes self.features and self.classifier.
Note: this is the old AlexNet model from PyTorch 0.4, but it is fairly simple and the logic is the same.