Pytorch:在非单尺寸3时,张量a(24)的大小必须与张量b(48)的大小匹配

时间:2020-08-16 03:37:16

标签: pytorch feature-extraction image-compression

下面的代码可以正常工作并产生适当的结果。

import torch
import torch.nn as nn
import torch.nn.functional as F

from modules import ConvLSTMCell, Sign


class EncoderCell(nn.Module):
    def __init__(self):
        super(EncoderCell, self).__init__()

        self.conv = nn.Conv2d(
            3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.rnn1 = ConvLSTMCell(
            64,
            256,
            kernel_size=3,
            stride=2,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn2 = ConvLSTMCell(
            256,
            512,
            kernel_size=3,
            stride=2,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn3 = ConvLSTMCell(
            512,
            512,
            kernel_size=3,
            stride=2,
            padding=1,
            hidden_kernel_size=1,
            bias=False)

    def forward(self, input, hidden1, hidden2, hidden3):
        x = self.conv(input)

        hidden1 = self.rnn1(x, hidden1)
        x = hidden1[0]

        hidden2 = self.rnn2(x, hidden2)
        x = hidden2[0]

        hidden3 = self.rnn3(x, hidden3)
        x = hidden3[0]

        return x, hidden1, hidden2, hidden3


class Binarizer(nn.Module):
    def __init__(self):
        super(Binarizer, self).__init__()
        self.conv = nn.Conv2d(512, 32, kernel_size=1, bias=False)
        self.sign = Sign()

    def forward(self, input):
        feat = self.conv(input)
        x = F.tanh(feat)
        return self.sign(x)


class DecoderCell(nn.Module):
    def __init__(self):
        super(DecoderCell, self).__init__()

        self.conv1 = nn.Conv2d(
            32, 512, kernel_size=1, stride=1, padding=0, bias=False)
        self.rnn1 = ConvLSTMCell(
            512,
            512,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn2 = ConvLSTMCell(
            128,
            512,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn3 = ConvLSTMCell(
            128,
            256,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=3,
            bias=False)
        self.rnn4 = ConvLSTMCell(
            64,
            128,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=3,
            bias=False)
        self.conv2 = nn.Conv2d(
            32, 3, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, input, hidden1, hidden2, hidden3, hidden4):
        x = self.conv1(input)

        hidden1 = self.rnn1(x, hidden1)
        x = hidden1[0]
        x = F.pixel_shuffle(x, 2)

        hidden2 = self.rnn2(x, hidden2)
        x = hidden2[0]
        x = F.pixel_shuffle(x, 2)

        hidden3 = self.rnn3(x, hidden3)
        x = hidden3[0]
        x = F.pixel_shuffle(x, 2)

        hidden4 = self.rnn4(x, hidden4)
        x = hidden4[0]
        x = F.pixel_shuffle(x, 2)

        x = F.tanh(self.conv2(x)) / 2
        return x, hidden1, hidden2, hidden3, hidden4

现在,我在self.con中进行了更改,并在图层中添加了预训练的resent。现在它显示训练后的张量不匹配误差。一切都一样,只需在代码中添加此行即可。我把**放在那一行

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models


from modules import ConvLSTMCell, Sign


class EncoderCell(nn.Module):
    def __init__(self):
        super(EncoderCell, self).__init__()

        #self.conv = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)

        **resConv = models.resnet50(pretrained=True)
        resConv.layer4 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv = resConv.layer4**


        self.rnn1 = ConvLSTMCell(
            64,
            256,
            kernel_size=3,
            stride=2,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn2 = ConvLSTMCell(
            256,
            512,
            kernel_size=3,
            stride=2,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn3 = ConvLSTMCell(
            512,
            512,
            kernel_size=3,
            stride=2,
            padding=1,
            hidden_kernel_size=1,
            bias=False)

    def forward(self, input, hidden1, hidden2, hidden3):

        x = self.conv(input)

        hidden1 = self.rnn1(x, hidden1)
        x = hidden1[0]

        hidden2 = self.rnn2(x, hidden2)
        x = hidden2[0]

        hidden3 = self.rnn3(x, hidden3)
        x = hidden3[0]

        return x, hidden1, hidden2, hidden3


class Binarizer(nn.Module):
    def __init__(self):
        super(Binarizer, self).__init__()
        self.conv = nn.Conv2d(512, 32, kernel_size=1, bias=False)
        self.sign = Sign()

    def forward(self, input):
        feat = self.conv(input)
        x = F.tanh(feat)
        return self.sign(x)


class DecoderCell(nn.Module):
    def __init__(self):
        super(DecoderCell, self).__init__()

        **resConv = models.resnet50(pretrained=True)
        resConv.layer4 = nn.Conv2d(32, 512, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv1 = resConv.layer4**

        self.rnn1 = ConvLSTMCell(
            512,
            512,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn2 = ConvLSTMCell(
            128,
            512,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=1,
            bias=False)
        self.rnn3 = ConvLSTMCell(
            128,
            256,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=3,
            bias=False)
        self.rnn4 = ConvLSTMCell(
            64,
            128,
            kernel_size=3,
            stride=1,
            padding=1,
            hidden_kernel_size=3,
            bias=False)

        **resConv2 = models.resnet50(pretrained=True)
        resConv2.layer4 = nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv2 = resConv2.layer4**

    def forward(self, input, hidden1, hidden2, hidden3, hidden4):
        x = self.conv1(input)

        hidden1 = self.rnn1(x, hidden1)
        x = hidden1[0]
        x = F.pixel_shuffle(x, 2)

        hidden2 = self.rnn2(x, hidden2)
        x = hidden2[0]
        x = F.pixel_shuffle(x, 2)

        hidden3 = self.rnn3(x, hidden3)
        x = hidden3[0]
        x = F.pixel_shuffle(x, 2)

        hidden4 = self.rnn4(x, hidden4)
        x = hidden4[0]
        x = F.pixel_shuffle(x, 2)

        x = F.tanh(self.conv2(x)) / 2
        return x, hidden1, hidden2, hidden3, hidden4

2 个答案:

答案 0 :(得分:0)

您做错了方法,有一些解释是

    **resConv = models.resnet50(pretrained=True) # you are reading a model

现在您要用新初始化的图层替换该模型中的图层。其次,resnet50中的layer4是一个包含多个层的顺序块。使用print查看模型中的确切层。

    resConv.layer4 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)

您在这里使用新图层。

self.conv = resConv.layer4**

根据有关预训练层使用情况的查询,您应该这样做

resConv = models.resnet50(pretrained=True)
print(resConv) #see the layer which you want to use
self.conv = resConv.conv1 # replace conv1 with that layer
# note: conv1 is the name of first conv layer in resnet

为此,我还建议在对象初始化之外获取并添加此层(或权重和偏差)。像这样:

enc = EncoderCell()
resnet50 = models.resnet50(pretrained=True)

然后选择

enc.conv = resnet50.conv1

或更理想地

enc.conv.load_state_dict(resnet50.layer1.state_dict())

原因是,在nn.Module类上调用state_dict()会创建参数的克隆(在这种情况下为权重和偏差),只要这两个实例可以通过nn.Module.load_state_dict()方法加载即可的nn.Module具有相同的形状。这样您便获得了预训练的权重,并且它们与预训练的模型完全分离。然后,您可以摆脱预训练的模型,因为它的内存可能很大。

del resnet50

答案 1 :(得分:0)

我对其他答案提出了潜在的改进,但是为了解决您遇到的错误,我也在这里回答。如果代码在您进行编辑之前运行,并且您要更改的图层与上一个图层的形状相同,那么我猜测是它可能与创建resnet50对象形成的计算图有关。我会向其他答案推荐我在编辑中提到的方法,但我将在这里再次声明(注意,这是假设您保留了原始代码):

# instantiate you encoder (repeat these steps with the decoder as well)
enc = EncoderCell()
# get the pretrained model
resnet = models.resnet50(pretrained=True)
# load the state dict into the regular conv layer
enc.conv.load_state_dict(resnet50.layer4.state_dict())

这应该将来自resnet50模型的预训练权重和偏差加载到您的conv层中,并且可以对解码器conv层进行此操作,只要它们都共享相同的形状即可。

要对不匹配错误进行更多测试,我建议在模型的forward()方法中使用调试器或print语句,以便在应用每一层后查看张量的形状,就像这样

def forward(self, input, hidden1, hidden2, hidden3, hidden4):
    print(x.size())
    x = self.conv1(input)
    print(x.size())
    hidden1 = self.rnn1(x, hidden1)
    x = hidden1[0]
    x = F.pixel_shuffle(x, 2)

    hidden2 = self.rnn2(x, hidden2)
    x = hidden2[0]
    x = F.pixel_shuffle(x, 2)

    hidden3 = self.rnn3(x, hidden3)
    x = hidden3[0]
    x = F.pixel_shuffle(x, 2)

    hidden4 = self.rnn4(x, hidden4)
    x = hidden4[0]
    x = F.pixel_shuffle(x, 2)

    x = F.tanh(self.conv2(x)) / 2
    return x, hidden1, hidden2, hidden3, hidden4

当然,您可以将print语句放在forward方法中的其他位置。我也强烈建议您使用调试器。 pycharm使得这非常容易,并且还使得在科学模式下它提供的python控制台旁边的变量状态变得容易。在变量通过某些层(例如卷积层)之后,可能需要寻找一种计算变量大小的方法。这是众所周知的,并且存在一些公式可以根据初始大小,过滤器大小,步幅宽度和填充来计算尺寸大小。