Problem with CUDA and float tensors

Date: 2018-07-27 16:47:39

Tags: python gpu pytorch tensor

I have some code, and when I run it I get the following error:

Expected object of type torch.cuda.FloatTensor but found type torch.FloatTensor for argument #2 'other'

Based on this error message, I think something is going wrong when the model is pushed onto the GPU. However, I'm not sure exactly where the problem is.

I've placed the code that I think contains the problem at the end of this question. Could someone explain exactly what this error means and how to fix it? Any help is greatly appreciated.
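For reference, the error itself just means that some operation received one tensor on the GPU and one on the CPU. It can be reproduced in isolation (a minimal sketch with made-up tensors, requiring a CUDA-capable machine; these are not from my model):

import torch

a = torch.randn(3, 3).cuda()  # torch.cuda.FloatTensor, lives on the GPU
b = torch.randn(3, 3)         # torch.FloatTensor, lives on the CPU
a + b  # raises: Expected object of type torch.cuda.FloatTensor
       # but found type torch.FloatTensor for argument #2 'other'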

class VGG(nn.Module):
    '''
    VGG model 
    '''
    def __init__(self, features): # features represents the layers array
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512,512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Linear(512, 10),
        )
        # Initialize weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()


    def forward(self, x): # x is the image, we run x through the layers
        print("Running through features")
        x = self.features(x) # runs through all features, where each feature is a function
        print("Finsihed features, going to classifier")
        x = x.view(x.size(0), -1) 
        # after running through features, does sequential steps to finally classify
        x = self.classifier(x)
        return x


def make_layers(cfg, batch_norm=False):
   # print("Making layers!")
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
            rlstm = RLSTM(v)
            rlstm.input_to_state = torch.nn.DataParallel(rlstm.input_to_state)
            rlstm.state_to_state = torch.nn.DataParallel(rlstm.state_to_state)
            rlstm = rlstm.cuda()
            layers += [rlstm]

    return nn.Sequential(*layers)

class RLSTM(nn.Module):
    def __init__(self,ch):
       # torch.set_default_tensor_type('torch.cuda.FloatTensor')
        super(RLSTM,self).__init__()
        self.ch=ch
        self.input_to_state = torch.nn.Conv2d(self.ch,4*self.ch,kernel_size=(1,3),padding=(0,1))
        self.state_to_state = torch.nn.Conv2d(self.ch,4*self.ch,kernel_size=(1,3),padding=(0,1)) # error is here: hidPrev is an array - not a valid number of input channel
       # self.input_to_state = self.input_to_state.cuda()
        #self.state_to_state = self.state_to_state.cuda()

    def forward(self, image):
      #  print("going in row forward")
        global current
        global _layer
        global isgates
        size = image.size()
        print("size: "+str(size))
        b = size[0]
        indvs = list(image.split(1,0)) # split up the batch into individual images
        #print(indvs[0].size())
        tensor_array = []
        for i in range(b):
            current = 0
            _layer = []
            isgates = []
            print(len(tensor_array))
            tensor_array.append(self.RowLSTM(indvs[i]))

        seq = tuple(tensor_array)
        trans = torch.cat(seq, 0)
        print(trans.size())
        return trans.cuda()  # trying to make the FloatTensor error go away
    def RowLSTM(self, image): 
    #    print("going in rowlstm")
        global current
        global _layer
        global isgates


        # input-to-state (K_is * x_i) : 3x1 convolution. generate 4h x n x n tensor. 4hxnxn tensor contains all i -> s info

        # the input to state convolution should only be computed one time
        if current==0:
            n = image.size()[2]
            ch=image.size()[1]
           # input_to_state = torch.nn.Conv2d(ch,4*ch,kernel_size=(1,3),padding=(0,1))
          #  print("about to do convolution")
            isgates = self.splitIS(self.input_to_state(image)) # convolve, then split into gates (4 per row)

            cell = RowLSTMCell(0, torch.randn(ch, n, 1), torch.randn(ch, n, 1),
                               torch.randn(ch, n, 1), torch.randn(ch, n, 1),
                               torch.randn(ch, n, 1), torch.randn(ch, n, 1))
            # now have dummy, learnable variables for first row
            _layer.append(cell)
            print("layeres: "+str(len(_layer)))
        else:   
            Cell_prev = _layer[current-1] # access previous row
            hidPrev = Cell_prev.getHiddenState() 
            ch = image.size()[1] 
        #   print("about to apply conv1d")
           # state_to_state = torch.nn.Conv2d(ch,4*ch,kernel_size=(1,3),padding=(0,1)) # error is here: hidPrev is an array - not a valid number of input channel
        #   print("applied conv1d") 
            prevHid=Cell_prev.getHiddenState()
            ssgates = self.splitSS(self.state_to_state(prevHid.unsqueeze(0))) #need to unsqueeze (Ex: currently 16x5, need to make 1x16x5)
            gates = self.addGates(isgates,ssgates,current)
            # split gates
            ig, og, fg, gg = gates[0], gates[1], gates[2], gates[3] # into four, ADD SIGMOID!
            cell = RowLSTMCell(Cell_prev,ig,og,fg,gg,0,0)
            cell.compute()
            _layer.append(cell)
        # attempting to eliminate requirement of getting size

        #print(current)
        try:
            print("adding one to current")
            current+=1
            y=(isgates[0][0][1][current])

            return self.RowLSTM(image) #expecting floattensor, but gets cuda floattensor

        except Exception as error:
            print(error)
            concats=[]
            print(len(_layer))
            for cell in _layer:
                tensor=torch.unsqueeze(cell.h,0)
                concats.append(tensor)
            seq=tuple(concats)

            print("non catted tensor: "+str(tensor.size()))
            tense=torch.cat(seq,3)
            print("catted lstm tensor "+str(tense.size()))
            return tensor

The code runs, but the error is thrown when it goes through the try/except block. I'm guessing the error is somewhere in there?

Edit: Using print statements to see exactly where the program terminates, it looks like the bug is in code I wrote earlier! I'm posting it now. Since "finished computing" never gets printed, the error appears to be in the compute() function.

class RowLSTMCell(): #inherit torch.nn.LSTM?
    def __init__(self,prev_row, i, o, f, g, c, h):
        #super(RowLSTMCell,self).__init__()
        self.c=c

        #self.c = self.c.cuda()
        self.h=h
       # self.h = self.h.cuda()
        self.i=i
        self.i = self.i.cuda()
        self.o=o
        self.o = self.o.cuda()
        self.g=g
        self.g = self.g.cuda()
        self.f=f
        self.f = self.f.cuda()
        self.prev_row=prev_row 
    def getStateSize(self):
        return self._state_size

    def getOutputSize(self):
        return self._output_size

    def compute(self):
        print("computing")
        c_prev = self.prev_row.getCellState()
        h_prev = self.prev_row.getHiddenState()

        self.c = self.f * c_prev + self.i * self.g
        self.h = torch.tanh(self.c) * self.o
        print("finished computing")
    def getHiddenState(self):
        return self.h

    def getCellState(self):
        return self.c
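
To pin down which operand is still on the CPU, a check like this can be dropped into compute() just before the failing line (a debugging sketch, using only attributes the class already has):

# debugging sketch: report which of the cell's tensors are on the GPU
for name, t in [("i", self.i), ("o", self.o), ("f", self.f),
                ("g", self.g), ("c", self.c), ("h", self.h)]:
    if torch.is_tensor(t):  # c and h may start out as plain numbers (0)
        print(name, "is_cuda =", t.is_cuda)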

1 Answer:

Answer 0 (score: 0)

self.c and self.h were not CUDA! I guess you really do have to make sure every single tensor is on CUDA. I simply put .cuda() at the end of the self.c and self.h computations in the compute() method.
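
In code, that fix would look roughly like this (a sketch; only the two assignments in compute() change):

def compute(self):
    print("computing")
    c_prev = self.prev_row.getCellState()
    h_prev = self.prev_row.getHiddenState()
    # move the results onto the GPU so downstream ops see torch.cuda.FloatTensor
    self.c = (self.f * c_prev + self.i * self.g).cuda()
    self.h = (torch.tanh(self.c) * self.o).cuda()
    print("finished computing")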