RuntimeError: Expected object of type Variable[torch.FloatTensor] but found type Variable[torch.cuda.FloatTensor] for argument #1 'mat2'

Date: 2018-07-26 07:51:46

Tags: python deep-learning pytorch

When I run the PyTorch code below, I get an error:

class BatchRNN(nn.Module):
    def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM,
                 bidirectional=False, batch_norm=True, dropout=0.1):
        super(BatchRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        self.batch_norm = SequenceWise(nn.BatchNorm1d(input_size)).cuda() if batch_norm else None
        self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size,
                            bidirectional=bidirectional, dropout=dropout,
                            bias=False, batch_first=True).cuda()

    def forward(self, x):
        if self.batch_norm is not None:
            x = self.batch_norm(x).cuda()
        x, _ = self.rnn(x)
        self.rnn.cuda().flatten_parameters()
        return x

class CTC_RNN(nn.Module):
    def __init__(self, rnn_input_size, rnn_hidden_size, rnn_layers=1,
                 rnn_type=nn.LSTM, bidirectional=True,
                 batch_norm=True, num_class=1232, drop_out=0.1):
        super(CTC_RNN, self).__init__()
        self.rnn_input_size = rnn_input_size
        self.rnn_hidden_size = rnn_hidden_size
        self.rnn_layers = rnn_layers
        self.rnn_type = rnn_type
        self.num_class = num_class
        self.num_directions = 2 if bidirectional else 1

        # CNN
        self.conv1_cnn = nn.Conv2d(1, 256, (2, rnn_input_size)).cuda()
        self.conv2_cnn = nn.Conv2d(1, 256, (2, 256)).cuda()
        self.fc_cnn = nn.Linear(256, rnn_hidden_size).cuda()
        self.softmax_cnn = torch.nn.Softmax().cuda()

        rnns = []
        rnn = BatchRNN(input_size=rnn_input_size, hidden_size=rnn_hidden_size,
                       rnn_type=rnn_type, bidirectional=bidirectional,
                       batch_norm=False).cuda()
        rnns.append(('0', rnn))
        for i in range(rnn_layers - 1):
            rnn = BatchRNN(input_size=self.num_directions * rnn_hidden_size,
                           hidden_size=rnn_hidden_size, rnn_type=rnn_type,
                           bidirectional=bidirectional, dropout=drop_out,
                           batch_norm=batch_norm).cuda()
            rnns.append(('%d' % (i + 1), rnn))

        self.rnns = nn.Sequential(OrderedDict(rnns)).cuda()

        if batch_norm:
            fc = nn.Sequential(nn.BatchNorm1d(self.num_directions * rnn_hidden_size).cuda(),
                               nn.Linear(self.num_directions * rnn_hidden_size,
                                         rnn_hidden_size, bias=False).cuda()).cuda()
        else:
            fc = nn.Linear(self.num_directions * rnn_hidden_size, rnn_hidden_size, bias=False).cuda()
        self.fc = SequenceWise(fc).cuda()
        self.inference_log_softmax = InferenceBatchLogSoftmax().cuda()
        self.softmax = torch.nn.Softmax().cuda()
        # self.inference_softmax = InferenceBatchSoftmax()

        # TDDF fusion LSTM
        self.tddf_lstm = nn.LSTMCell(rnn_hidden_size, rnn_hidden_size).cuda()
        self.fc_s = nn.Linear(rnn_hidden_size, 2, bias=True).cuda()
        self.fc_c = nn.Linear(rnn_hidden_size, 2, bias=True).cuda()
        self.hx = Variable(torch.zeros(100, rnn_hidden_size), requires_grad=True).cuda()
        self.cx = Variable(torch.zeros(100, rnn_hidden_size), requires_grad=True).cuda()
        self.fc_tddf = nn.Linear(rnn_hidden_size, num_class).cuda()

    def forward(self, x, y):
        # x: packed padded sequence [x.data: the original data]
        #    [x.batch_sizes: the batch size of each frame]
        #    [x_len: type: list, not torch.IntTensor]
        # ipdb.set_trace()
        x = self.rnns(x)
        x = self.fc(x)
        x = self.inference_log_softmax(x)  # (max_step, batch_size, dim)
        x = x.transpose(0, 1)
        # x = self.inference_softmax(x)

        y = self.conv1_cnn(y)
        # version 2: relu
        y = F.relu(y)
        y = self.conv2_cnn(torch.transpose(y, 1, 3))
        y = F.relu(y)
        y = self.fc_cnn(torch.transpose(y, 1, 3))  # (batch_size, 1, max_step, dim)
        # y = torch.transpose(y, 1, 3)
        y = y.view(100, -1, self.rnn_hidden_size)
        y = torch.transpose(y, 0, 1)

        output = Variable(torch.zeros(x.cpu().data.numpy().shape[0], 100, self.rnn_hidden_size)).cuda()
        for i in range(x.cpu().data.numpy().shape[0]):
            # ipdb.set_trace()
            if i == 0:
                st = F.softmax(self.fc_s(self.hx))
                ct = F.sigmoid(self.fc_c(self.hx))
                at = st * ct
                tddf_input_i_x = x[i] * at[:, 0].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i_y = y[i] * at[:, 1].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i = tddf_input_i_x + tddf_input_i_y
                hx, cx = self.tddf_lstm(tddf_input_i, (self.hx, self.cx))
                output[i] = hx
            else:
                st = F.softmax(self.fc_s(hx))
                ct = F.sigmoid(self.fc_c(hx))
                at = st * ct
                tddf_input_i_x = x[i] * at[:, 0].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i_y = y[i] * at[:, 1].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i = tddf_input_i_x + tddf_input_i_y
                # tddf_input_i = x[i]*at[:,0].contiguous().view(100,1).expand(100,self.rnn_hidden_size) + y[i]*at[:1].contiguous().view(100,1).expand(100,self.rnn_hidden_size)
                hx, cx = self.tddf_lstm(tddf_input_i, (hx, cx))
                output[i] = hx
        return self.fc_tddf(output)

The error output is as follows:

Traceback (most recent call last):
  File "/home/xinhaoran/PycharmProjects/TDDF/PH_ctc_cnn_tddf.py", line 358, in <module>
    main()
  File "/home/xinhaoran/PycharmProjects/TDDF/PH_ctc_cnn_tddf.py", line 353, in main
    train()
  File "/home/xinhaoran/PycharmProjects/TDDF/PH_ctc_cnn_tddf.py", line 256, in train
    probs = model(feats,feats_cnn).cuda(async=True)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/xinhaoran/PycharmProjects/TDDF/model_tddf.py", line 167, in forward
    x = self.rnns(x)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/modules/container.py", line 67, in forward
    input = module(input)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/xinhaoran/PycharmProjects/TDDF/model_tddf.py", line 115, in forward
    x, _ = self.rnn(x).cuda()
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/modules/rnn.py", line 204, in forward
    output, hidden = func(input, self.all_weights, hx)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 385, in forward
    return func(input, *fargs, **fkwargs)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 245, in forward
    nexth, output = func(input, hidden, weight)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 85, in forward
    hy, output = inner(input, hidden[l], weight[l])
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 158, in forward
    hidden = inner(step_input, hidden, *weight)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 32, in LSTMCell
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 837, in linear
    output = input.matmul(weight.t())
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/autograd/variable.py", line 386, in matmul
    return torch.matmul(self, other)
  File "/home/xinhaoran/anaconda3/lib/python3.6/site-packages/torch/functional.py", line 174, in matmul
    return torch.mm(tensor1, tensor2)
RuntimeError: Expected object of type Variable[torch.FloatTensor] but found type Variable[torch.cuda.FloatTensor] for argument #1 'mat2'
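If I am reading the traceback correctly, `mat2` is the transposed RNN weight and lives on the GPU (torch.cuda.FloatTensor), while the input that reaches F.linear is still a CPU tensor, so torch.mm sees mixed devices. A hypothetical two-line snippet (the names are mine, not from the model above) should trigger the same kind of error on 0.3.0:

import torch
from torch.autograd import Variable

# Hypothetical repro: a CPU input multiplied against a GPU weight
# fails inside torch.mm with the same type mismatch.
inp = Variable(torch.randn(4, 8))       # Variable[torch.FloatTensor]
w = Variable(torch.randn(8, 8)).cuda()  # Variable[torch.cuda.FloatTensor]
out = torch.mm(inp, w)                  # RuntimeError: Expected object of type ...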

I would be very grateful if anyone could help me! For the record, this code seems to work on PyTorch 0.2.0 (I am now using 0.3.0).
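For reference, below is a minimal sketch of the call pattern I believe is recommended: move the whole module to the GPU once instead of scattering .cuda() calls, and move the inputs onto the same device before the forward pass. `feats` and `feats_cnn` are the inputs from my train() in the traceback; the constructor sizes are placeholders, and if `feats` is a PackedSequence it must be packed from a GPU tensor instead.

# Minimal sketch, assuming feats and feats_cnn are plain FloatTensors
# (the constructor sizes below are placeholders, not my real config).
model = CTC_RNN(rnn_input_size=39, rnn_hidden_size=512)
model = model.cuda()             # move the whole module once
feats = feats.cuda()             # keep every input on the same device
feats_cnn = feats_cnn.cuda()
probs = model(feats, feats_cnn)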

0 Answers