Input tensor not moved to GPU in PyTorch

Asked: 2019-05-13 03:47:02

Tags: pytorch

When I run my code, I get the following error:

Input and parameter tensors are not at the same device, found input tensor at cpu and parameter tensor at cuda:0

even though I call .cuda() on the input.

Google Colab link

Code:

use_cuda = True
if use_cuda and torch.cuda.is_available():
   model.cuda()

def test():
    model.eval()
    avgLoss = 0
    for dataPoint in range(len(testData)):
        lstmInput = testData[dataPoint][0]
        lstmInput = torch.Tensor(lstmInput)
        lstmInput = lstmInput.view(len(testData[dataPoint][0]), 1, 5)
        label = testData[dataPoint][1]
        label = torch.Tensor(label)
        lstmInput = Variable(lstmInput)
        label = Variable(label)

        if use_cuda and torch.cuda.is_available():
            lstmInput.cuda()
            label.cuda()

        pred_label = model(lstmInput)
        loss = loss_fn(label, pred_label)
        avgLoss += loss.item()
    return avgLoss / len(testData)

def train(num_epochs):
    model.train()
    for epoch in range(num_epochs):
        avgLoss = 0.0
        for datapoint in range(len(trainData)):
            model.hidden = model.init_hidden()
            optimizer.zero_grad()

            lstmInput = trainData[datapoint][0]
            lstmInput = torch.Tensor(lstmInput)
            lstmInput = lstmInput.view(len(trainData[datapoint][0]), 1, 5)
            label = torch.Tensor(trainData[datapoint][1])
            label = label.view(1, 5)
            lstmInput = Variable(lstmInput)
            label = Variable(label)

            if use_cuda and torch.cuda.is_available():
                print("happens")
                lstmInput.cuda()
                label.cuda()

            pred_label = model(lstmInput)
            loss = loss_fn(pred_label, label)
            # print(label, pred_label)
            avgLoss += loss.item()
            loss.backward()
            optimizer.step()
        print("Epoch: ", epoch, "MSELoss: ", avgLoss / len(trainData), "Test Acc: ", test())

1 Answer:

Answer 0: (score: 1)

The cuda() method returns a copy of the tensor on the GPU; it does not move the tensor in place. You therefore need to assign the result back to the input variables:
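A minimal sketch of that change inside the test() and train() loops, using the variable names from the question's code:

if use_cuda and torch.cuda.is_available():
    # .cuda() returns a new tensor on the GPU, so the result must be assigned back
    lstmInput = lstmInput.cuda()
    label = label.cuda()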