PyTorch error: bool value of Tensor with more than one value is ambiguous

Date: 2019-10-19 22:24:22

Tags: pytorch tensor

I have a neural network I'm trying to train, but I keep getting the error "bool value of Tensor with more than one value is ambiguous".

Here is my network:

import torch
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from torch.nn import functional as F
from DataSetLoader import ReplaysDataSet


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.l1 = torch.nn.Linear(12,24)
        self.l2 = torch.nn.Linear(24, 20)
        self.l3 = torch.nn.Linear(20, 16)
        self.l4 = torch.nn.Linear(16, 10)
        self.l5 = torch.nn.Linear(10, 6)
        self.l6 = torch.nn.Linear(6, 1)

    def forward(self, t):
        t = F.relu(self.l1(t))
        t = F.relu(self.l2(t))
        t = F.relu(self.l3(t))
        t = F.relu(self.l4(t))
        t = F.relu(self.l5(t))
        y_pred = F.relu(self.l6(t))

        return y_pred
    def get_num_correct(self,preds,labels):
        return preds.argmax(dim=1).eq(labels).sum()

class Test():
    model = Model()
    optimiser = torch.optim.SGD(model.parameters(), lr=0.01)

    dataset = ReplaysDataSet()
    trainLoader = DataLoader(dataset=dataset, batch_size=250, shuffle=True)
    batch = next(iter(trainLoader))
    criterion = nn.MSELoss
    for epoch in range(10):
        totalLoss = 0
        totalCorrect = 0
        for batch in trainLoader:

            data, label = batch
            prediction = model(data)
            prediction = prediction.reshape(250)
            print(prediction)
            print(label)
            loss = criterion(prediction, label)
            optimiser.zero_grad()

            loss.backward()
            optimiser.step()

            totalLoss += loss.item()
            print(totalLoss)
            totalCorrect += model.get_num_correct(prediction, label)
            print(totalCorrect)

Here is my data loader:

import torch
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F

class ReplaysDataSet(Dataset):
    def __init__(self):
        self.xy = np.genfromtxt("dataset.csv", delimiter=',', dtype=np.float32)
        self.x_data = torch.from_numpy(self.xy[0:,1:])
        self.y_data = torch.from_numpy(self.xy[0:,0])
        self.length = len(self.xy)
    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]
    def __len__(self):
        return self.length

Here is some data from the csv I'm training on (the first column is the 0/1 label; the remaining 12 columns are the input features):

1,303,497,784,748,743,169,462,479,785,310,26,701
1,658,598,645,786,248,381,80,428,248,530,591,145
0,38,490,796,637,130,380,226,359,720,392,464,497
0,94,752,645,801,381,479,475,381,227,645,445,248
0,59,806,254,521,91,538,212,645,609,227,545,531
1,65,254,685,565,91,796,445,658,465,485,472,184
1,385,248,211,612,82,38,485,652,212,373,563,26
1,796,596,785,310,145,479,142,685,748,635,798,474
1,380,658,485,645,36,598,806,428,786,798,645,113
0,743,214,625,642,530,784,645,641,65,598,786,637

The error I'm getting is:

Traceback (most recent call last):
  File "C:/Users/tayya/PycharmProjects/untitled/NetworkFile.py", line 32, in <module>
    class Test():
  File "C:/Users/tayya/PycharmProjects/untitled/NetworkFile.py", line 50, in Test
    loss = criterion(prediction, label)
File "C:\Users\tayya\PycharmProjects\untitled\venv\lib\site-packages\torch\nn\modules\loss.py", line 428, in __init__
super(MSELoss, self).__init__(size_average, reduce, reduction)
  File "C:\Users\tayya\PycharmProjects\untitled\venv\lib\site-packages\torch\nn\modules\loss.py", line 12, in __init__
    self.reduction = _Reduction.legacy_get_string(size_average, reduce)
  File "C:\Users\tayya\PycharmProjects\untitled\venv\lib\site-packages\torch\nn\_reduction.py", line 36, in legacy_get_string
    if size_average and reduce:
RuntimeError: bool value of Tensor with more than one value is ambiguous

Any help would be greatly appreciated. I'm new to NNs, so apologies if I've made an obvious mistake.

1 Answer:

Answer 0 (score: 1):

A few things:
1. Judging from your data, this is a classification problem rather than a regression one, so MSELoss is not the best choice. I changed it to BCELoss, which should be more suitable (see the note after this list on what actually triggers the error).
2. The last activation in your network is ReLU; since this is a binary classification problem, Sigmoid is a better choice.
3. I made a small correction to the "get_num_correct" function.
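
As for the error itself: criterion = nn.MSELoss assigns the loss class, not an instance, so criterion(prediction, label) actually calls MSELoss.__init__ with your tensors as the size_average/reduce arguments, and the "if size_average and reduce:" check shown in the traceback then tries to use a whole tensor as a bool. Instantiating the loss first, as in the code below, avoids this. A minimal sketch with stand-in tensors (not your real data):

import torch
import torch.nn as nn

prediction = torch.rand(4)                  # stand-in model outputs in (0, 1)
label = torch.randint(0, 2, (4,)).float()   # stand-in binary targets

# criterion = nn.MSELoss                    # the class itself; calling it runs
#                                           # __init__ and raises the bool error
criterion = nn.BCELoss()                    # instantiate the loss first
loss = criterion(prediction, label)         # then call the instance
print(loss.item())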

Hope this works for you:

from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from torch.nn import functional as F
from DataSetLoader import ReplaysDataSet
import torch

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.l1 = torch.nn.Linear(12,24)
        self.l2 = torch.nn.Linear(24, 20)
        self.l3 = torch.nn.Linear(20, 16)
        self.l4 = torch.nn.Linear(16, 10)
        self.l5 = torch.nn.Linear(10, 6)
        self.l6 = torch.nn.Linear(6, 1)

    def forward(self, t):
        t = F.relu(self.l1(t))
        t = F.relu(self.l2(t))
        t = F.relu(self.l3(t))
        t = F.relu(self.l4(t))
        t = F.relu(self.l5(t))
        y_pred = F.sigmoid(self.l6(t))

        return y_pred
    def get_num_correct(self,preds,labels):
        return preds.round().squeeze().eq(labels).numpy().sum()

class Test():
    model = Model()
    optimiser = torch.optim.SGD(model.parameters(), lr=0.01)

    dataset = ReplaysDataSet()
    trainLoader = DataLoader(dataset=dataset, batch_size=250, shuffle=True)
    batch = next(iter(trainLoader))
    criterion = nn.BCELoss()
    for epoch in range(10):
        totalLoss = 0
        totalCorrect = 0
        for batch in trainLoader:

            data, label = batch
            prediction = model(data)
            print(prediction)
            print(label)
            loss = criterion(prediction.squeeze(), label)
            optimiser.zero_grad()

            loss.backward()
            optimiser.step()

            totalLoss += loss.item()
            print(totalLoss)
            totalCorrect += model.get_num_correct(prediction, label)
            print(totalCorrect)
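
With a single sigmoid output per sample, argmax(dim=1) is meaningless (it would always return 0), which is why the corrected get_num_correct rounds the predicted probability to 0 or 1 and compares it with the label instead.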