[RUNTIME ERROR]:张量的元素0不需要grad并且没有grad_fn

时间:2019-07-18 02:56:55

标签: python machine-learning deep-learning pytorch logistic-regression

我正在处理分类问题。我正在尝试将网址分类为恶意或良性。我正在针对此问题实施逻辑回归,这是我的代码:

import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable

class LogisticRegressionModel(nn.Module):

    def __init__(self, in_dim, num_classes):
        super().__init__()
        self.linear = nn.Linear(in_dim, num_classes)

    def forward(self, x):
        out = self.linear(x)
        return out

class Train(LogisticRegressionModel):


    def __init__(self, in_dim, num_classes, lr, batch_size):
        super().__init__(in_dim, num_classes)
        self.batch_size = batch_size
        self.learning_rate = lr
        self.input_layer_dim = in_dim
        self.output_layer_dim = num_classes
        self.criterion = nn.CrossEntropyLoss()
        self.model = LogisticRegressionModel(self.input_layer_dim, self.output_layer_dim)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = self.model.to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr = self.learning_rate)  

    def epochs(self, iterations, train_dataset, batch_size):
        epochs = int(iterations/(len(train_dataset)/batch_size))
        return epochs

    def train_model(self, training_data, n_iters):
        batch = self.batch_size
        epochs = self.epochs(n_iters, training_data, batch)
        training_data = torch.utils.data.DataLoader(dataset = training_data, batch_size = batch, shuffle = False)

        for epoch in range(epochs):
            for i, data in enumerate(training_data):

                X_train = data[:, :-1]
                Y_train = data[:, -1]

                if torch.cuda.is_available():
                    x = Variable(X_train).cuda()
                    y = Variable(Y_train).cuda()

                else:
                    x = X_train.float()
                    y = Y_train.type(torch.LongTensor)

                self.optimizer.zero_grad()
                out = self.model(x).data
                loss = self.criterion(out, y)
                loss.backward()
                self.optimizer.step()

batch_size = 1000
train_class = Train((training_set.shape[1]-1), number_of_target_labels+1, 0.001, batch_size)
rain_class.train_model(training_set, batch_size)

但是,运行此代码块时出现以下错误:

RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

我不知道为什么会这样。如果您想查看我的笔记本和数据集,请访问以下链接:https://github.com/islamaymansais/Malicious-URL-Classifier

这是一个分类项目,用于将URL分类为良性或恶意类别(网络钓鱼,污损,垃圾邮件,恶意软件)。让我知道您是否有任何需要澄清的问题。

0 个答案:

没有答案