TypeError: 'NoneType' object is not callable (PyTorch)

Asked: 2020-03-01 17:46:55

Tags: python pytorch

I am trying to complete an implementation of a neural network class using PyTorch, but the weight-update step raises an error related to 'NoneType'. I am using the PyTorch package with Python 3.7.3 in a Jupyter Notebook. The problem is in the step where I have to take the weight update step and then zero the gradient values.
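
For reference, the manual gradient-descent update pattern I am trying to follow looks roughly like this (a minimal sketch of my own, with a standalone parameter w and a toy loss, not the assignment's code):

    import torch

    # A parameter that actually participates in the computation, so autograd tracks it.
    w = torch.zeros(2, 1, requires_grad=True)
    x = torch.randn(5, 2)

    loss = ((x @ w) ** 2).mean()
    loss.backward()                # populates w.grad

    with torch.no_grad():
        w -= 0.01 * w.grad         # .grad is an attribute, not a method
        w.grad.zero_()             # reset the gradient before the next step

My implementation so far: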

    import torch


    class NNet(torch.nn.Module):

        def __init__(self, n_inputs, n_hiddens_per_layer, n_outputs, act_func='tanh'):
            super().__init__()  # call parent class (torch.nn.Module) constructor

            # Set self.n_hiddens_per_layer to [] if argument is 0, [], or [0]
            if n_hiddens_per_layer == 0 or n_hiddens_per_layer == [] or n_hiddens_per_layer == [0]:
                self.n_hiddens_per_layer = []
            else:
                self.n_hiddens_per_layer = n_hiddens_per_layer

            self.hidden_layers = torch.nn.ModuleList()  # necessary for model.to('cuda')

            for nh in self.n_hiddens_per_layer:
                self.hidden_layers.append(torch.nn.Sequential(
                    torch.nn.Linear(n_inputs, nh),
                    torch.nn.Tanh() if act_func == 'tanh' else torch.nn.ReLU()))
                n_inputs = nh

            self.output_layer = torch.nn.Linear(n_inputs, n_outputs)

            self.Xmeans = None
            self.Xstds = None
            self.Tmeans = None
            self.Tstds = None

            self.error_trace = []

        def forward(self, X):
            Y = X
            for hidden_layer in self.hidden_layers:
                Y = hidden_layer(Y)
            Y = self.output_layer(Y)
            return Y

        def train(self, X, T, n_epochs, learning_rate, verbose=True):

            # Set data matrices to torch.tensors if not already.
            if not isinstance(X, torch.Tensor):
                X = torch.from_numpy(X).float()
            if not isinstance(T, torch.Tensor):
                T = torch.from_numpy(T).float()
            W = torch.zeros((2, 1), requires_grad=True)
            print(W.requires_grad)

            # Calculate standardization parameters if not already calculated
            if self.Xmeans is None:
                self.Xmeans = X.mean(0)
                self.Xstds = X.std(0)
                self.Xstds[self.Xstds == 0] = 1
                self.Tmeans = T.mean(0)
                self.Tstds = T.std(0)
                self.Tstds[self.Tstds == 0] = 1

            # Standardize inputs and targets
            X = (X - self.Xmeans) / self.Xstds
            T = (T - self.Tmeans) / self.Tstds

            # Set optimizer to Adam and loss function to MSELoss
            optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
            mse_func = torch.nn.MSELoss()

            # For each epoch:
            #   Do forward pass to calculate output Y.
            #   Calculate mean squared error loss, mse.
            #   Calculate gradient of mse with respect to all weights by calling mse.backward().
            #   Take weight update step, then zero the gradient values.
            #   Unstandardize the mse error and save in self.error_trace
            #   Print epoch+1 and unstandardized error if verbose is True and
            #             (epoch+1 is n_epochs or epoch+1 % (n_epochs // 10) == 0)

            for epoch in range(n_epochs):
                # Do forward pass to calculate output Y.
                Y = self.forward(X)
                print("Y = \n", Y)
                # Calculate mean squared error loss, mse.
                mse = ((T - Y)**2).mean()
                #mse = torch.mean((T - Y[-1]) ** 2)
                print("Y shape = \n", Y.shape)
                print("T shape = \n", T.shape)
                print("MSE = \n", mse)

                # Calculate gradient of mse with respect to all weights by calling mse.backward().
                #W.retain_grad()
                mse.backward(torch.ones(100))
                #print("mse.backward(torch.ones(100))", mse.backward(torch.ones(100)))
                # Take weight update step, then zero the gradient values.
                #print("W.grad = ", W.grad())
                with torch.no_grad():
                    W = learning_rate*W.grad()
                    print("kuttu", W.requires_grad)
                    W -= learning_rate * W.grad()
                    W.grad.zero_()

                # Unstandardize the mse error and save in self.error_trace
                self.error_trace = mse * self.Tstds

                #. . .


        def use(self, X):

            # Set input matrix to torch.tensors if not already.
            if not isinstance(X, torch.Tensor):
                X = torch.from_numpy(X).float()

            # Standardize X
            print("here=\n", type(X))
            X = (X - torch.mean(X)) / self.Xstds

            # Do forward pass and unstandardize resulting output. Assign to variable Y.

            # Return output Y after detaching from computation graph and converting to numpy
            return Y.detach().numpy()
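
If it matters, this is roughly how I construct and call the class (a sketch; the data below is made up, with 2 input features and 1 target column):

    import numpy as np

    X = np.random.uniform(-1, 1, size=(100, 2)).astype(np.float32)
    T = (2.0 * X[:, 0:1] - X[:, 1:2]).astype(np.float32)

    net = NNet(n_inputs=2, n_hiddens_per_layer=[10, 10], n_outputs=1)
    net.train(X, T, n_epochs=100, learning_rate=0.01)

Running this is what produces the failure below.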




    <ipython-input-20-6e1e577f866d> in train(self, X, T, n_epochs, learning_rate, verbose)
         86             #   Take weight update step, then zero the gradient values.
         87             with torch.no_grad():
    ---> 88                 W = learning_rate*W.grad()
         89                 print("w",W.requires_grad)
         90                 W -= learning_rate * W.grad()

TypeError: 'NoneType' object is not callable
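
For comparison, reading .grad as an attribute works, and it stays None on any tensor that never took part in the loss, which seems to be exactly what makes a call like W.grad() raise this TypeError (minimal sketch):

    import torch

    a = torch.zeros(2, 1, requires_grad=True)   # used in the loss below
    b = torch.zeros(2, 1, requires_grad=True)   # never used in the loss

    loss = (a ** 2).sum()
    loss.backward()

    print(a.grad)   # a tensor of zeros: populated by backward()
    print(b.grad)   # None: b did not participate, so b.grad() would call None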

0 Answers:

No answers yet