PyTorch loss function returns the same value and parameter gradients are None

Asked: 2019-07-08 05:24:27

Tags: python-3.x optimization pytorch loss-function

I have just implemented my own optimizer in PyTorch based on the MNA algorithm; here is the GitHub link to my code, and here is the related MNA Article with further background. The MNA optimizer does update the weights, but after the second epoch the loss function (criterion) keeps returning the same value, and from that point on the parameters' gradients are None. I think the network's computation graph is being broken somewhere, and my guess is that assigning new weights is what causes it. I have checked the weights and the inputs, and they are valid. I have tried every available loss function, and the result is always the same. Please help me figure out why the loss function stops working. I have looked at the following questions; since my optimizer is not missing any parameters, they do not apply to my case:

1. 'None' gradients in pytorch
2. PyTorch: Loss remains constant
3. pytorch loss value not change
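To illustrate my guess, here is a minimal sketch (separate from my actual code) of what I suspect happens: a parameter that is replaced by a deepcopy never takes part in the forward pass, so backward() never fills its .grad:

import torch
from copy import deepcopy

w = torch.nn.Parameter(torch.randn(3))
w_copy = deepcopy(w)  # new leaf tensor; the forward pass never uses it

loss = (w * 2).sum()  # the graph is built on the original parameter only
loss.backward()

print(w.grad)       # tensor([2., 2., 2.]) -- gradient reaches the original
print(w_copy.grad)  # None -- the copy was never part of the graph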

import torch
from torch.optim.optimizer import Optimizer
from copy import deepcopy

class MNA(Optimizer):
    """Implements the Modified Neighbor Annealing algorithm.

    It has been proposed in "Heart murmur detection based on Wavelet
    Transformation and a synergy between Artificial Neural Network and modified
    Neighbor Annealing methods", published in Artificial Intelligence in Medicine.
    https://doi.org/10.1016/j.artmed.2017.05.005

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        initTemp      (float, optional): initial temperature (default: 1)
        schedule      (float, optional): coefficient used to decay the temperature (default: 0.99)
        window        (float, optional): controls the range from which random perturbations are drawn (default: 2)
        terminateTemp (float, optional): temperature at which training terminates (will be applied in future versions)

    Internal state:
        subItter (int):   number of sub-iterations in which the algorithm may freely search for better answers
        prew     (dict):  weights from the previous iteration
        bestw    (dict):  best weights found so far
        bestLoss (float): best loss found so far
        preLoss  (float): loss from the previous iteration
        bestTemp (float): best temperature for exploring answers
    """

    def __init__(self, params, initTemp=1, schedule=0.99, window=2, terminateTemp=0.00001):
        if not 0.0 < initTemp:
            raise ValueError("Invalid initTemp value: {}".format(initTemp))
        if not 0.0 <= schedule:
            raise ValueError("Invalid schedule value: {}".format(schedule))
        if not 0.0 < terminateTemp:
            raise ValueError("Invalid terminateTemp value: {}".format(terminateTemp))
        if not 0.0 < window:
            raise ValueError("Invalid window value: {}".format(window))

        defaults = dict(initTemp=initTemp, schedule=schedule,
                        window=window, terminateTemp=terminateTemp, subItter=10,
                        bestw=[], prew=[], bestLoss=1000, preLoss=1000, bestTemp=0)

        super(MNA, self).__init__(params, defaults)

        # keep copies of the initial weights as the "previous" and "best" solutions
        for group in self.param_groups:
            group['prew'] = deepcopy(group['params'])
            group['bestw'] = deepcopy(group['params'])

            for p in group['params']:
                state = self.state[p]
                # state['step'] = 0
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            # keep track of the best solution found so far
            if loss < group['bestLoss']:
                if group['bestLoss'] - loss > group['bestLoss'] * 0.005:  # smaller improvements imply more iterations
                    group['bestTemp'] = group['initTemp']
                    group['initTemp'] = group['initTemp'] / group['schedule']
                group['bestLoss'] = loss
                group['bestw'] = deepcopy(group['params'])

            if group['subItter'] == 0:
                # sub-iterations exhausted: restart the search from the best weights
                group['params'] = deepcopy(group['bestw'])
                group['subItter'] = 10
            else:
                group['subItter'] = group['subItter'] - 1
                if loss > group['preLoss']:
                    # annealing acceptance rule: accept a worse solution with
                    # probability exp(-delta / T), otherwise roll back
                    delt = torch.Tensor([(loss - group['preLoss']) / group['preLoss']])
                    if torch.rand(1) <= torch.exp(-delt / group['initTemp']):
                        group['prew'] = deepcopy(group['params'])
                        group['preLoss'] = loss
                    else:
                        group['params'] = deepcopy(group['prew'])
                else:
                    group['prew'] = deepcopy(group['params'])
                    group['preLoss'] = loss

            # cool down the temperature
            group['initTemp'] = group['initTemp'] * group['schedule']

            for p in group['params']:  # walk through the parameters layer by layer
                if group['initTemp'] > group['terminateTemp']:
                    # perturb the weights with uniform noise drawn from
                    # [-window * initTemp, +window * initTemp]
                    p.data = p.data + (2 * group['window'] * group['initTemp'] * torch.rand(p.data.size())) - group['window'] * group['initTemp']
                    # TODO: R should be replaced by the best weight found so far
                else:
                    # temperature fell below terminateTemp: stop updating
                    pass

        return loss
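If graph breakage from rebinding group['params'] really is the cause, I imagine the restore would have to copy values into the existing tensors instead of replacing them with deepcopies. A minimal sketch of what I mean (restore_in_place is a hypothetical helper, not part of my optimizer):

import torch

def restore_in_place(params, saved):
    # copy saved values into the live parameters; in-place copy_ keeps
    # object identity, so the model and optimizer still share tensors
    with torch.no_grad():
        for p, s in zip(params, saved):
            p.copy_(s)

p = torch.nn.Parameter(torch.zeros(3))
restore_in_place([p], [torch.ones(3)])
print(p)  # Parameter containing: tensor([1., 1., 1.], requires_grad=True)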

Test script:

import torch
import torch.nn as nn
import torch.utils.data
import matplotlib.pyplot as plt
import torch.nn.functional as F
import numpy as np
import math
import random
from torch.autograd import Variable
import mna
from copy import deepcopy

class MyNet(nn.Module):

    def __init__(self, input_size, hidden1_size, hidden2_size, num_classes):
        super(MyNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden1_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden1_size, hidden2_size)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden2_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.relu2(out)
        out = self.fc3(out)
        return out


# 1000 training samples: x drawn uniformly from [0, 20), target sin(x)
dtrain = [random.random() * 20 for cnt in range(1000)]
ddes = [math.sin(x) for x in dtrain]
ddes = [[x] for x in ddes]
dtrain = [[x] for x in dtrain]
# 100 test samples built the same way
dtest = [random.random() * 20 for cnt in range(100)]
dtdes = [math.sin(x) for x in dtest]
dtest = [[x] for x in dtest]
dtdes = [[x] for x in dtdes]

# model
net = MyNet(1, 5, 10, 1)
# loss function
criterion = nn.MSELoss()
# optimizer
optimizer = mna.MNA(net.parameters(), initTemp=1, schedule=0.99, window=2, terminateTemp=0.00001)


num_epochs = 500
train_loss = []
test_loss = []
train_accuracy = []
terr = []
avterr = []
loss = torch.Tensor(1)

for epoch in range(num_epochs):
    train_correct = 0
    items = Variable(torch.from_numpy(np.asarray(dtrain, dtype=np.float32)))
    classes = Variable(torch.from_numpy(np.asarray(ddes, dtype=np.float32)))
    train_total = classes.size(0)

    net.train()
    optimizer.zero_grad()
    outputs = net(items)

    loss = criterion(outputs, classes)
    loss.backward()

    def closure():
        optimizer.zero_grad()
        outputs = net(items)
        loss = criterion(outputs, classes)
        loss.backward()
        return loss

    for group in optimizer.param_groups:
        for p in group['params']:
            print('before= ', p.grad)

    optimizer.step(closure)

    train_correct = (torch.abs(outputs.data - classes.data)).sum()

    net.eval()
    train_loss.append(loss.item())
    train_accuracy.append(train_correct / train_total)

    # evaluate on the test set
    test_items = Variable(torch.from_numpy(np.asarray(dtest, dtype=np.float32)))
    test_classes = Variable(torch.from_numpy(np.asarray(dtdes, dtype=np.float32)))
    total = test_classes.size(0)
    outputs = net(test_items)
    loss = criterion(outputs, test_classes)
    test_loss.append(loss.data.item())

    terr = (torch.abs(outputs.data - test_classes)).sum()
    avterr.append(terr / total)
    if epoch == num_epochs - 1:
        print('finished')
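To narrow the problem down, one check I can add right after optimizer.step(closure) is whether the optimizer still holds the very same tensor objects as the model (a small diagnostic sketch using the net and optimizer defined above; it is not part of the training itself):

# after step() has run once, compare object identity: a deepcopy-based
# rebinding of group['params'] inside step() would make these print False
for group in optimizer.param_groups:
    for p_opt, p_net in zip(group['params'], net.parameters()):
        print(p_opt is p_net)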

Because Python relies on indentation and Stack Overflow mangled the formatting, I have also uploaded the code to GitHub.

0 Answers:

No answers yet.