I want to build an RNN model and teach it to generate "ihello" from "hihell". I am new to PyTorch and wrote the code by following along with a video tutorial. I have two Python files, train.py and model.py.
Here is model.py:
#----------------- model for teaching the rnn hihell -> ihello
#----------------- OUR MODEL ---------------------
import torch
import torch.nn as nn
from torch import autograd

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, batch_first=True)

    def forward(self, x, hidden):
        # Reshape input to (batch_size, sequence_length, input_size)
        x = x.view(batch_size, sequence_length, input_size)
        # Propagate input through RNN
        # Input: (batch, seq_len, input_size)
        out, hidden = self.rnn(x, hidden)
        out = out.view(-1, num_classes)
        return hidden, out

    def init_hidden(self):
        # Initialize hidden state
        # (num_layers*num_directions, batch, hidden_size)
        return autograd.Variable(torch.zeros(num_layers, batch_size, hidden_size))
Here is train.py:
"""----------------------train for teach rnn to hihell to ihello--------------------------"""
#----------------- DATA PREPARATION ---------------------
#Import
import torch
import torch.nn as nn
from torch import autograd
from model import Model
import sys
idx2char=['h','i','e','l','o']
#Teach hihell->ihello
x_data=[0,1,0,2,3,3]#hihell
y_data=[1,0,2,3,3,4]#ihello
one_hot_lookup=[[1,0,0,0,0],#0
[0,1,0,0,0],#1
[0,0,1,0,0],#2
[0,0,0,1,0],#3
[0,0,0,0,1]]#4
x_one_hot=[one_hot_lookup[x] for x in x_data]
inputs=autograd.Variable(torch.Tensor(x_one_hot))
labels=autograd.Variable(torch.LongTensor(y_data))
""" ----------- Parameters Initialization------------"""
num_classes = 5
input_size = 5 # one hot size
hidden_size = 5 # output from LSTM to directly predict onr-hot
batch_size = 1 # one sequence
sequence_length = 1 # let's do one by one
num_layers = 1 # one layer RNN
"""----------------- LOSS AND TRAINING ---------------------"""
#Instantiate RNN model
model=Model()
#Set loss and optimizer function
#CrossEntropyLoss=LogSoftmax+NLLLOSS
criterion=torch.nn.CrossEntropyLoss()
optimizer=torch.optim.Adam(model.parameters(),lr=0.1)
"""----------------Train the model-------------------"""
for epoch in range(100):
optimizer.zero_grad()
loss=0
hidden=model.init_hidden()
sys.stdout.write("Predicted String:")
for input,label in zip(inputs,labels):
#print(input.size(),label.size())
hidden,output=model(input,hidden)
val,idx=output.max(1)
sys.stdout.write(idx2char[idx.data[0]])
loss+=criterion(output,label)
print(",epoch:%d,loss:%1.3f"%(epoch+1,loss.data[0]))
loss.backward()
optimizer.step()
When I run train.py, I get this error:

self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, batch_first=True)
NameError: name 'input_size' is not defined

I don't understand why I get this error, because I have input_size=5 a few lines earlier in my code. Can anyone help me? Thanks.
Answer (score: 2)
The variables you define in train.py (num_classes, input_size, ...) are scoped to train.py itself; they are only visible inside that file. model.py knows nothing about them.
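To see the scoping problem in isolation, here is a minimal sketch with hypothetical file names: a name defined at module level in one file is not a global of another module, even when that module is imported by the first.

# settings.py (hypothetical)
from network import build

input_size = 5
build()   # raises NameError, even though input_size is defined here

# network.py (hypothetical)
def build():
    # Python looks up input_size in network.py's global namespace, not settings.py's
    return input_size   # NameError: name 'input_size' is not defined

This is exactly what happens in your code: when train.py calls Model(), the nn.RNN(...) line inside model.py looks for input_size in model.py's globals and fails.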
I suggest passing these parameters to the constructor:

class Model(nn.Module):
    def __init__(self, hidden_size, input_size):
        # same as before

and then instantiating the model as:

model = Model(hidden_size, input_size)
Likewise, for the other variables you define in train.py (and want to use in model.py), you must either pass them as arguments to the functions that need them, or pass them to the constructor and store them as attributes.
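Putting it all together, here is a minimal sketch of model.py with every size passed in through the constructor and stored as an attribute (the exact parameter list is an assumption based on the sizes your posted code uses; I kept the autograd.Variable wrapper to match your version of the tutorial):

# model.py
import torch
import torch.nn as nn
from torch import autograd

class Model(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes,
                 batch_size, sequence_length):
        super(Model, self).__init__()
        # keep the sizes as attributes so forward() and init_hidden() can use them
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, batch_first=True)

    def forward(self, x, hidden):
        # reshape the input to (batch_size, sequence_length, input_size)
        x = x.view(self.batch_size, self.sequence_length, self.input_size)
        out, hidden = self.rnn(x, hidden)
        out = out.view(-1, self.num_classes)
        return hidden, out

    def init_hidden(self):
        # hidden state of shape (num_layers * num_directions, batch, hidden_size)
        return autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_size))

and in train.py, after the parameter initialization block:

model = Model(input_size, hidden_size, num_layers, num_classes, batch_size, sequence_length)

Nothing else in the training loop needs to change.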