[Original post; see the edit below]
I am new to PyTorch and am trying to do a sentence classification task with it.
I averaged the word embeddings (GloVe embeddings) in each sentence to form a sentence embedding, so every sentence embedding has the same dimension (a rough sketch of this averaging step is shown after the model code below). As far as I understand, since I already have the embeddings, I don't need an embedding layer before the LSTM. My model is as follows:
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class LSTM1(nn.Module):
    def __init__(self, args):
        super(LSTM1, self).__init__()
        self.args = args
        self.hidden_dim = args.hidden_dim
        self.lstm = nn.LSTM(args.embed_dim, args.hidden_dim)
        self.hidden2tag = nn.Linear(args.hidden_dim, 2)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(1, 1, self.hidden_dim).cuda()))

    def forward(self, embeds):
        embeds = autograd.Variable(torch.from_numpy(embeds[0]).float().cuda())
        lstm_output, self.hidden = self.lstm(embeds.view(1, 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_output.view(1, -1))
        scores = F.log_softmax(tag_space)
        return scores
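For reference, the averaging step can be done roughly like this (a sketch only; glove is assumed to be a dict mapping each word to its NumPy vector, which is not shown in the post):

import numpy as np

def sentence_embedding(sentence, glove, dim=300):
    # Average the GloVe vectors of the words that appear in the vocabulary.
    # `glove` is assumed to be a dict mapping word -> np.ndarray of shape (dim,).
    words = sentence.lower().split()
    vectors = [glove[w] for w in words if w in glove]
    if not vectors:
        return np.zeros(dim, dtype=np.float32)
    return np.mean(vectors, axis=0).astype(np.float32)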
I pass the sentences in as embeddings like this:
model = model.LSTM1(args).cuda()
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-5, momentum=0.9, weight_decay=1e-5)
optimizer.zero_grad()

for epoch in range(20):
    for i in range(len(sentences)):
        optimizer.zero_grad()
        model.hidden = model.init_hidden()
        target = prepare_targets(tag_phrase[i], tag_to_ix, 1)  # Gets a Variable (long tensor) for the target, a single value (either 0 or 1)
        score = model(sentences[i])  # sentences[i] is the embedding of sentence i
        loss = criterion(score, target)
        loss.backward()
        optimizer.step()
My questions:
Thanks.
[Edit] Improved code:
embeddings = torch.from_numpy(embeddings).float().cuda()
args.embed_num = embeddings.size(0)
args.embed_dim = embeddings.size(1)
seq = [i for i in range(13000)]
seq_tensor = torch.LongTensor(seq).cuda()  # Index tensor corresponding to the embeddings.
target = prepare_targets(tag_phrase[:13000], tag_to_ix, 1)
train_data = torch.utils.data.TensorDataset(seq_tensor.cuda(), target.data.cuda())
trainloader = torch.utils.data.DataLoader(train_data, batch_size=100, shuffle=True)

model = model2.LSTM1(args, embeddings, 3).cuda()
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, weight_decay=1e-5)

for epoch in range(1000):
    avg_loss = 0.
    for i, data in enumerate(trainloader, 0):
        seq, target = data
        seq_var, target = autograd.Variable(seq.cuda()), autograd.Variable(target.cuda())
        correct = 0
        optimizer.zero_grad()
        model.hidden = model.init_hidden()
        score = model(seq_var)
        loss = criterion(score, target)
        loss.backward()
        optimizer.step()
        epoch_lis.append(epoch)
        losses.append(loss.data[0])
        _, predicted = torch.max(score.data, 1)
        correct += (predicted == target.data).sum()
        print i, correct
The model:
class LSTM1(nn.Module):
    def __init__(self, args, embeddings, layers):
        super(LSTM1, self).__init__()
        self.num_layers = layers
        self.args = args
        self.hidden_dim = args.hidden_dim
        self.embed = nn.Embedding(args.embed_num, args.embed_dim)
        self.embed.weight = nn.Parameter(embeddings)
        self.lstm = nn.LSTM(args.embed_dim, args.hidden_dim, num_layers=self.num_layers)
        self.hidden2tag = nn.Linear(args.hidden_dim, 2)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        return (autograd.Variable(torch.zeros(self.num_layers, 1, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(self.num_layers, 1, self.hidden_dim).cuda()))

    def forward(self, sentence):
        embeds = self.embed(sentence)
        lstm_output, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_output.view(len(sentence), -1))
        scores = F.log_softmax(tag_space)
        return scores
Answer 0 (score 0):
Yes, perhaps: in autograd.Variable(torch.from_numpy(embeds[0]).float().cuda()) you don't need the .float(), because embeds is already a float tensor. By the way, combining word vectors into a sentence vector is fine, but then why do you need an RNN to produce the sentence representation at all? Think carefully about what you are doing.
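To make that concrete: if each sentence is already a single fixed-size averaged vector, a plain feed-forward classifier is enough. A minimal sketch (the hidden size and names here are assumptions; only args.embed_dim and the two output classes come from the question):

import torch
import torch.nn as nn
import torch.nn.functional as F

class AvgClassifier(nn.Module):
    # A minimal classifier over a pre-computed averaged sentence embedding.
    def __init__(self, embed_dim, hidden_dim, num_classes=2):
        super(AvgClassifier, self).__init__()
        self.fc1 = nn.Linear(embed_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)

    def forward(self, sent_vec):                 # sent_vec: (batch, embed_dim)
        h = F.relu(self.fc1(sent_vec))
        return F.log_softmax(self.fc2(h), dim=1)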
Yes, you are doing stochastic gradient descent (running backprop after every single sentence). To see how mini-batch gradient descent is done, look at any of the language-model-style PyTorch examples; for instance, the snli example is a good reference for text classification.
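As a rough illustration of what mini-batching means for an LSTM (this is not the snli example; the sizes and names below are assumptions, and it uses the newer tensor-only API without Variable): with batch_first=True the LSTM consumes a (batch, seq_len, embed_dim) tensor, and the hidden state has to be sized for the whole batch:

import torch
import torch.nn as nn

batch_size, seq_len, embed_dim, hidden_dim, num_layers = 100, 4, 300, 128, 1
lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers, batch_first=True)

batch = torch.randn(batch_size, seq_len, embed_dim)   # one mini-batch of input sequences
h0 = torch.zeros(num_layers, batch_size, hidden_dim)  # hidden state sized for the whole batch
c0 = torch.zeros(num_layers, batch_size, hidden_dim)
output, (hn, cn) = lstm(batch, (h0, c0))              # output: (batch, seq_len, hidden_dim)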
That depends on how you want to use them. I don't believe there is a single "most appropriate way" to use pre-trained sentence embeddings in PyTorch.
There are serious problems with your code. For example, the LSTM in your model isn't really doing anything: you feed it a single averaged vector, i.e. a sequence of length one. Also, if you want to debug, I suggest printing the loss value and checking whether it goes down as training proceeds; if it doesn't, you need to figure out why your model isn't learning.
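A minimal sketch of that debugging loop, assuming the trainloader, model, criterion and optimizer from the edited code above (written for a current PyTorch where plain tensors carry gradients, so no Variable wrapping is needed):

for epoch in range(20):
    running_loss, batches = 0.0, 0
    for seq_var, target in trainloader:
        optimizer.zero_grad()
        model.hidden = model.init_hidden()   # re-initialise the hidden state as in the question
        loss = criterion(model(seq_var), target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()          # .item() replaces loss.data[0] in newer PyTorch
        batches += 1
    print('epoch %d, avg loss %.4f' % (epoch, running_loss / batches))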
Suggestion: judging from your question, you seem to be new to PyTorch, so I recommend working through the official tutorials and examples before going further.