I am learning PyTorch and trying to build a network that can remember its previous inputs. I have tried two different input/output structures (see below), but I have not been able to get either of them to work the way I want.
Input 1:
in:[4,2,7,8]
out: [[0,0,4],[0,4,2],[4,2,7],[2,7,8]]
Code:
import numpy as np

def histroy(num_samples=4, look_back=3):
    data = np.random.randint(10, size=(num_samples)).tolist()
    # each label is the previous window shifted left by one, with the new value appended
    lab = [[0] * look_back]
    for i in data:
        lab.append(lab[-1][1:] + [i])
    return data, lab[1:]
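A quick sanity check of what this returns (the data are drawn at random, so the concrete values in the comments are just one illustrative draw):

data, lab = histroy(num_samples=4, look_back=3)
# if the random draw happened to be data == [4, 2, 7, 8], then
# lab == [[0, 0, 4], [0, 4, 2], [4, 2, 7], [2, 7, 8]]
print(data, lab)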
Input 2:
in:[4,2,7,8]
out: [0,4,2,7]
import numpy as np

def histroy(num_samples=4):
    data = np.random.randint(10, size=(num_samples)).tolist()
    # labels are the inputs shifted right by one position, starting with a 0
    lab = [0]
    for i in data:
        lab.append(i)
    return data, lab
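And the same kind of sanity check for the second structure (again, the values shown are just one possible random draw):

data, lab = histroy(num_samples=4)
# if data == [4, 2, 7, 8], then lab == [0, 4, 2, 7, 8],
# i.e. lab is data prefixed with a 0 (one element longer than data)
print(data, lab)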
I have tried many different network structures and training methods, but nothing seems to stick.
I think the only things I have gotten right so far are that net.hidden = net.init_hidden() should be called outside of each epoch, and that loss.backward(retain_graph=True) is needed, but neither seems to help.
Currently the network learns the last number in the sequence, but it never seems to learn any of the others.
My last attempt:
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim


def histroy(num_samples=4, look_back=3):
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [[0] * look_back]
    for i in data:
        lab.append(lab[-1][1:] + [i])
    return data, lab[1:]


class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=10, num_layers=1):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.memory = nn.RNN(self.input_dim, self.hidden_dim, self.num_layers)
        self.linear = nn.Linear(self.hidden_dim, output_dim)
        self.first = True

    def init_hidden(self):
        # This is what we'll initialise our hidden state as
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        self.memory_out, self.hidden = self.memory(input.view(len(input), self.batch_size, -1))
        y_pred = self.linear(self.memory_out[-1].view(self.batch_size, -1))
        return y_pred.view(-1)


if __name__ == '__main__':
    data_amount = 10000
    batch_size = 1  # default is 32
    data_amount -= data_amount % batch_size
    number_of_times_on_the_same_data = 250
    look_back = 5

    net = Net(input_dim=1, hidden_dim=25, batch_size=batch_size, output_dim=look_back)

    data, labs = histroy(data_amount, look_back)
    data = torch.Tensor(data).float()
    labs = torch.Tensor(labs).float()

    optimizer = optim.Adam(net.parameters())
    criterion = torch.nn.MSELoss(size_average=False)

    for epoch in range(number_of_times_on_the_same_data):  # loop over the dataset multiple times
        running_loss = 0.0
        data, labs = histroy(data_amount, look_back)
        data = torch.Tensor(data).float()
        labs = torch.Tensor(labs).float()
        net.hidden = net.init_hidden()
        print("epoch", epoch)
        for i in range(0, data_amount, batch_size):
            inputs = data[i:i + batch_size]
            labels = labs[i:i + batch_size]

            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            optimizer.step()
            running_loss += loss.item()

            if i >= data_amount - batch_size:
                print("loss", loss)
                net.hidden = net.init_hidden()
                print("Outputs", outputs)
                print("Input", data[-1 * look_back:])
                print("labels", labels)
Answer 0 (score: 0)
The problem with your network is that your inputs have a shape of 1.
That is why your RNN only predicts the last number: each input is a single value, so the look_back window is never actually used. You have to fix your code so that the inputs have size [1, 5]. Printing the shapes inside your current training loop shows the problem:
for i in range(0, data_amount, batch_size):
    inputs = data[i:i + batch_size]
    labels = labs[i:i + batch_size]
    print(inputs.shape, labels.shape)

>>> torch.Size([1]) torch.Size([1, 5])
>>> torch.Size([1]) torch.Size([1, 5])...
Your code should look like this:
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim


def histroy(num_samples=4, look_back=3):
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [[0] * look_back]
    for i in data:
        lab.append(lab[-1][1:] + [i])
    return lab[:-1], lab[1:]


class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=10, num_layers=1):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.memory = nn.RNN(self.input_dim, self.hidden_dim, self.num_layers)
        self.linear = nn.Linear(self.hidden_dim, output_dim)
        self.first = True

    def init_hidden(self):
        # This is what we'll initialise our hidden state as
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        self.memory_out, self.hidden = self.memory(input.view(len(input), self.batch_size, -1))
        y_pred = self.linear(self.memory_out[-1].view(self.batch_size, -1))
        return y_pred.view(-1)


if __name__ == '__main__':
    data_amount = 10000
    batch_size = 1  # default is 32
    data_amount -= data_amount % batch_size
    number_of_times_on_the_same_data = 250
    look_back = 5

    net = Net(input_dim=1, hidden_dim=25, batch_size=batch_size, output_dim=look_back)

    data, labs = histroy(data_amount, look_back)
    data = torch.Tensor(data).float()
    labs = torch.Tensor(labs).float()

    optimizer = optim.Adam(net.parameters())
    criterion = torch.nn.MSELoss(size_average=False)

    for epoch in range(number_of_times_on_the_same_data):  # loop over the dataset multiple times
        running_loss = 0.0
        data, labs = histroy(data_amount, look_back)
        data = torch.Tensor(data).float()
        labs = torch.Tensor(labs).float()
        net.hidden = net.init_hidden()
        print("epoch", epoch)
        for i in range(0, data_amount, batch_size):
            inputs = data[i:i + batch_size].view(-1)
            labels = labs[i:i + batch_size]

            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            optimizer.step()
            running_loss += loss.item()

            if i >= data_amount - batch_size:
                print("loss", loss)
                net.hidden = net.init_hidden()
                print("Outputs", outputs)
                print("Input", data[i:i + batch_size][-1])
                print("labels", labels)