Multivariate input LSTM in PyTorch

Time: 2019-07-02 19:27:34

Tags: python pytorch lstm

I would like to implement an LSTM for multivariate input in PyTorch.

Following this article, https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/, which uses Keras, the input data are in the shape of (number of samples, number of timesteps, number of parallel features):

in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90])
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95])
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))])
. . . 
Input     Output
[[10 15]
 [20 25]
 [30 35]] 65
[[20 25]
 [30 35]
 [40 45]] 85
[[30 35]
 [40 45]
 [50 55]] 105
[[40 45]
 [50 55]
 [60 65]] 125
[[50 55]
 [60 65]
 [70 75]] 145
[[60 65]
 [70 75]
 [80 85]] 165
[[70 75]
 [80 85]
 [90 95]] 185

n_timesteps = 3
n_features = 2
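
The windows in the table above stack into a single array of shape (n_samples, n_timesteps, n_features). A minimal numpy sketch of that windowing, reusing the arrays defined above (the intermediate names here are illustrative):

import numpy as np

data = np.stack([in_seq1, in_seq2], axis=1)   # (9, 2): timesteps x parallel features
X = np.stack([data[i:i + n_timesteps] for i in range(len(data) - n_timesteps + 1)])
y = out_seq[n_timesteps - 1:]                 # target is the sum at each window's last step
print(X.shape)  # (7, 3, 2) == (samples, timesteps, features)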

In Keras this seems straightforward:

model.add(LSTM(50, activation='relu', input_shape=(n_timesteps, n_features)))

Can it be done in some other way than creating n_features LSTMs as the first layer and feeding each separately (imagine multiple streams of sequences) and then flattening their output into a linear layer?

I'm not 100% sure, but by the nature of the LSTM the input cannot be flattened and passed as a 1D array, because each sequence "plays by different rules" which the LSTM is supposed to learn.

So how does such a Keras implementation translate to PyTorch, where the input has shape (seq_len, batch, input_size)? (Source: https://pytorch.org/docs/stable/nn.html#lstm)
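
For reference, a minimal sketch of what I understand the shape mapping to be (tensor values are random; only the shapes matter):

import torch

# Keras: input_shape=(n_timesteps, n_features) with an implicit samples dimension;
# PyTorch default layout is (seq_len, batch, input_size)
lstm = torch.nn.LSTM(input_size=2, hidden_size=50)
x = torch.randn(3, 7, 2)        # (seq_len=3, batch=7, input_size=2)
out, (h, c) = lstm(x)
print(out.shape)                # torch.Size([3, 7, 50])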


Edit:


Can it be done in some other way than creating n_features LSTMs as the first layer and feeding each separately (imagine multiple streams of sequences) and then flattening their output into a linear layer?

According to the PyTorch docs the input_size argument denotes the number of features; does that actually mean the number of parallel sequences?

2 answers:

Answer 0: (score: 0)

The input to any RNN cell in PyTorch is a 3D tensor of shape (seq_len, batch, input_size), or (batch, seq_len, input_size) if you prefer the latter layout (as I do); in that case initialize the LSTM layer (or any other RNN layer) with batch_first = True.

https://discuss.pytorch.org/t/could-someone-explain-batch-first-true-in-lstm/15402
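
A quick sketch of the two layouts (shapes only; batch_first simply swaps the first two dimensions of the input and output):

import torch

x = torch.randn(5, 4, 2)                                   # (seq_len=5, batch=4, input_size=2)
lstm = torch.nn.LSTM(input_size=2, hidden_size=8)          # default: sequence-first
out, _ = lstm(x)                                           # out: (5, 4, 8)

lstm_bf = torch.nn.LSTM(input_size=2, hidden_size=8, batch_first=True)
out_bf, _ = lstm_bf(x.transpose(0, 1))                     # (4, 5, 2) in -> (4, 5, 8) out
print(out.shape, out_bf.shape)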

You also don't have any recurrent relation in your setup. If you want to build a many-to-one counter, create inputs of size (-1, n, 1), where -1 is the batch size you want and n is the number of digits, one digit per timestep: e.g. input [[10] [20] [30]], output 60; input [[30] [70]], output 100, and so on. The input lengths must vary between 1 and the maximum so the RNN can learn the recurrent relation.

import random
import numpy as np
import torch


def rnd_io():
    # a random-length (1..10) sequence of integers in [0, 100), shaped (seq_len, 1)
    return np.random.randint(100, size=(random.randint(1, 10), 1))


class CountRNN(torch.nn.Module):
    def __init__(self):
        super(CountRNN, self).__init__()
        # many-to-one: an RNN over one digit per timestep, a linear head on the last hidden state
        self.rnn = torch.nn.RNN(1, 20, num_layers=1, batch_first=True)
        self.fc = torch.nn.Linear(20, 1)

    def forward(self, x):
        full_out, last_out = self.rnn(x)  # last_out: (num_layers, batch, hidden_size)
        return self.fc(last_out)


nnet = CountRNN()
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(nnet.parameters(), lr=0.0005)

batch_size = 100
batches = 10000 * 1000
printout = max(batches // (20 * 1000), 1)

for t in range(batches):
    optimizer.zero_grad()

    # one random-length sample per step, unsqueezed to (batch=1, seq_len, input_size=1)
    x_batch = torch.unsqueeze(torch.from_numpy(rnd_io()).float(), 0)
    y_batch = torch.unsqueeze(torch.sum(x_batch), 0)

    output = nnet.forward(x_batch)
    loss = criterion(output, y_batch)

    if t % printout == 0:
        print('step : ', t, 'loss : ', loss.item())
        torch.save(nnet.state_dict(), './rnn_summ.pth')

    loss.backward()
    optimizer.step()
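
A minimal inference sketch for the counter above, assuming the checkpoint saved during training exists and the loss has converged:

nnet.load_state_dict(torch.load('./rnn_summ.pth'))
nnet.eval()
with torch.no_grad():
    x = torch.tensor([[[10.0], [20.0], [30.0]]])  # (batch=1, seq_len=3, input_size=1)
    print(nnet(x))  # should approach 60 once trained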

Answer 1: (score: 0)

I've commented the parts in question, hopefully it makes sense:

Data preparation

import random
import numpy as np
import torch

# multivariate data preparation
from numpy import array
from numpy import hstack

# split a multivariate sequence into samples
def split_sequences(sequences, n_steps):
    X, y = list(), list()
    for i in range(len(sequences)):
        # find the end of this pattern
        end_ix = i + n_steps
        # check if we are beyond the dataset
        if end_ix > len(sequences):
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1]
        X.append(seq_x)
        y.append(seq_y)
    return array(X), array(y)

# define input sequence
in_seq1 = array([x for x in range(0,100,10)])
in_seq2 = array([x for x in range(5,105,10)])
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))])
# convert to [rows, columns] structure
in_seq1 = in_seq1.reshape((len(in_seq1), 1))
in_seq2 = in_seq2.reshape((len(in_seq2), 1))
out_seq = out_seq.reshape((len(out_seq), 1))
# horizontally stack columns
dataset = hstack((in_seq1, in_seq2, out_seq))
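
A quick sanity check of the stacked dataset (the printed values follow from the ranges defined above):

print(dataset.shape)  # (10, 3): two input columns plus the target column
print(dataset[:3])
# [[ 0  5  5]
#  [10 15 25]
#  [20 25 45]]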

Multivariate LSTM network

class MV_LSTM(torch.nn.Module):
    def __init__(self,n_features,seq_length):
        super(MV_LSTM, self).__init__()
        self.n_features = n_features
        self.seq_len = seq_length
        self.n_hidden = 20 # number of hidden states
        self.n_layers = 1 # number of LSTM layers (stacked)

        self.l_lstm = torch.nn.LSTM(input_size = n_features, 
                                 hidden_size = self.n_hidden,
                                 num_layers = self.n_layers, 
                                 batch_first = True)
        # according to pytorch docs LSTM output is 
        # (batch_size,seq_len, num_directions * hidden_size)
        # when considering batch_first = True
        self.l_linear = torch.nn.Linear(self.n_hidden*self.seq_len, 1)


    def init_hidden(self, batch_size):
        # even with batch_first = True this remains same as docs
        hidden_state = torch.zeros(self.n_layers,batch_size,self.n_hidden)
        cell_state = torch.zeros(self.n_layers,batch_size,self.n_hidden)
        self.hidden = (hidden_state, cell_state)


    def forward(self, x):        
        batch_size, seq_len, _ = x.size()

        lstm_out, self.hidden = self.l_lstm(x,self.hidden)
        # lstm_out(with batch_first = True) is 
        # (batch_size,seq_len,num_directions * hidden_size)
        # for following linear layer we want to keep batch_size dimension and merge rest       
        # .contiguous() -> solves tensor compatibility error
        x = lstm_out.contiguous().view(batch_size,-1)
        return self.l_linear(x)
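
A shape walk-through of the module above (a sketch with random values; only the shapes matter):

net = MV_LSTM(n_features=2, seq_length=3)
net.init_hidden(batch_size=4)
dummy = torch.randn(4, 3, 2)     # (batch, seq_len, n_features)
print(net(dummy).shape)          # torch.Size([4, 1]): (4, 3, 20) -> (4, 60) -> (4, 1)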

Initialization

n_features = 2 # this is number of parallel inputs
n_timesteps = 3 # this is number of timesteps

# convert dataset into input/output
X, y = split_sequences(dataset, n_timesteps)
print(X.shape, y.shape)

# create NN
mv_net = MV_LSTM(n_features,n_timesteps)
criterion = torch.nn.MSELoss() # reduction='sum' created huge loss value
optimizer = torch.optim.Adam(mv_net.parameters(), lr=1e-1)

train_episodes = 500
batch_size = 16

Training

mv_net.train()
for t in range(train_episodes):
    for b in range(0,len(X),batch_size):
        inpt = X[b:b+batch_size,:,:]
        target = y[b:b+batch_size]    

        x_batch = torch.tensor(inpt,dtype=torch.float32)    
        y_batch = torch.tensor(target,dtype=torch.float32)

        mv_net.init_hidden(x_batch.size(0))
    #    lstm_out, _ = mv_net.l_lstm(x_batch,nnet.hidden)    
    #    lstm_out.contiguous().view(x_batch.size(0),-1)
        output = mv_net(x_batch) 
        loss = criterion(output.view(-1), y_batch)  

        loss.backward()
        optimizer.step()        
        optimizer.zero_grad() 
    print('step : ' , t , 'loss : ' , loss.item())

Results

step :  499 loss :  0.0010267728939652443 # probably overfitted due to 500 training episodes
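
To sanity-check the trained network on the first training window (a usage sketch; the exact prediction varies from run to run):

mv_net.eval()
with torch.no_grad():
    mv_net.init_hidden(1)
    sample = torch.tensor(X[:1], dtype=torch.float32)  # first (1, 3, 2) window
    print(mv_net(sample).item(), 'target:', y[0])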