LSTM encoder and decoder architecture for a specific case in PyTorch

Time: 2019-02-18 22:56:12

Tags: python lstm pytorch autoencoder

I am trying to write an LSTM autoencoder architecture to determine the reconstruction error of a given multivariate sequence. The goal is to identify poorly reconstructed observations (rows).
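
For reference, the per-observation scoring step could look roughly like the minimal sketch below; `model` and `x` are illustrative stand-ins, not names from the code further down.

import torch

# Illustrative only: `model` stands in for a trained autoencoder and
# `x` for one batch of multivariate sequences (batch, seq_length, x_dim).
x = torch.randn(4, 144, 10)
model = lambda t: t + 0.1 * torch.randn_like(t)   # dummy "reconstruction"

with torch.no_grad():
    recon = model(x)

# squared error averaged over features, one score per time step (row);
# large values mark poorly reconstructed observations
errors = ((x - recon) ** 2).mean(dim=-1)          # shape: (batch, seq_length)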

I am trying to wrap my head around the network shown below, but I cannot work out how to write the decoder network that takes the encoder output as its input and tries to reconstruct that input.

[Image: LSTM encoder-decoder architecture diagram]

The architecture and its description are available at https://arxiv.org/pdf/1607.00148.pdf

A similar architecture, with a clearer diagram, can be found at https://jeddy92.github.io/JEddy92.github.io/ts_seq2seq_intro/

import torch
import torch.nn as nn


class LstmAutoEncoder(nn.Module):
    def __init__(self, x_dim, h_dim=(32, 16), z_dim=8, seq_length=144,
                 num_layers=1, dropout_frac=0.25, batchnorm=False):

        super(LstmAutoEncoder, self).__init__()
        self.x_dim = x_dim
        self.h_dim = list(h_dim)
        self.z_dim = z_dim

        self.sq_l = seq_length
        self.num_layers = num_layers

        self.dropout_frac = dropout_frac
        self.batchnorm = batchnorm

        self.encoder = EncoderRNN(x_dim, h_dim, z_dim, num_layers, dropout_frac, batchnorm)
        self.decoder = DecoderRNN(x_dim, h_dim, z_dim, num_layers, dropout_frac, batchnorm)

    def forward(self, x):
        """Encode the input sequence and reconstruct it from the latent vector."""
        z = self.encoder(x)
        recon_x = self.decoder(z)
        return recon_x


class EncoderRNN(nn.Module):
    def __init__(self, x_dim, h_dim, z_dim, num_layers,
                 dropout_frac, batchnorm):

        super(EncoderRNN, self).__init__()

        self.nl = num_layers
        self.drpt_fr = dropout_frac
        self.bn = batchnorm

        neurons = [x_dim, *h_dim, z_dim]

        layers = [nn.LSTM(neurons[i - 1], neurons[i], self.nl, batch_first=True)
                  for i in range(1, len(neurons))]
        self.hidden = nn.ModuleList(layers)

        if self.bn:
            bn_layers = [nn.BatchNorm1d(neurons[i]) for i in range(1, len(neurons))]
            self.bns = nn.ModuleList(bn_layers)

    def forward(self, x):
        if self.bn:
            for layer, bnm in zip(self.hidden, self.bns):
                out, (hs, cs) = layer(x)
                # BatchNorm1d expects (batch, features, seq), so transpose around it
                x = bnm(out.transpose(1, 2)).transpose(1, 2)
                x = nn.functional.dropout(x, p=self.drpt_fr, training=self.training)
        else:
            for layer in self.hidden:
                out, (hs, cs) = layer(x)
                x = nn.functional.dropout(out, p=self.drpt_fr, training=self.training)
        # with batch_first=True the output is (batch, seq, features);
        # keep only the last time step as per the architecture in the pic
        return x[:, -1, :]


class DecoderRNN(nn.Module):
    def __init__(self, x_dim, h_dim, z_dim, num_layers, dropout_frac, batchnorm):

        super(DecoderRNN, self).__init__()

        self.nl = num_layers
        self.drpt_fr = dropout_frac
        self.bn = batchnorm

        h_dim = list(reversed(h_dim))
        neurons = [z_dim] + h_dim

        layers = [nn.LSTM(neurons[i - 1], neurons[i], self.nl, batch_first=True)
                  for i in range(1, len(neurons))]
        self.hidden = nn.ModuleList(layers)

        if batchnorm:
            bn_layers = [nn.BatchNorm1d(neurons[i]) for i in range(1, len(neurons))]
            self.bns = nn.ModuleList(bn_layers)

        self.reconstruction = nn.Linear(h_dim[-1], x_dim)

    def forward(self, x):
        ## this is the part I have trouble trying to code
        raise NotImplementedError
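
One possible way to fill in the decoder, shown below as a minimal, self-contained sketch rather than the exact scheme from the paper: tile the latent vector z across the sequence length, run it through the decoder's LSTM stack, and apply the reconstruction layer at every time step. The seq_length constructor argument is an assumed addition that the DecoderRNN above does not currently take.

import torch
import torch.nn as nn


class SketchDecoderRNN(nn.Module):
    """Hypothetical decoder: repeat the latent vector over time, then decode."""

    def __init__(self, x_dim, h_dim, z_dim, seq_length, num_layers=1, dropout_frac=0.25):
        super(SketchDecoderRNN, self).__init__()
        self.seq_length = seq_length
        self.drpt_fr = dropout_frac

        h_dim = list(reversed(h_dim))
        neurons = [z_dim] + h_dim
        self.hidden = nn.ModuleList(
            [nn.LSTM(neurons[i - 1], neurons[i], num_layers, batch_first=True)
             for i in range(1, len(neurons))])
        self.reconstruction = nn.Linear(h_dim[-1], x_dim)

    def forward(self, z):
        # z: (batch, z_dim) -> tiled over time: (batch, seq_length, z_dim)
        x = z.unsqueeze(1).repeat(1, self.seq_length, 1)
        for layer in self.hidden:
            out, (hs, cs) = layer(x)
            x = nn.functional.dropout(out, p=self.drpt_fr, training=self.training)
        # map every time step back to the input feature dimension
        return self.reconstruction(x)


# usage sketch: reconstruct a batch of 4 sequences of length 144 with 10 features
decoder = SketchDecoderRNN(x_dim=10, h_dim=[32, 16], z_dim=8, seq_length=144)
recon = decoder(torch.randn(4, 8))   # -> shape (4, 144, 10)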
