I have a question about the ConvLSTM implementation below. I don't really understand what input_size + hidden_size is, nor why the output is 4 * hidden_size:

self.Gates = nn.Conv2d(input_size + hidden_size, 4 * hidden_size,
                       KERNEL_SIZE, padding=PADDING)

model = ConvLSTMCell(c, d) tells us that c and d are input_size and hidden_size, which are 3 and 5 respectively. I assume c is the number of channels and d is the dimensionality of the output? Also, why do we multiply hidden_size by 4 for the output: is the 4 the standard four gates of an LSTM cell? And could someone explain what happens inside the convolution? Thanks.
import torch
from torch import nn
import torch.nn.functional as f
from torch.autograd import Variable


# Define some constants
KERNEL_SIZE = 3
PADDING = KERNEL_SIZE // 2


class ConvLSTMCell(nn.Module):
    """
    Generate a convolutional LSTM cell
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.Gates = nn.Conv2d(input_size + hidden_size, 4 * hidden_size, KERNEL_SIZE, padding=PADDING)

    def forward(self, input_, prev_state):
        # get batch and spatial sizes
        batch_size = input_.data.size()[0]
        spatial_size = input_.data.size()[2:]

        # generate empty prev_state, if None is provided
        if prev_state is None:
            state_size = [batch_size, self.hidden_size] + list(spatial_size)
            prev_state = (
                Variable(torch.zeros(state_size)),
                Variable(torch.zeros(state_size))
            )

        prev_hidden, prev_cell = prev_state

        # data size is [batch, channel, height, width]
        stacked_inputs = torch.cat((input_, prev_hidden), 1)
        gates = self.Gates(stacked_inputs)

        # chunk across channel dimension
        in_gate, remember_gate, out_gate, cell_gate = gates.chunk(4, 1)

        # apply sigmoid non linearity
        in_gate = f.sigmoid(in_gate)
        remember_gate = f.sigmoid(remember_gate)
        out_gate = f.sigmoid(out_gate)

        # apply tanh non linearity
        cell_gate = f.tanh(cell_gate)

        # compute current cell and hidden state
        cell = (remember_gate * prev_cell) + (in_gate * cell_gate)
        hidden = out_gate * f.tanh(cell)

        return hidden, cell


def _main():
    """
    Run some basic tests on the API
    """

    # define batch_size, channels, height, width
    b, c, h, w = 1, 3, 4, 8
    d = 5           # hidden state size
    lr = 1e-1       # learning rate
    T = 6           # sequence length
    max_epoch = 20  # number of epochs

    # set manual seed
    torch.manual_seed(0)

    print('Instantiate model')
    model = ConvLSTMCell(c, d)
    print(repr(model))

    print('Create input and target Variables')
    x = Variable(torch.rand(T, b, c, h, w))
    y = Variable(torch.randn(T, b, d, h, w))

    print('Create a MSE criterion')
    loss_fn = nn.MSELoss()

    print('Run for', max_epoch, 'iterations')
    for epoch in range(0, max_epoch):
        state = None
        loss = 0
        for t in range(0, T):
            state = model(x[t], state)
            loss += loss_fn(state[0], y[t])

        print(' > Epoch {:2d} loss: {:.3f}'.format((epoch + 1), loss.data[0]))

        # zero grad parameters
        model.zero_grad()

        # compute new grad parameters through time!
        loss.backward()

        # learning_rate step against the gradient
        for p in model.parameters():
            p.data.sub_(p.grad.data * lr)

    print('Input size:', list(x.data.size()))
    print('Target size:', list(y.data.size()))
    print('Last hidden state size:', list(state[0].size()))


if __name__ == '__main__':
    _main()


__author__ = "Alfredo Canziani"
__credits__ = ["Alfredo Canziani"]
__maintainer__ = "Alfredo Canziani"
__email__ = "alfredo.canziani@gmail.com"
__status__ = "Prototype"  # "Prototype", "Development", or "Production"
__date__ = "Jan 17"
Answer 0 (score: 1)
Yes, and yes: since there are four gates, the output is four times hidden_size.
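To see the channel arithmetic concretely, here is a minimal standalone sketch (variable names are hypothetical, and it uses plain tensors rather than the older Variable wrapper): the single convolution takes input_size + hidden_size = 3 + 5 = 8 channels in (the input frame stacked with the previous hidden state) and produces 4 * hidden_size = 20 channels out, which chunk(4, 1) then splits into the four gates of 5 channels each:

import torch
from torch import nn

c, d, h, w = 3, 5, 4, 8              # input channels, hidden channels, height, width
gates = nn.Conv2d(c + d, 4 * d, 3, padding=1)

x = torch.rand(1, c, h, w)           # one input frame
h_prev = torch.zeros(1, d, h, w)     # previous hidden state

stacked = torch.cat((x, h_prev), 1)  # [1, 8, 4, 8]: input and hidden channels stacked
out = gates(stacked)                 # [1, 20, 4, 8]: all four gates computed at once
i, f_, o, g = out.chunk(4, 1)        # four tensors of shape [1, 5, 4, 8] each

print(stacked.shape, out.shape, i.shape)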
Answer 1 (score: -2)
ConvLSTMs are mainly used for video prediction, image generation, and image-to-caption generation. A standard LSTM has four gates (input, output, forget, and cell gates). A ConvLSTM has the same four gates, but instead of computing them with fully connected (matrix) multiplications, it computes them with convolutions inside the LSTM equations.
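To make that concrete, here is a hedged sketch (all names hypothetical) of a single ConvLSTM input gate written out term by term. In a standard LSTM this gate would be i = sigmoid(W_xi @ x + W_hi @ h_prev + b_i) with matrix multiplications; in a ConvLSTM the two multiplications become convolutions, while the element-wise (Hadamard) products with the cell state stay element-wise, exactly as in the code above:

import torch
from torch import nn

c, d, h, w = 3, 5, 4, 8
x = torch.rand(1, c, h, w)        # current input frame
h_prev = torch.zeros(1, d, h, w)  # previous hidden state

# one convolution per term, instead of the single fused Gates convolution above
w_xi = nn.Conv2d(c, d, 3, padding=1, bias=False)  # input-to-state, W_xi
w_hi = nn.Conv2d(d, d, 3, padding=1)              # state-to-state, W_hi (carries the bias b_i)

# i = sigmoid(W_xi * x + W_hi * h_prev + b_i), where * is convolution
i = torch.sigmoid(w_xi(x) + w_hi(h_prev))
print(i.shape)  # [1, 5, 4, 8]: one gate value per hidden channel per pixel

Writing the four gates with separate per-term convolutions like this is mathematically equivalent to the single fused Gates convolution in the code above, which just concatenates its inputs along the channel dimension and stacks the four gates' output channels for efficiency.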