I am running into a data type problem while implementing an LSTM in PyTorch. Based on similar questions, I tried changing the input, h, and c to ShortTensor, but I still get the same error:

RuntimeError: Expected object of scalar type Short but got scalar type Float for argument #2 'mat2' in call to _th_mm
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset

class data(Dataset):
    def __init__(self, samples=10000, number=30):
        self.x = torch.from_numpy(np.matrix(
            np.random.random_integers(0, 9, samples * number).reshape(samples, number)))
        self.y = torch.from_numpy(np.zeros((samples))).type(torch.ShortTensor)
        for index, row in enumerate(self.x):
            self.y[index] = 1 if torch.sum(row) >= 130 else 0
class LSTM(nn.Module):
    def __init__(self, i_size, h_size, n_layer, batch_size=30):
        super().__init__()
        self.lstm = nn.LSTM(input_size=i_size, hidden_size=h_size, num_layers=n_layer)
        self.h = torch.randn(n_layer, batch_size, h_size).type(torch.ShortTensor)
        self.c = torch.randn(n_layer, batch_size, h_size).type(torch.ShortTensor)
        self.hidden = (self.h, self.c)
        self.linear = nn.Linear(n_layer, 1)

    def forward(self, x):
        out, hidden = self.lstm(x.type(torch.ShortTensor), self.hidden)
        out = nn.Softmax(self.linear(out.short()))
        return out
data_set = data()
train_data = data_set.x[0:8000, :, None]
train_label = data_set.y[0:8000]
test_data = data_set.x[8000:, : , None]
test_label = data_set.y[8000:]
input_size = 1
hidden_size = 30
layer_num = 200
model_LSTM = LSTM(input_size, hidden_size, layer_num)
#model_LSTM.cuda()
y_ = model_LSTM(train_data)
Answer 0 (score: 0):
I generated my input data as int16, but apparently nn.LSTM only accepts float32, and the error message does not make that clear.
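
For reference, a minimal sketch of the dtype fix under assumed toy shapes (not the exact 200-layer model from the question): cast the integer input to float32 with .float() and keep the (h, c) states in float32 as well before calling nn.LSTM.

import torch
import torch.nn as nn

# Sketch only: small hypothetical sizes, not the model defined above.
seq_len, batch, input_size, hidden_size, num_layers = 30, 8, 1, 30, 2

lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers)

# Integer data (as generated in the question) must be cast to float32;
# .float() is equivalent to .type(torch.FloatTensor) on CPU.
x_int = torch.randint(0, 10, (seq_len, batch, input_size))
x = x_int.float()

# Hidden and cell states must match the input dtype (float32 here).
h0 = torch.randn(num_layers, batch, hidden_size)
c0 = torch.randn(num_layers, batch, hidden_size)

out, (hn, cn) = lstm(x, (h0, c0))
print(out.dtype)  # torch.float32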