IndexError: index out of range in self

Date: 2020-10-21 19:02:17

Tags: python pytorch lstm

I'm getting the error below at runtime; can you help me figure out what's wrong?

My vocab size is 76, as shown further down.

Here is the relevant part of my code:

import torch
import torch.nn as nn

class LSTMClassifier(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers, num_classes, batch_size):
        super(LSTMClassifier, self).__init__()

        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.num_layers = num_layers

        self.embedding = nn.Embedding(vocab_size, embed_size)  # lookup table: valid indices are 0 .. vocab_size-1
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, dropout=0.3, bidirectional=True)
        self.fc = nn.Sequential(
            nn.Linear(2*hidden_size, 100),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(100, num_classes)
        )
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # 2*num_layers states because the LSTM is bidirectional
        h = to_var(torch.zeros((2*self.num_layers, self.batch_size, self.hidden_size)))
        c = to_var(torch.zeros((2*self.num_layers, self.batch_size, self.hidden_size)))
        return h, c

    def forward(self, x):
        x = self.embedding(x)
        x, self.hidden = self.lstm(x, self.hidden)
        x = self.fc(x[-1])  # select the output at the last time step
        return x
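For context: nn.Embedding only accepts indices in the range [0, vocab_size). A minimal standalone sketch (same sizes as above, but with made-up input tensors) that reproduces exactly this IndexError:

import torch
import torch.nn as nn

emb = nn.Embedding(num_embeddings=76, embedding_dim=100)

ok = torch.tensor([[3, 5, 75]])   # all indices < 76: fine
print(emb(ok).shape)              # torch.Size([1, 3, 100])

bad = torch.tensor([[3, 5, 76]])  # 76 == num_embeddings: out of range
emb(bad)                          # IndexError: index out of range in self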



# LSTM parameters
embed_size = 100
hidden_size = 256
num_layers = 1

# training parameters
lr = 0.001
num_epochs = 10


vocab_size = 2 + len([w for (w, c) in train_ds.vocab.word2count.items() if c >= min_count])
print(vocab_size)

76
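Given that, one thing worth checking is whether every token index the loader produces is actually below 76. A hypothetical sanity check, assuming each batch from train_dl unpacks as (inputs, labels) the way the train_step in the traceback does:

# Rare words filtered out by min_count must be remapped to UNK, not keep
# their original indices, or they land outside [0, vocab_size).
max_idx = max(inputs.max().item() for inputs, _ in train_dl)
print(max_idx)  # the embedding breaks if this is >= vocab_size (76)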



model = LSTMClassifier(embed_size=embed_size, 
                       hidden_size=hidden_size, 
                       vocab_size=vocab_size,
                       num_layers=num_layers,
                       num_classes=train_ds.num_classes, 
                       batch_size=batch_size)

if use_gpu:
    model = model.cuda()

criterion = nn.CrossEntropyLoss()
if use_gpu:
    criterion = criterion.cuda()


optimizer = optim.Adam(model.parameters(), lr=lr, betas=(0.7, 0.99))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.975)

hist = train(model, train_dl, valid_dl, criterion, optimizer, scheduler, num_epochs)
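Before the full run, a dry-run forward pass with deliberately in-range dummy indices can tell whether the model wiring itself is fine (input is sequence-first, since the LSTM is not batch_first and train_step transposes each batch). A sketch, assuming the same batch_size value passed to the model above:

# Hypothetical dry run: random but guaranteed-valid indices, shape (seq_len, batch_size)
dummy = torch.randint(0, vocab_size, (20, batch_size))
if use_gpu:
    dummy = dummy.cuda()
print(model(dummy).shape)  # expected: (batch_size, num_classes)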

The error:

> IndexError                                Traceback (most recent call last)
> <ipython-input-55-7e0f888e140e> in <module>
> ----> 1 hist = train(model, train_dl, valid_dl, criterion, optimizer, scheduler, num_epochs)
> 
> ~\train_utils.py in train(model, train_dl, valid_dl, criterion,
> optimizer, scheduler, num_epochs)
>      82 
>      83         ## perform one epoch of training and validation
> ---> 84         trn_loss, trn_acc = train_step(model, train_dl, criterion, optimizer, scheduler)
>      85         val_loss, val_acc = validate_step(model, valid_dl, criterion)
>      86 
> 
> ~\train_utils.py in train_step(model, train_dl, criterion, optimizer,
> scheduler)
>      26         model.hidden = detach(model.hidden)
>      27         model.zero_grad()
> ---> 28         output = model(train_inputs.t())
>      29 
>      30         loss = criterion(output, train_labels)
> 
> C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py
> in _call_impl(self, *input, **kwargs)
>     720             result = self._slow_forward(*input, **kwargs)
>     721         else:
> --> 722             result = self.forward(*input, **kwargs)
>     723         for hook in itertools.chain(
>     724                 _global_forward_hooks.values(),
> 
> <ipython-input-21-360ed93de0e5> in forward(self, x)
>      24 
>      25     def forward(self, x):
> ---> 26         x = self.embedding(x)
>      27         x, self.hidden = self.lstm(x, self.hidden)
>      28         x = self.fc(x[-1])  # select the last output
> 
> C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py
> in _call_impl(self, *input, **kwargs)
>     720             result = self._slow_forward(*input, **kwargs)
>     721         else:
> --> 722             result = self.forward(*input, **kwargs)
>     723         for hook in itertools.chain(
>     724                 _global_forward_hooks.values(),
> 
> C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\sparse.py
> in forward(self, input)
>     122 
>     123     def forward(self, input: Tensor) -> Tensor:
> --> 124         return F.embedding(
>     125             input, self.weight, self.padding_idx, self.max_norm,
>     126             self.norm_type, self.scale_grad_by_freq, self.sparse)
> 
> C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
>    1812         # remove once script supports set_grad_enabled
>    1813         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
> -> 1814     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
>    1815
>    1816
> 
> IndexError: index out of range in self

0 Answers:

No answers yet.