我正在使用在github中找到的模型对自己的数据进行微调。我是使用python编程的新手。
我不知道该如何解决。谁能告诉我这个错误是什么以及如何纠正这个错误?
Traceback (most recent call last): File "finetune.py", line 254, in <module> main() File "finetune.py", line 127, in main train(TrainImgLoader, model, optimizer, log, epoch) File "finetune.py", line 170, in train loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True) File "finetune.py", line 170, in <listcomp> loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True) IndexError: too many indices for tensor of dimension 3
这是代码段。
for epoch in range(args.start_epoch, args.epochs):
log.info('This is {}-th epoch'.format(epoch))
adjust_learning_rate(optimizer, epoch)
train(TrainImgLoader, model, optimizer, log, epoch)
savefilename = args.save_path + '/checkpoint.tar'
torch.save({
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, savefilename)
if epoch % 1 ==0:
test(TestImgLoader, model, log)
test(TestImgLoader, model, log)
log.info('full training time = {:.2f} Hours'.format((time.time() - start_full_time) / 3600))
def train(dataloader, model, optimizer, log, epoch=0):
stages = 3 + args.with_spn
losses = [AverageMeter() for _ in range(stages)]
length_loader = len(dataloader)
model.train()
for batch_idx, (imgL, imgR, disp_L) in enumerate(dataloader):
imgL = imgL.float()#.cuda()
imgR = imgR.float()#.cuda()
disp_L = disp_L.float()#.cuda()
optimizer.zero_grad()
mask = disp_L > 0
mask.detach_()
outputs = model(imgL, imgR)
if args.with_spn:
if epoch >= args.start_epoch_for_spn:
num_out = len(outputs)
else:
num_out = len(outputs) - 1
else:
num_out = len(outputs)
outputs = [torch.squeeze(output, 1) for output in outputs]
loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
for x in range(num_out)]
sum(loss).backward()
optimizer.step()
非常感谢!