import torch
import torch.nn as nn
import torchvision.datasets as dsets
from skimage import transform
import torchvision.transforms as transforms
from torch.autograd import Variable
import pandas as pd;
import numpy as np;
from torch.utils.data import Dataset, DataLoader
import statistics
import random
import math
class FashionMNISTDataset(Dataset):
'''Fashion MNIST Dataset'''
def __init__(self, csv_file, transform=None):
"""
Args:
csv_file (string): Path to the csv file
transform (callable): Optional transform to apply to sample
"""
data = pd.read_csv(csv_file)
self.X = np.array(data.iloc[:, 1:]).reshape(-1, 1, 28, 28)
self.Y = np.array(data.iloc[:, 0])
del data
self.transform = transform
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
item = self.X[idx]
label = self.Y[idx]
if self.transform:
item = self.transform(item)
return (item, label)
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(616,300),
nn.ReLU())
self.layer2 = nn.Sequential(
nn.Linear(300,100),
nn.ReLU())
self.fc = nn.Linear(100, 10)
def forward(self, x):
print("x shape",x.shape)
out = self.layer1(x)
out = self.layer2(out)
out = self.fc(out)
return out
def run():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
num_epochs = 15
batch_size = 100
learning_rate = 0.0001
train_dataset = FashionMNISTDataset(csv_file='fashion-mnist_train.csv')
test_dataset = FashionMNISTDataset(csv_file='fashion-mnist_test.csv')
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,batch_size=batch_size,shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,batch_size=batch_size,shuffle=True)
#instance of the Conv Net
cnn = CNN()
cnn.to(device)
#loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
losses = []
for epoch in range(num_epochs):
l = 0
for i, (images, labels) in enumerate(train_loader):
images = Variable(images.float())
labels = Variable(labels)
#print(images[0])
images = images.to(device)
labels = labels.to(device)
print("img shape=",images.shape, "label shape=",labels.shape)
images = images.resize_((100,616))
print("img shape=",images.shape, "label shape=",labels.shape)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = cnn(images)
loss = criterion(outputs, labels)
#print(loss)
loss.backward()
optimizer.step()
#print(loss.item())
losses.append(loss.item())
l = loss.item()
cnn.eval()
with torch.no_grad():
val_loss = []
for images, labels in test_loader:
images = Variable(images.float()).to(device)
labels = labels.to(device)
outputs = cnn.forward(images)
batch_loss = criterion(outputs, labels)
val_loss.append(batch_loss.item())
avgloss = statistics.mean(val_loss)
if avgloss < min(losses):
torch.save(cnn.state_dict(), 'model')
cnn.train()
if (i+1) % 100 == 0:
print ('Epoch : %d/%d, Iter : %d/%d, Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.item()))
print(l)
final_model = CNN()
final_model.load_state_dict(torch.load('model'))
final_model.eval()
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images.float()).to(device)
outputs = final_model(images).to(device)
labels.to(device)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Test Accuracy of the model on the 10000 test images: %.4f %%' % (100 * correct / total))
if __name__ == '__main__':
run()
为了测试目的,我随附了所有代码。但这是我得到的错误
img shape = torch.Size([100,1,28,28])标签shape = torch.Size([100]) img shape = torch.Size([100,616])标签shape = torch.Size([100])x 形状割炬尺寸([100,616])x形状割炬尺寸([100,1,28,28]) 追溯(最近一次通话):文件“ test.py”,行145,在 run()运行中的文件“ test.py”,第115行 输出= cnn.forward(images)文件“ test.py”,第56行,向前 out = self.layer1(x)文件“ /usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/module.py”, 第489行,在致电中 结果= self.forward(* input,** kwargs)文件“ /usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/container.py”, 第92行,向前 输入=模块(输入)文件“ /usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/module.py”, 第489行,在致电中 结果= self.forward(* input,** kwargs)文件“ /usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/linear.py”, 67行,向前 返回F.linear(input,self.weight,self.bias)文件“ /usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/functional.py”, 1354行,线性 输出= input.matmul(weight.t())RuntimeError:大小不匹配,m1:[2800 x 28],m2:[616 x 300],位于 /opt/conda/conda-bld/pytorch_1549630534704/work/aten/src/THC/generic/THCTensorMathBlas.cu:266
这里的问题是我希望所有616像素都作为输入输入到神经网络,但我不知道该怎么做。我试图重塑输入以解决问题,但它运行了model.forward两次,一次是正确的形状,然后是错误的形状。
答案 0 :(得分:1)
您在forward
中两次致电run
:
但是,您不是似乎已对验证数据应用了以下转换:
images = images.resize_((100,616))
也许可以考虑在forward
函数中进行调整大小。