I am trying to add the final weights of each trained model to a list, using the following code:
%reset -f
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data_utils
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from matplotlib import pyplot
from pandas import DataFrame
import torchvision.datasets as dset
import os
import torch.nn.functional as F
import time
import random
import pickle
from sklearn.metrics import confusion_matrix
import pandas as pd
import sklearn
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
root = './data'
if not os.path.exists(root):
    os.mkdir(root)
train_set = dset.MNIST(root=root, train=True, transform=trans, download=True)
test_set = dset.MNIST(root=root, train=False, transform=trans, download=True)
batch_size = 64
train_loader = torch.utils.data.DataLoader(
    dataset=train_set,
    batch_size=batch_size,
    shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_set,
    batch_size=batch_size,
    shuffle=True)
class NeuralNet(nn.Module):
    def __init__(self):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(28*28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 2)

    def forward(self, x):
        x = x.view(-1, 28*28)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
num_epochs = 2
random_sample_size = 200
values_0_or_1 = [t for t in train_set if (int(t[1]) == 0 or int(t[1]) == 1)]
values_0_or_1_testset = [t for t in test_set if (int(t[1]) == 0 or int(t[1]) == 1)]
print(len(values_0_or_1))
print(len(values_0_or_1_testset))
train_loader_subset = torch.utils.data.DataLoader(
    dataset=values_0_or_1,
    batch_size=batch_size,
    shuffle=True)
test_loader_subset = torch.utils.data.DataLoader(
    dataset=values_0_or_1_testset,
    batch_size=batch_size,
    shuffle=False)
train_loader = train_loader_subset
# Hyper-parameters
input_size = 100
hidden_size = 100
num_classes = 2
# learning_rate = 0.00001
learning_rate = .0001
# Device configuration
device = 'cpu'
print_progress_every_n_epochs = 1
model = NeuralNet().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
N = len(train_loader)
# Train the model
total_step = len(train_loader)
rm = random.sample(list(values_0_or_1), random_sample_size)
train_loader_subset = data_utils.DataLoader(rm, batch_size=4)
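# Lists meant to collect each run's final fc2 weights; they are never
# appended to yet (see the sketch after the training loop below).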
weights_without_clone = []
weights_with_clone = []
for run in range(2):  # two training runs; a distinct name so the batch index i is not shadowed
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader_subset):
            # Move tensors to the configured device, flattening each image to a 784-vector
            images = images.reshape(-1, 28*28).to(device)
            labels = labels.to(device)
            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if epoch % print_progress_every_n_epochs == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
    print('model fc2 weights ', model.fc2.weight.data)
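The two lists above (weights_without_clone and weights_with_clone) are where I want each run's final weights to end up. A minimal sketch of the append step I have in mind, placed inside the outer loop right after the weights are printed (this is my assumption about the missing step, not code I have verified):

# Record this run's final fc2 weights (continuing from the loop above).
# Without clone(), the list stores a reference to the live tensor, so all
# entries end up reflecting whatever the weights are after the last run;
# clone() snapshots the current values so later training cannot change them.
weights_without_clone.append(model.fc2.weight.data)
weights_with_clone.append(model.fc2.weight.data.clone())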
The model outputs:
12665
2115
Epoch [1/2], Step [50/198], Loss: 0.0968
Epoch [2/2], Step [50/198], Loss: 0.0082
model fc2 weights tensor([[-3.9507e-02, -4.0454e-02, 3.5576e-03, ..., 6.2181e-03,
4.1372e-02, -6.2960e-03],
[ 1.8778e-02, 2.7049e-02, -3.5624e-02, ..., 2.6797e-02,
2.2041e-03, -4.2284e-02],
[ 1.9571e-02, -3.2545e-02, 2.6618e-02, ..., -1.6139e-02,
4.1192e-02, -2.3458e-02],
...,
[-4.6123e-03, 2.6943e-02, 3.9979e-02, ..., -3.3848e-02,
3.6096e-02, 2.4211e-02],
[-1.4698e-02, 9.7528e-04, -2.5244e-03, ..., -3.3145e-02,
1.0888e-02, 3.1091e-02],
[-1.7451e-02, -2.1646e-02, 2.5885e-02, ..., 4.0453e-02,
-6.5324e-03, -3.5410e-02]])
Epoch [1/2], Step [50/198], Loss: 0.0025
Epoch [2/2], Step [50/198], Loss: 0.0013
model fc2 weights tensor(1.00000e-02 *
[[-3.9891, -4.0454, 0.3558, ..., 0.7168, 4.1902, -0.6253],
[ 1.8766, 2.7049, -3.5632, ..., 2.6785, 0.2192, -4.2297],
[ 2.1426, -3.2545, 2.6621, ..., -1.6285, 4.1196, -2.2653],
...,
[-0.4930, 2.6943, 3.9971, ..., -3.2940, 3.6641, 2.4248],
[-1.5160, 0.0975, -0.2524, ..., -3.1938, 1.1753, 3.1065],
[-1.8116, -2.1646, 2.5883, ..., 4.1355, -0.5921, -3.5416]])
Why is tensor(1.00000e-02 * printed at the front of the second output? To get the actual weight values, do I need to multiply every value in the weight tensor by 1.00000e-02?
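A minimal sketch of how one stored value can be checked directly, bypassing the tensor pretty-printer (continuing from the code above; .item() assumes PyTorch 0.4 or newer):

# Index out a single element: this prints the number actually stored in
# the tensor, independent of how the full tensor is displayed.
w = model.fc2.weight.data
print(w[0, 0].item())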