Task: write and train an autoencoder on the fetch_lfw_people dataset. Write an epoch-by-epoch training loop, write code to visualize the learning process, and compute a validation metric after every epoch. Train the autoencoder and reduce the validation loss.
My code:
from sklearn.datasets import fetch_lfw_people
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt  # needed for the plotting code below
import torch
from torch.utils.data import TensorDataset, DataLoader
Data preparation:
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
X = lfw_people['images']
X_train, X_test = train_test_split(X, test_size=0.1)
X_train = torch.tensor(X_train, dtype=torch.float32, requires_grad=True)
X_test = torch.tensor(X_test, dtype=torch.float32, requires_grad=False)
dataset_train = TensorDataset(X_train, torch.zeros(len(X_train)))
dataset_test = TensorDataset(X_test, torch.zeros(len(X_test)))
batch_size = 32
train_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)
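For orientation, with min_faces_per_person=70 and resize=0.4 the standard LFW download should yield 1288 grayscale faces of size 50x37 (the same configuration as scikit-learn's eigenfaces example), which is worth confirming before sizing the convolutions:

print(X.shape)  # expected: (1288, 50, 37)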
Create a network with encode and decode methods:
class Autoencoder(torch.nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2)
        )
        self.decoder = torch.nn.Sequential(
            torch.nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=2),
            torch.nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=(3, 4), stride=2),
            torch.nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2),
            torch.nn.ConvTranspose2d(in_channels=32, out_channels=1, kernel_size=(4, 3), stride=2)
        )

    def encode(self, X):
        encoded_X = self.encoder(X)
        batch_size = X.shape[0]
        return encoded_X.reshape(batch_size, -1)

    def decode(self, X):
        pre_decoder = X.reshape(-1, 64, 2, 1)
        return self.decoder(pre_decoder)
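As a sanity check of the hard-coded reshape in decode: with resize=0.4 the LFW faces are 50x37, and the four stride-2, kernel-3 convolutions shrink them to 24x18, 11x8, 5x3, and finally 2x1, which matches reshape(-1, 64, 2, 1). A quick sketch to confirm the shapes (the dummy input is illustrative only):

model = Autoencoder()
with torch.no_grad():
    dummy = torch.zeros(1, 1, 50, 37)               # one fake grayscale face
    print(model.encoder(dummy).shape)               # expected: torch.Size([1, 64, 2, 1])
    print(model.decode(model.encode(dummy)).shape)  # expected: torch.Size([1, 1, 50, 37])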
Before training, I checked the model's output on a single example:
model = Autoencoder()
sample = X_test[:1]
sample = sample[:, None]
result = model.decode(model.encode(sample)) # before train
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
ax1.imshow(sample[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
ax2.imshow(result[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
plt.show()
The result is not meaningful yet. I start training:
model = Autoencoder()
loss = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
history_train = []
history_test = []
for i in range(5):
    for x, y in train_loader:
        x = x[:, None]
        model.train()
        decoded_x = model.decode(model.encode(x))
        mse_loss = loss(torch.tensor(decoded_x, dtype=torch.float), x)
        optimizer.zero_grad()
        mse_loss.backward()
        optimizer.step()
        history_train.append(mse_loss.detach().numpy())
    model.eval()
    with torch.no_grad():
        for x, y in train_loader:
            x = x[:, None]
            result_x = model.decode(model.encode(x))
            loss_test = loss(torch.tensor(result_x, dtype=torch.float), x)
            history_test.append(loss_test.detach().numpy())
plt.subplot(1, 2, 1)
plt.plot(history_train)
plt.title("Optimization process for train data")
plt.subplot(1, 2, 2)
plt.plot(history_test)
plt.title("Loss for test data")
plt.show()
The loss is huge on both the training and the test data.
After training, nothing has changed:
with torch.no_grad():
    model.eval()
    res1 = model.decode(model.encode(sample))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
ax1.imshow(sample[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
ax2.imshow(res1[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
plt.show()
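A quick way to check whether training touches the weights at all (a sketch; encoder[0] is the first Conv2d layer of the model above):

w_before = model.encoder[0].weight.detach().clone()
# ... run the training loop from above ...
w_after = model.encoder[0].weight.detach()
print(torch.allclose(w_before, w_after))  # True means the optimizer never updated the model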
Why is the loss so large? Scaling the input to the interval [-1, 1] did not help; I did it like this: (values / 255) * 2 - 1. Why do the model's parameters not change after training? Why does the decoded sample not change?
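For reference, that scaling would look like this in code (a sketch; if your scikit-learn version already returns the pixels as floats in [0.0, 1.0], only the * 2 - 1 step is needed):

X = (X / 255.0) * 2.0 - 1.0  # map [0, 255] -> [-1, 1]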
Results (before training, after training, loss): https://i.stack.imgur.com/OhdrJ.jpg
Answer (score: 0):
1) Replace the line

mse_loss = loss(torch.tensor(decoded_x, dtype=torch.float), x)

with the line

mse_loss = loss(decoded_x, x)

torch.tensor(decoded_x, ...) builds a new tensor that is detached from the computation graph, so mse_loss.backward() never propagates gradients to the model's parameters (the call only avoids an error because X_train was created with requires_grad=True, so gradients flow into the input instead). This is why neither the parameters nor the decoded sample change after training.

2) Replace the lines

model.eval()
with torch.no_grad():
    for x, y in train_loader:

with the lines

model.eval()
with torch.no_grad():
    for x, y in test_loader:

Otherwise history_test is computed on the training data again rather than on the held-out test split.
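Putting both fixes together, a minimal sketch of the corrected loop (reusing the model, loss, optimizer, loaders, and history lists defined in the question):

for epoch in range(5):
    model.train()
    for x, y in train_loader:
        x = x[:, None]                        # add the channel dimension
        decoded_x = model.decode(model.encode(x))
        mse_loss = loss(decoded_x, x)         # no torch.tensor() wrapper: keep the graph intact
        optimizer.zero_grad()
        mse_loss.backward()
        optimizer.step()
        history_train.append(mse_loss.item())
    model.eval()
    with torch.no_grad():
        for x, y in test_loader:              # validate on the held-out split
            x = x[:, None]
            result_x = model.decode(model.encode(x))
            history_test.append(loss(result_x, x).item())

With gradients actually reaching the weights, model.encoder[0].weight should now differ from its initial values after training, and the reconstruction of sample should visibly change.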