
python - PyTorch MNIST autoencoder to learn 10-digit classification


I'm trying to build a simple autoencoder for MNIST, where the middle layer is just 10 neurons. I hope it will learn to classify the 10 digits, and I assume that would ultimately lead to the lowest error (w.r.t. reproducing the original image).

I have the following code, which I've already tinkered with quite a bit. If I run it for up to 100 epochs, the loss doesn't really drop below 1.0, and if I evaluate it, it clearly doesn't work. What am I missing?

Training:

import torch
import torchvision as tv
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image

num_epochs = 100
batch_size = 64

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
trainset = tv.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)

class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            # 28 x 28
            nn.Conv2d(1, 4, kernel_size=5),
            nn.Dropout2d(p=0.2),
            # 4 x 24 x 24
            nn.ReLU(True),
            nn.Conv2d(4, 8, kernel_size=5),
            nn.Dropout2d(p=0.2),
            # 8 x 20 x 20 = 3200
            nn.ReLU(True),
            nn.Flatten(),
            nn.Linear(3200, 10),
            nn.ReLU(True),
            # 10
            nn.Softmax(),
            # 10
        )
        self.decoder = nn.Sequential(
            # 10
            nn.Linear(10, 400),
            nn.ReLU(True),
            # 400
            nn.Unflatten(1, (1, 20, 20)),
            # 20 x 20
            nn.Dropout2d(p=0.2),
            nn.ConvTranspose2d(1, 10, kernel_size=5),
            # 24 x 24
            nn.ReLU(True),
            nn.Dropout2d(p=0.2),
            nn.ConvTranspose2d(10, 1, kernel_size=5),
            # 28 x 28
            nn.ReLU(True),
            nn.Sigmoid(),
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

model = Autoencoder().cpu()
distance = nn.MSELoss()
#optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-5)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

for epoch in range(num_epochs):
    for data in dataloader:
        img, _ = data
        img = Variable(img).cpu()
        output = model(img)
        loss = distance(output, img)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('epoch [{}/{}], loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

The training loss already suggests it isn't working, but instead of relying on that, print out the confusion matrix (which in this case need not be the identity matrix, since the neurons can be ordered arbitrarily, but it should be row/column-reorderable into approximately the identity if this worked; a sketch of that check follows the code):

import numpy as np

confusion_matrix = np.zeros((10, 10))

batch_size = 20 * 1000

testset = tv.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=4)

for data in dataloader:
    imgs, labels = data
    imgs = Variable(imgs).cpu()
    encs = model.encoder(imgs).detach().numpy()
    for i in range(len(encs)):
        predicted = np.argmax(encs[i])
        actual = labels[i]
        confusion_matrix[actual][predicted] += 1
print(confusion_matrix)
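
As an aside, here is a minimal sketch of that reorderability check (assuming scipy is available; it is not part of the original code). The Hungarian algorithm finds the neuron-to-digit column permutation that puts the most mass on the diagonal:

from scipy.optimize import linear_sum_assignment

# Best neuron-to-digit assignment: maximize the total count on the diagonal.
row_ind, col_ind = linear_sum_assignment(confusion_matrix, maximize=True)
reordered = confusion_matrix[:, col_ind]  # reorder columns to best match rows
print('Best-permutation accuracy:', reordered.trace() / confusion_matrix.sum())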

Best Answer

Autoencoders are generally not used as classifiers, technically speaking. They learn how to encode a given image into a short vector and to reconstruct that same image from the encoded vector. It is a way of compressing images into short vectors:

[Image: Autoencoder]
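
To make that concrete, here is a minimal toy sketch of the encode/decode idea (a throwaway linear model, not the network discussed in this question):

import torch
import torch.nn as nn

# Compress a 28x28 image into a 10-dimensional code and decode it back.
encoder = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
decoder = nn.Sequential(nn.Linear(10, 28 * 28), nn.Unflatten(1, (1, 28, 28)))

x = torch.randn(1, 1, 28, 28)    # a dummy "image"
code = encoder(x)                # shape (1, 10): the short vector
reconstruction = decoder(code)   # shape (1, 1, 28, 28)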

Since you want to train an autoencoder with classification ability, we need to make some changes to the model. First, there will be two different losses (a short sketch of the combined objective follows this list):

  1. MSE loss: the current autoencoder reconstruction loss. This forces the network, via the compressed representation, to output an image as close as possible to the given input.
  2. Classification loss: the classic cross-entropy should do the trick. This loss takes the compressed representation (C-dimensional) and the target labels to compute the negative log-likelihood. It forces the encoder to produce a compressed representation that aligns well with the target class.
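
Viewed in isolation, the combined objective is just a weighted sum; here is a runnable sketch with dummy tensors (the real training loop below uses this exact pattern):

import torch
import torch.nn as nn

mse = nn.MSELoss()
ce = nn.CrossEntropyLoss()

# Dummy stand-ins for one batch: reconstructions and images are 1x28x28,
# logits are the encoder's 10 raw scores, labels are the digit classes.
reconstruction = torch.randn(64, 1, 28, 28)
img = torch.randn(64, 1, 28, 28)
logits = torch.randn(64, 10)
labels = torch.randint(0, 10, (64,))

# Each term's gradient pulls the shared encoder toward its own goal.
loss = 0.5 * mse(reconstruction, img) + 0.5 * ce(logits, labels)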

I made a few changes to your code to get this combined model working. First, let's look at the code:

import torch
import torchvision as tv
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image

num_epochs = 10
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

trainset = tv.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
testset = tv.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=4)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Autoencoderv3(nn.Module):
    def __init__(self):
        super(Autoencoderv3, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 4, kernel_size=5),
            nn.Dropout2d(p=0.1),
            nn.ReLU(True),
            nn.Conv2d(4, 8, kernel_size=5),
            nn.Dropout2d(p=0.1),
            nn.ReLU(True),
            nn.Flatten(),
            nn.Linear(3200, 10)
        )
        self.softmax = nn.Softmax(dim=1)
        self.decoder = nn.Sequential(
            nn.Linear(10, 400),
            nn.ReLU(True),
            nn.Unflatten(1, (1, 20, 20)),
            nn.Dropout2d(p=0.1),
            nn.ConvTranspose2d(1, 10, kernel_size=5),
            nn.ReLU(True),
            nn.Dropout2d(p=0.1),
            nn.ConvTranspose2d(10, 1, kernel_size=5)
        )

    def forward(self, x):
        out_en = self.encoder(x)
        out = self.softmax(out_en)
        out = self.decoder(out)
        return out, out_en

model = Autoencoderv3().to(device)
distance = nn.MSELoss()
class_loss = nn.CrossEntropyLoss()

optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

mse_multp = 0.5
cls_multp = 0.5

model.train()

for epoch in range(num_epochs):
    total_mseloss = 0.0
    total_clsloss = 0.0
    for ind, data in enumerate(dataloader):
        img, labels = data[0].to(device), data[1].to(device)
        output, output_en = model(img)
        loss_mse = distance(output, img)
        loss_cls = class_loss(output_en, labels)
        loss = (mse_multp * loss_mse) + (cls_multp * loss_cls)  # Combine the two losses
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Track this epoch's loss
        total_mseloss += loss_mse.item()
        total_clsloss += loss_cls.item()

    # Check accuracy on the test set after each epoch:
    model.eval()  # Turn off dropout in evaluation mode
    acc = 0.0
    total_samples = 0
    for data in testloader:
        # We only care about the 10-dimensional encoder output for classification
        img, labels = data[0].to(device), data[1].to(device)
        _, output_en = model(img)
        # output_en contains 10 values for each input; apply softmax to get class probabilities
        prob = nn.functional.softmax(output_en, dim=1)
        pred = torch.max(prob, dim=1)[1].detach().cpu().numpy()  # Class with max probability
        acc += (pred == labels.cpu().numpy()).sum()
        total_samples += labels.shape[0]
    model.train()  # Enable dropout again
    print('epoch [{}/{}], loss_mse: {:.4f} loss_cls: {:.4f} Acc on test: {:.4f}'.format(
        epoch + 1, num_epochs, total_mseloss / len(dataloader), total_clsloss / len(dataloader), acc / total_samples))

This code should now train the model both as a classifier and as a generative autoencoder. Generally speaking, though, this type of approach can be a bit tricky to train. Here the MNIST data is simple enough that the two complementary losses can be trained together. In more complex cases such as Generative Adversarial Networks (GANs), people apply training switches between models, freeze one model, and so on, to train the whole system. This autoencoder model trains easily on MNIST without any such tricks:

epoch [1/10], loss_mse: 0.8928 loss_cls: 0.4627 Acc on test: 0.9463
epoch [2/10], loss_mse: 0.8287 loss_cls: 0.2105 Acc on test: 0.9639
epoch [3/10], loss_mse: 0.7803 loss_cls: 0.1574 Acc on test: 0.9737
epoch [4/10], loss_mse: 0.7513 loss_cls: 0.1290 Acc on test: 0.9764
epoch [5/10], loss_mse: 0.7298 loss_cls: 0.1117 Acc on test: 0.9762
epoch [6/10], loss_mse: 0.7110 loss_cls: 0.1017 Acc on test: 0.9801
epoch [7/10], loss_mse: 0.6962 loss_cls: 0.0920 Acc on test: 0.9794
epoch [8/10], loss_mse: 0.6824 loss_cls: 0.0859 Acc on test: 0.9806
epoch [9/10], loss_mse: 0.6733 loss_cls: 0.0797 Acc on test: 0.9814
epoch [10/10], loss_mse: 0.6671 loss_cls: 0.0764 Acc on test: 0.9813
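
As an aside, the "freeze one model" trick mentioned above usually amounts to just a few lines; a minimal sketch (not needed for this MNIST model):

# Phase 1: train only the encoder/classifier; decoder weights stay fixed.
for p in model.decoder.parameters():
    p.requires_grad_(False)
# ... run some training steps ...
# Phase 2: unfreeze the decoder for the next phase.
for p in model.decoder.parameters():
    p.requires_grad_(True)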

As the training log shows, both the MSE loss and the classification loss decrease while accuracy on the test set increases. In the code, the MSE loss and the classification loss are added together. That means the respective gradients computed from each loss compete with each other to pull the network in their own direction. I added loss multipliers to control the contribution of each loss: if MSE has the higher multiplier, the network receives more gradient from the MSE loss, meaning it learns reconstruction better; if the CLS loss has the higher multiplier, the network achieves better classification accuracy. You can play with these multipliers to see how the final result changes, though MNIST is such an easy dataset that the differences may be hard to see. At the moment, it does a decent job of reconstructing the input:

import numpy as np
import matplotlib.pyplot as plt

model.eval()
img, labels = list(dataloader)[0]
img = img.to(device)
output, output_en = model(img)
inp = img[0:10, 0, :, :].squeeze().detach().cpu()
out = output[0:10, 0, :, :].squeeze().detach().cpu()

# Just a trick to concatenate the first ten images next to each other
inp = inp.permute(1, 0, 2).reshape(28, -1).numpy()
out = out.permute(1, 0, 2).reshape(28, -1).numpy()
combined = np.vstack([inp, out])

plt.imshow(combined)
plt.show()

[Image: Reconstruction]

I believe that with more training and fine-tuning of the loss multipliers you could get even better results.

Finally, the decoder receives the softmax of the encoder output. That means the decoder tries to create an output image from the 0-1 class probabilities it is given. So if the softmax probability vector is 0.98 at position 0 and close to zero everywhere else, the decoder should output an image that looks like a 0. Here I feed the network one-hot inputs to create reconstructions of the digits 0 through 9:

test_arr = np.zeros([10, 10], dtype=np.float32)
ind = np.arange(0, 10)
test_arr[ind, ind] = 1.0

model.eval()
img = torch.from_numpy(test_arr).to(device)
out = model.decoder(img)
out = out[0:10, 0, :, :].squeeze().detach().cpu()
out = out.permute(1, 0, 2).reshape(28, -1).numpy()
plt.imshow(out)
plt.show()

[Image: 0 to 9 reconstruction]
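
For what it's worth, the same one-hot batch can be built directly in PyTorch; a one-line alternative to the numpy construction above:

img = torch.eye(10, device=device)  # row i is the one-hot vector for digit i
out = model.decoder(img)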

I also made a few minor changes to the code, such as printing the average loss per epoch. These don't really change the training logic, so you can spot them in the code; let me know if anything looks odd.

Regarding python - PyTorch MNIST autoencoder to learn 10-digit classification, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/66667949/
