python-3.x - Pytorch: ValueError: Expected input batch_size (32) to match target batch_size (64)

I am trying to run a CNN example on the MNIST dataset with batch size = 64, channels = 1, n_h = 28, n_w = 28, n_iters = 1000. The program runs through the first 500 iterations and then raises the error above. The same topic has already been discussed on the forum, e.g. topic 1 and topic 2, but neither of them helped me identify the error in the following code:

class CNN_MNIST(nn.Module):
    def __init__(self):
        super(CNN_MNIST, self).__init__()

        # convolution layer 1
        self.cnn1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5,
                              stride=1, padding=2)

        # ReLU activation
        self.relu1 = nn.ReLU()

        # maxpool 1
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        # convolution 2
        self.cnn2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5,
                              stride=1, padding=2)

        # ReLU activation
        self.relu2 = nn.ReLU()

        # maxpool 2
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # fully connected 1
        self.fc1 = nn.Linear(7 * 7 * 64, 1000)
        # fully connected 2
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        # convolution 1
        out = self.cnn1(x)
        # activation function
        out = self.relu1(out)
        # maxpool 1
        out = self.maxpool1(out)

        # convolution 2
        out = self.cnn2(out)
        # activation function
        out = self.relu2(out)
        # maxpool 2
        out = self.maxpool2(out)

        # flatten the output
        out = out.view(out.size(0), -1)

        # fully connected layers
        out = self.fc1(out)
        out = self.fc2(out)

        return out
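
As a sanity check (a minimal sketch, assuming the CNN_MNIST class above and that torch and torch.nn as nn are imported), the spatial size shrinks 28 -> 14 -> 7 through the two max-pool layers, which is why fc1 expects 7*7*64 input features:

import torch

# minimal shape check, assuming the CNN_MNIST class defined above
model = CNN_MNIST()
dummy = torch.randn(64, 1, 28, 28)   # a batch of 64 single-channel 28x28 images
print(model(dummy).shape)            # expected: torch.Size([64, 10])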
# model training
count = 0
loss_list = []
iteration_list = []
accuracy_list = []

for epoch in range(int(n_epochs)):
    for i, (image, labels) in enumerate(train_loader):

        train = Variable(image)
        labels = Variable(labels)

        # clear gradients
        optimizer.zero_grad()

        # forward propagation
        output = cnn_model(train)

        # calculate softmax and cross entropy loss
        loss = error(output, label)

        # calculate gradients
        loss.backward()

        # update the optimizer
        optimizer.step()

        count += 1

        if count % 50 == 0:
            # calculate the accuracy
            correct = 0
            total = 0

            # iterate through the test data
            for image, labels in test_loader:

                test = Variable(image)

                # forward propagation
                output = cnn_model(test)

                # get prediction
                predict = torch.max(output.data, 1)[1]

                # total number of labels
                total += len(labels)

                # correct predictions
                correct += (predict == labels).sum()

            # accuracy
            accuracy = 100 * correct / float(total)

            # store loss, number of iterations, and accuracy
            loss_list.append(loss.data)
            iteration_list.append(count)
            accuracy_list.append(accuracy)

        # print loss and accuracy as the algorithm progresses
        if count % 500 == 0:
            print('Iteration: {} Loss: {} Accuracy: {}'.format(count, loss.item(), accuracy))

The error is as follows:

    ---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-19-9e93a242961b> in <module>
18
19 # calculate softmax and cross entropy loss
---> 20 loss = error(output,label)
21
22 # calculate gradients

~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
545 result = self._slow_forward(*input, **kwargs)
546 else:
--> 547 result = self.forward(*input, **kwargs)
548 for hook in self._forward_hooks.values():
549 hook_result = hook(self, input, result)

~\Anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
914 def forward(self, input, target):
915 return F.cross_entropy(input, target, weight=self.weight,
--> 916 ignore_index=self.ignore_index, reduction=self.reduction)
917
918

~\Anaconda3\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
1993 if size_average is not None or reduce is not None:
1994 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 1995 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
1996
1997

~\Anaconda3\lib\site-packages\torch\nn\functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
1820 if input.size(0) != target.size(0):
1821 raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).'
-> 1822 .format(input.size(0), target.size(0)))
1823 if dim == 2:
1824 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

ValueError: Expected input batch_size (32) to match target batch_size (64).

Best Answer

You are providing the wrong target to your loss:

loss = error(output, label)

while your loader gives you

for i, (image, labels) in enumerate(train_loader):

    train = Variable(image)
    labels = Variable(labels)

So the variable coming from your loader is named labels (with an s), but you are passing label (without the s) to your loss.
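
A minimal fix (a sketch, using the variable names from the training loop above) is to pass the current batch's labels to the criterion:

# pass the labels from the current train_loader batch to the loss
loss = error(output, labels)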

The batch size is the least of your worries.

Regarding "python-3.x - Pytorch: ValueError: Expected input batch_size (32) to match target batch_size (64)", a similar question was found on Stack Overflow: https://stackoverflow.com/questions/58059221/
