
python - Error: _pickle.PicklingError: Can't pickle <function <lambda> at 0x0000002F2175B048>: attribute lookup <lambda> on __main__ failed


I'm trying to run the following code, which reportedly runs fine for other users, but I get this error.
# -*- coding: utf-8 -*-
# Import stuff

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms

import cv2

import numpy as np

import csv
Step 1: Read from the log file
samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)
    for line in reader:
        samples.append(line)

Step 2: Divide the data into training and validation sets
train_len = int(0.8*len(samples))
valid_len = len(samples) - train_len
train_samples, validation_samples = data.random_split(samples, lengths=[train_len, valid_len])
Step 3a: Define the augmentation, the transformation processes, the parameters, and the dataset for the DataLoader
def augment(imgName, angle):
    name = 'data/IMG/' + imgName.split('/')[-1]
    current_image = cv2.imread(name)
    current_image = current_image[65:-25, :, :]
    if np.random.rand() < 0.5:
        current_image = cv2.flip(current_image, 1)
        angle = angle * -1.0
    return current_image, angle

class Dataset(data.Dataset):

    def __init__(self, samples, transform=None):
        self.samples = samples
        self.transform = transform

    def __getitem__(self, index):
        batch_samples = self.samples[index]

        steering_angle = float(batch_samples[3])

        center_img, steering_angle_center = augment(batch_samples[0], steering_angle)
        left_img, steering_angle_left = augment(batch_samples[1], steering_angle + 0.4)
        right_img, steering_angle_right = augment(batch_samples[2], steering_angle - 0.4)

        center_img = self.transform(center_img)
        left_img = self.transform(left_img)
        right_img = self.transform(right_img)

        return (center_img, steering_angle_center), (left_img, steering_angle_left), (right_img, steering_angle_right)

    def __len__(self):
        return len(self.samples)
Step 3b: Create generators with the DataLoader to parallelize the process
transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 255.0) - 0.5)])

params = {'batch_size': 32,
          'shuffle': True,
          'num_workers': 4}

training_set = Dataset(train_samples, transformations)
training_generator = data.DataLoader(training_set, **params)

validation_set = Dataset(validation_samples, transformations)
validation_generator = data.DataLoader(validation_set, **params)
Step 4: Define the network
class NetworkDense(nn.Module):

    def __init__(self):
        super(NetworkDense, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 36, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(36, 48, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(48, 64, 3),
            nn.ELU(),
            nn.Conv2d(64, 64, 3),
            nn.Dropout(0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=64 * 2 * 33, out_features=100),
            nn.ELU(),
            nn.Linear(in_features=100, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output


class NetworkLight(nn.Module):

    def __init__(self):
        super(NetworkLight, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 3, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 48, 3, stride=2),
            nn.MaxPool2d(4, stride=4),
            nn.Dropout(p=0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=48*4*19, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output
Step 5: Define the optimizer
model = NetworkLight()
optimizer = optim.Adam(model.parameters(), lr=0.0001)

criterion = nn.MSELoss()
Step 6: Check the device and define a function to move tensors to that device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 
print('device is: ', device)

def toDevice(datas, device):
    imgs, angles = datas
    return imgs.float().to(device), angles.float().to(device)
Step 7: Train and validate the network for the defined maximum number of epochs
max_epochs = 22

for epoch in range(max_epochs):

    model.to(device)

    # Training
    train_loss = 0
    model.train()
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
        # Transfer to GPU
        centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

        # Model computations
        optimizer.zero_grad()
        datas = [centers, lefts, rights]
        for data in datas:
            imgs, angles = data
            # print("training image: ", imgs.shape)
            outputs = model(imgs)
            loss = criterion(outputs, angles.unsqueeze(1))
            loss.backward()
            optimizer.step()

            train_loss += loss.data[0].item()

        if local_batch % 100 == 0:
            print('Loss: %.3f '
                  % (train_loss/(local_batch+1)))

    # Validation
    model.eval()
    valid_loss = 0
    with torch.set_grad_enabled(False):
        for local_batch, (centers, lefts, rights) in enumerate(validation_generator):
            # Transfer to GPU
            centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

            # Model computations
            optimizer.zero_grad()
            datas = [centers, lefts, rights]
            for data in datas:
                imgs, angles = data
                # print("Validation image: ", imgs.shape)
                outputs = model(imgs)
                loss = criterion(outputs, angles.unsqueeze(1))

                valid_loss += loss.data[0].item()

            if local_batch % 100 == 0:
                print('Valid Loss: %.3f '
                      % (valid_loss/(local_batch+1)))
Step 8: Define state and save the model to state
state = {
    'model': model.module if device == 'cuda' else model,
}

torch.save(state, 'model.h5')
Here is the error message:
"D:\VICO\Back up\venv\Scripts\python.exe" "D:/VICO/Back up/venv/Scripts/self_driving_car.py"
device is: cpu
Traceback (most recent call last):
  File "D:/VICO/Back up/venv/Scripts/self_driving_car.py", line 163, in <module>
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
  File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 291, in __iter__
    return _MultiProcessingDataLoaderIter(self)
  File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 737, in __init__
    w.start()
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
    self._popen = self._Popen(self)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py", line 89, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <function <lambda> at 0x0000002F2175B048>: attribute lookup <lambda> on __main__ failed

Process finished with exit code 1
I'm not sure what the next step is to solve this problem.

Best Answer

pickle doesn't pickle function objects. It expects to find the function object by importing its module and looking up its name. Lambdas are anonymous functions (they have no name), so that lookup cannot work. The solution is to give the function a name at module level. The only lambda I found in your code is:

transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 255.0) - 0.5)])
Assuming this is the troublesome function, you can replace it with:
def _my_normalization(x):
    return x/255.0 - 0.5

transformations = transforms.Compose([transforms.Lambda(_my_normalization)])
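
To see why the named function pickles where the lambda does not, here is a minimal standalone check (a sketch, not part of the original answer; the function name is illustrative):

import pickle

def _my_normalization(x):
    # module-level: pickle stores a reference ("module.name") it can re-import
    return x / 255.0 - 0.5

# round-trips fine; prints 0.5
print(pickle.loads(pickle.dumps(_my_normalization))(255.0))

try:
    pickle.dumps(lambda x: (x / 255.0) - 0.5)  # anonymous: no importable name
except pickle.PicklingError as err:
    print(err)  # Can't pickle <function <lambda> ...>: attribute lookup <lambda> on __main__ failed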
You may have other problems as well, because it looks like you are doing your work at module level. Since this involves multiprocessing and you are running on Windows, the new worker process will import the file and run all of that module-level code again. That's not a problem on linux/mac, where the forked process already has the module loaded from the parent. A common remedy is shown in the sketch below.
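As a minimal sketch of the Windows-safe layout (the toy dataset here is hypothetical, not from the question): keep definitions at module level and move everything that actually runs under the __main__ guard, so worker processes can import the file without re-executing it.

import torch
from torch.utils.data import DataLoader, Dataset

class SquaresDataset(Dataset):
    # hypothetical stand-in for the real dataset
    def __len__(self):
        return 8

    def __getitem__(self, i):
        return torch.tensor([float(i)]), torch.tensor([float(i * i)])

def main():
    # num_workers > 0 on Windows spawns workers that re-import this module,
    # so the DataLoader is only created when the script runs as __main__
    loader = DataLoader(SquaresDataset(), batch_size=4, num_workers=2)
    for x, y in loader:
        print(x.squeeze(1).tolist(), y.squeeze(1).tolist())

if __name__ == '__main__':
    main()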

Regarding python - Error: _pickle.PicklingError: Can't pickle <function <lambda> at 0x0000002F2175B048>: attribute lookup <lambda> on __main__ failed, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/64347217/
