gpt4 book ai didi

python - 如何在 Pytorch 中为图像及其掩模制作自定义数据集?

转载 作者:太空宇宙 更新时间:2023-11-03 21:30:53 24 4
gpt4 key购买 nike

我有两个tif图像的数据集文件夹,一个是名为BMMCdata的文件夹,另一个是名为BMMCmasks的BMMCdata图像的掩模(图像的名称是对应的)。我正在尝试制作一个定制的数据集,并随机分割数据以进行训练和测试。目前我收到错误

self.filenames.append(fn)
AttributeError: 'CustomDataset' object has no attribute 'filenames'

任何评论都将不胜感激。

import torch
from torch.utils.data.dataset import Dataset # For custom data-sets
from torchvision import transforms
from PIL import Image
import os.path as osp
import glob

folder_data = "/Users/parto/PycharmProjects/U-net/BMMCdata/data"

class CustomDataset(Dataset):
    """Dataset of .tif images read from a single folder.

    Each item is the image at ``index`` converted to a CHW float tensor.
    """

    def __init__(self, root):
        # Directory that holds the .tif files. BUG FIX: the original ignored
        # `root` and read the module-level global instead; honor the argument.
        self.root = root
        self.to_tensor = transforms.ToTensor()
        # BUG FIX: the original appended to `self.filenames` without ever
        # creating the list, raising AttributeError. Collect the glob result
        # directly (sorted, so ordering is deterministic across platforms).
        self.filenames = sorted(glob.glob(osp.join(root, '*.tif')))
        self.len = len(self.filenames)

    def __getitem__(self, index):
        # Load one image and convert it to a tensor.
        image = Image.open(self.filenames[index])
        # BUG FIX: the original called undefined `self.transform`; the
        # transform stored in __init__ is `self.to_tensor`.
        return self.to_tensor(image)

    def __len__(self):
        # Number of .tif files discovered at construction time.
        return self.len
custom_img = CustomDataset(folder_data)
# Total number of images in the dataset.
print(custom_img.len)

# 60/40 train/test split.
train_len = int(0.6 * custom_img.len)
test_len = custom_img.len - train_len
# BUG FIX: random_split is a function in torch.utils.data, not a method of
# CustomDataset (the original raised AttributeError).
train_set, test_set = torch.utils.data.random_split(custom_img, lengths=[train_len, test_len])
# Check the lengths of the two subsets.
print(len(train_set), len(test_set))

# BUG FIX: the original re-created the full dataset, wrapped it in
# TensorDataset with invalid keyword arguments, and built the test loaders
# from the `Dataset` *class* with an undefined `train_sampler`. Build the
# loaders directly from the two random-split subsets instead.
train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=1)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=4, shuffle=False, num_workers=1)
print(train_loader)
print(test_loader)

最佳答案

@ptrblck 在 pytorch 社区给出的答案。谢谢

 # get all the image and mask path and number of images
folder_data = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCdata\\data\\*.tif")
folder_mask = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCmasks\\masks\\*.tif")

# split these path using a certain percentage
len_data = len(folder_data)
print(len_data)
train_size = 0.6

train_image_paths = folder_data[:int(len_data*train_size)]
test_image_paths = folder_data[int(len_data*train_size):]

train_mask_paths = folder_mask[:int(len_data*train_size)]
test_mask_paths = folder_mask[int(len_data*train_size):]


class CustomDataset(Dataset):
    """Paired image/mask dataset built from two parallel lists of file paths.

    ``image_paths[i]`` and ``target_paths[i]`` are assumed to refer to the
    same sample (the folders hold files with corresponding names).
    """

    # FIX: in the scraped original, the signature's trailing comment was
    # broken across two lines ("happens like transform" stranded on its own
    # line), which is a syntax error; restored as a proper comment.
    def __init__(self, image_paths, target_paths, train=True):
        # Store the parallel path lists and the image transform.
        # (`train` is accepted for API symmetry but is currently unused.)
        self.image_paths = image_paths
        self.target_paths = target_paths
        self.transforms = transforms.ToTensor()

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        t_image = self.transforms(image)
        # NOTE(review): the mask is returned as a PIL image, not a tensor —
        # the default DataLoader collate cannot batch PIL images; confirm
        # whether the mask should also go through a transform.
        return t_image, mask

    def __len__(self):
        # Sample count equals the number of image paths supplied.
        return len(self.image_paths)

# Build train/test datasets from the pre-split path lists and wrap each in
# a DataLoader; only the training loader shuffles.
train_dataset = CustomDataset(train_image_paths, train_mask_paths, train=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=1)

test_dataset = CustomDataset(test_image_paths, test_mask_paths, train=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=False, num_workers=1)

关于python - 如何在 Pytorch 中为图像及其掩模制作自定义数据集?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/53530751/

24 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com