
tensorflow - How to get file names during prediction when using tf.keras.preprocessing.image_dataset_from_directory()?


Keras recently introduced the tf.keras.preprocessing.image_dataset_from_directory function, which is more efficient in tensorflow 2.x than the older ImageDataGenerator.flow_from_directory method.

I am practicing on the cats-vs-dogs problem and using this function to build a data pipeline for my model. After training the model, I call preds = model.predict(test_ds) to get the predictions for my test dataset. How should I match each pred with the corresponding image file name? (There used to be generator.filenames, but it no longer exists in the new method.) Thanks!

Best Answer

I had a similar problem. The solution was to take the underlying tf.keras.preprocessing.image_dataset_from_directory function and add the image_paths variable to the return statement. This incurs no computational overhead, since the file names have already been retrieved.
The main function code is taken from GitHub: https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/preprocessing/image_dataset.py#L34-L206
See below:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.preprocessing import dataset_utils
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.util.tf_export import keras_export

WHITELIST_FORMATS = ('.bmp', '.gif', '.jpeg', '.jpg', '.png')


## Tensorflow override method to return fname as list as well as dataset

def image_dataset_from_directory(directory,
                                 labels='inferred',
                                 label_mode='int',
                                 class_names=None,
                                 color_mode='rgb',
                                 batch_size=32,
                                 image_size=(256, 256),
                                 shuffle=True,
                                 seed=None,
                                 validation_split=None,
                                 subset=None,
                                 interpolation='bilinear',
                                 follow_links=False):
  if labels != 'inferred':
    if not isinstance(labels, (list, tuple)):
      raise ValueError(
          '`labels` argument should be a list/tuple of integer labels, of '
          'the same size as the number of image files in the target '
          'directory. If you wish to infer the labels from the subdirectory '
          'names in the target directory, pass `labels="inferred"`. '
          'If you wish to get a dataset that only contains images '
          '(no labels), pass `label_mode=None`.')
    if class_names:
      raise ValueError('You can only pass `class_names` if the labels are '
                       'inferred from the subdirectory names in the target '
                       'directory (`labels="inferred"`).')
  if label_mode not in {'int', 'categorical', 'binary', None}:
    raise ValueError(
        '`label_mode` argument must be one of "int", "categorical", "binary", '
        'or None. Received: %s' % (label_mode,))
  if color_mode == 'rgb':
    num_channels = 3
  elif color_mode == 'rgba':
    num_channels = 4
  elif color_mode == 'grayscale':
    num_channels = 1
  else:
    raise ValueError(
        '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
        'Received: %s' % (color_mode,))
  interpolation = image_preprocessing.get_interpolation(interpolation)
  dataset_utils.check_validation_split_arg(
      validation_split, subset, shuffle, seed)

  if seed is None:
    seed = np.random.randint(1e6)
  image_paths, labels, class_names = dataset_utils.index_directory(
      directory,
      labels,
      formats=WHITELIST_FORMATS,
      class_names=class_names,
      shuffle=shuffle,
      seed=seed,
      follow_links=follow_links)

  if label_mode == 'binary' and len(class_names) != 2:
    raise ValueError(
        'When passing `label_mode="binary"`, there must be exactly 2 classes. '
        'Found the following classes: %s' % (class_names,))

  image_paths, labels = dataset_utils.get_training_or_validation_split(
      image_paths, labels, validation_split, subset)

  dataset = paths_and_labels_to_dataset(
      image_paths=image_paths,
      image_size=image_size,
      num_channels=num_channels,
      labels=labels,
      label_mode=label_mode,
      num_classes=len(class_names),
      interpolation=interpolation)
  if shuffle:
    # Shuffle locally at each iteration
    dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
  dataset = dataset.batch(batch_size)
  # Users may need to reference `class_names`.
  dataset.class_names = class_names
  # Modified from the stock implementation: also return the image file paths.
  return dataset, image_paths


def paths_and_labels_to_dataset(image_paths,
                                image_size,
                                num_channels,
                                labels,
                                label_mode,
                                num_classes,
                                interpolation):
  """Constructs a dataset of images and labels."""
  # TODO(fchollet): consider making num_parallel_calls settable
  path_ds = dataset_ops.Dataset.from_tensor_slices(image_paths)
  img_ds = path_ds.map(
      lambda x: path_to_image(x, image_size, num_channels, interpolation))
  if label_mode:
    label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
    img_ds = dataset_ops.Dataset.zip((img_ds, label_ds))
  return img_ds


def path_to_image(path, image_size, num_channels, interpolation):
  img = io_ops.read_file(path)
  img = image_ops.decode_image(
      img, channels=num_channels, expand_animations=False)
  img = image_ops.resize_images_v2(img, image_size, method=interpolation)
  img.set_shape((image_size[0], image_size[1], num_channels))
  return img
It can then be called as:
train_dir = '/content/drive/My Drive/just_monkeying_around/monkey_training'
BATCH_SIZE = 32
IMG_SIZE = (224, 224)

train_dataset, train_paths = image_dataset_from_directory(train_dir,
                                                           shuffle=True,
                                                           batch_size=BATCH_SIZE,
                                                           image_size=IMG_SIZE)
train_paths is a list of file path strings.
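
With the modified function in place, here is a minimal sketch of how the returned paths might be paired with predictions. It assumes a trained model and a hypothetical test_dir directory (neither is defined in this post), and it builds the pipeline with shuffle=False so that the order of the returned paths matches the batch order that model.predict() sees:

# Sketch only: `model` and `test_dir` are assumed to exist.
# shuffle=False keeps the dataset in the same order as the returned paths;
# with shuffle=True the dataset is reshuffled per iteration and the order
# would no longer match.
test_dataset, test_paths = image_dataset_from_directory(test_dir,
                                                        shuffle=False,
                                                        batch_size=BATCH_SIZE,
                                                        image_size=IMG_SIZE)

preds = model.predict(test_dataset)

# One prediction row per file, in the same order as test_paths.
for path, pred in zip(test_paths, preds):
    print(path, pred)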

Regarding "tensorflow - How to get file names during prediction when using tf.keras.preprocessing.image_dataset_from_directory()?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/62166588/
