
python - How to tune the hyperparameters for CIFAR100 in tensorflow_federated (TFF) without losing accuracy?


I am trying to follow this tutorial https://www.tensorflow.org/federated/tutorials/tff_for_federated_learning_research_compression with the CIFAR100 dataset, but the accuracy drops every round!

Is it because of the hyperparameters I chose?

Here is my code:

import functools

import numpy as np
import tensorflow as tf
import tensorflow_federated as tff

cifar_train, cifar_test = tff.simulation.datasets.cifar100.load_data()

MAX_CLIENT_DATASET_SIZE = 418

CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500

def reshape_cifar_element(element):
  return (tf.expand_dims(element['image'], axis=-1), element['label'])

def preprocess_train_dataset(dataset):
  """Preprocessing function for the EMNIST training dataset."""
  return (dataset
          # Shuffle according to the largest client dataset
          .shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
          # Repeat to do multiple local epochs
          .repeat(CLIENT_EPOCHS_PER_ROUND)
          # Batch to a fixed client batch size
          .batch(CLIENT_BATCH_SIZE, drop_remainder=False)
          # Preprocessing step
          .map(reshape_cifar_element))

cifar_train = cifar_train.preprocess(preprocess_train_dataset)

# defining a model
def create_original_fedavg_cnn_model():
  data_format = 'channels_last'

  max_pool = functools.partial(
      tf.keras.layers.MaxPooling2D,
      pool_size=(2, 2),
      padding='same',
      data_format=data_format)
  conv2d = functools.partial(
      tf.keras.layers.Conv2D,
      kernel_size=5,
      padding='same',
      data_format=data_format,
      activation=tf.nn.relu)

  model = tf.keras.models.Sequential([
      tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
      conv2d(filters=32),
      max_pool(),
      conv2d(filters=64),
      max_pool(),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(512, activation=tf.nn.relu),
      tf.keras.layers.Dense(100, activation=None),
      tf.keras.layers.Softmax(),
  ])
  return model

input_spec = cifar_train.create_tf_dataset_for_client(
    cifar_train.client_ids[0]).element_spec

def tff_model_fn():
  keras_model = create_original_fedavg_cnn_model()
  return tff.learning.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(),
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# training the model
federated_averaging = tff.learning.build_federated_averaging_process(
    model_fn=tff_model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.05),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))

# utility function
def format_size(size):
  size = float(size)
  for unit in ['bit', 'Kibit', 'Mibit', 'Gibit']:
    if size < 1024.0:
      return "{size:3.2f}{unit}".format(size=size, unit=unit)
    size /= 1024.0
  return "{size:.2f}{unit}".format(size=size, unit='TiB')

def set_sizing_environment():
  sizing_factory = tff.framework.sizing_executor_factory()
  context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
  tff.framework.set_default_context(context)

  return sizing_factory

def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
  environment = set_sizing_environment()

  # Initialize the Federated Averaging algorithm to get the initial server state.
  state = federated_averaging_process.initialize()

  with summary_writer.as_default():
    for round_num in range(num_rounds):
      # Sample the clients participating in this round.
      sampled_clients = np.random.choice(
          cifar_train.client_ids,
          size=num_clients_per_round,
          replace=False)
      # Create a list of `tf.Dataset` instances from the data of sampled clients.
      sampled_train_data = [
          cifar_train.create_tf_dataset_for_client(client)
          for client in sampled_clients
      ]
      state, metrics = federated_averaging_process.next(state, sampled_train_data)

      size_info = environment.get_size_info()
      broadcasted_bits = size_info.broadcast_bits[-1]
      aggregated_bits = size_info.aggregate_bits[-1]

      print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(
          round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))

      # Add metrics to TensorBoard.
      for name, value in metrics['train'].items():
        tf.summary.scalar(name, value, step=round_num)

      # Add broadcasted and aggregated data size to TensorBoard.
      tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
      tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
      summary_writer.flush()

# Clean the log directory to avoid conflicts.
try:
  tf.io.gfile.rmtree('/tmp/logs/scalars')
except tf.errors.OpError as e:
  pass  # Path doesn't exist

# Set up the log directory and writer for TensorBoard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)

train(federated_averaging_process=federated_averaging, num_rounds=10,
      num_clients_per_round=100, summary_writer=summary_writer)

Here is the output:

round  0, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0299), ('loss', 15.586388), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=6.56Gibit, aggregated_bits=6.56Gibit
round 1, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0046), ('loss', 16.042076), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=13.13Gibit, aggregated_bits=13.13Gibit
round 2, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0107), ('loss', 15.945647), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=19.69Gibit, aggregated_bits=19.69Gibit
round 3, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0104), ('loss', 15.950482), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=26.26Gibit, aggregated_bits=26.26Gibit
round 4, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0115), ('loss', 15.932754), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=32.82Gibit, aggregated_bits=32.82Gibit
round 5, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0111), ('loss', 15.9391985), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=39.39Gibit, aggregated_bits=39.39Gibit
round 6, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0112), ('loss', 15.937586), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=45.95Gibit, aggregated_bits=45.95Gibit
round 7, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.012), ('loss', 15.924692), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=52.52Gibit, aggregated_bits=52.52Gibit
round 8, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0105), ('loss', 15.948869), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=59.08Gibit, aggregated_bits=59.08Gibit
round 9, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0096), ('loss', 15.963377), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=65.64Gibit, aggregated_bits=65.64Gibit

And here is the input structure:

OrderedDict([('coarse_label', TensorSpec(shape=(), dtype=tf.int64, name=None)), ('image', TensorSpec(shape=(32, 32, 3), dtype=tf.uint8, name=None)), ('label', TensorSpec(shape=(), dtype=tf.int64, name=None))])

I cannot figure out where my mistake is!

  • Are the hyperparameters defined in the layers of create_original_fedavg_cnn_model() wrong, or is the problem in preprocess_train_dataset()?

  • How should I tune the hyperparameters for the CIFAR100 dataset?

Any help is appreciated. Thank you!

Best Answer

A few notes here:

  1. Since the model does seem to be training (just not very well), some of the most salient hyperparameters to look at are the optimizer learning rates. You may want to try other values there, and even other optimizers.

  2. The CNN model you are using is quite small and may not do well on CIFAR-100 in general. One useful thing to do is to first try training the model on the dataset in a centralized manner, as a sanity check, and then move on to federated training (see the first sketch after this list).

  3. A good rule of thumb for initializing the hyperparameter settings is to take the optimizer/hyperparameters that work well in centralized training (see point 2) and use them as the client optimizer, while keeping the server optimizer as SGD with a learning rate of 1. This may not be optimal, but it often does reasonably well (see the second sketch after this list).
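
A minimal sketch of the sanity check in point 2, assuming the same model definition as in the question: train it centrally on the standard (non-federated) CIFAR-100 split from tf.keras.datasets. The epoch count and the Adam learning rate are illustrative placeholders, not tuned values.

# Centralized sanity check (point 2): train the same Keras model on the
# standard CIFAR-100 split. Epochs and learning rate are illustrative only.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

centralized_model = create_original_fedavg_cnn_model()
centralized_model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
centralized_model.fit(
    x_train, y_train,
    batch_size=CLIENT_BATCH_SIZE,
    epochs=5,
    validation_data=(x_test, y_test))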
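
And a minimal sketch of points 1 and 3: rebuild the federated averaging process so that the client optimizer mirrors whatever worked centrally (Adam here is only a placeholder for the centrally tuned choice), keep the server optimizer as plain SGD with learning rate 1.0, and sweep the client learning rate over a small grid. The grid values and round counts are arbitrary.

# Points 1 and 3: client optimizer mirrors the centrally tuned one (placeholder
# values here), server optimizer stays SGD(1.0). Sweep the client learning rate
# over a small grid and keep whichever value trains best.
for client_lr in [1e-1, 1e-2, 1e-3]:
  tuned_process = tff.learning.build_federated_averaging_process(
      model_fn=tff_model_fn,
      client_optimizer_fn=lambda lr=client_lr: tf.keras.optimizers.Adam(learning_rate=lr),
      server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
  # Reuse the train() loop from the question for a few rounds per setting, e.g.:
  # train(federated_averaging_process=tuned_process, num_rounds=10,
  #       num_clients_per_round=10, summary_writer=summary_writer)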

Unfortunately, model training is still something of an art rather than a science, and federated training can behave differently from centralized training, so some trial and error may be needed. Good luck.

Regarding python - How to tune the hyperparameters for CIFAR100 in tensorflow_federated (TFF) without losing accuracy?, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/72179718/
