
python - TensorFlow batch normalization


Below is the code I wrote while learning TensorFlow.

from __future__ import print_function
from datetime import datetime
import time, os

import tensorflow as tf

# Import data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.001
training_epoch = 5
batch_size = 128
display_step = 10
model_path = "./output/model.ckpt"
logs_path = './logs'

directory = os.path.dirname(model_path)
if not os.path.exists(directory):
    os.makedirs(directory)

directory = os.path.dirname(logs_path)
if not os.path.exists(directory):
    os.makedirs(directory)

# Network Parameters
n_input = 784 # data input
n_classes = 10 # classes
dropout = 0.5 # Dropout, probability to keep units
l2_regularization_strength = 0.0005 #l2 regularization strength

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input], name='InputData')
y = tf.placeholder(tf.float32, [None, n_classes], name='LabelData')
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
mode = tf.placeholder(tf.int32)

# Create some wrappers for simplicity
def conv2d(x, kernel_shape, strides=1, mode=0):
    # Conv2D wrapper, with batch normalization and relu activation
    weights = tf.get_variable('weights', kernel_shape, initializer=tf.contrib.layers.xavier_initializer())

    x = tf.nn.conv2d(x, weights, strides=[1, strides, strides, 1], padding='SAME')
    pop_mean = tf.get_variable('bn_pop_mean', [x.get_shape()[-1]], initializer=tf.constant_initializer(0), trainable=False)
    pop_var = tf.get_variable('bn_pop_var', [x.get_shape()[-1]], initializer=tf.constant_initializer(1), trainable=False)
    scale = tf.get_variable('bn_scale', [x.get_shape()[-1]], initializer=tf.constant_initializer(1))
    beta = tf.get_variable('bn_beta', [x.get_shape()[-1]], initializer=tf.constant_initializer(0))
    epsilon = 1e-3
    decay = 0.999
    if mode == 0:
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            bn = tf.nn.batch_normalization(x, batch_mean, batch_var, beta, scale, epsilon, name='bn')
    else:
        bn = tf.nn.batch_normalization(x, pop_mean, pop_var, beta, scale, epsilon, name='bn')

    return tf.nn.relu(bn, name='relu')


def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name='maxpool')

# Create model
def conv_net(x, dropout, mode):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    with tf.variable_scope("conv1"):
        # Convolution Layer
        conv1 = conv2d(x, [5, 5, 1, 32], mode=mode)
        # Max Pooling (down-sampling)
        conv1 = maxpool2d(conv1, k=2)

    with tf.variable_scope("conv2"):
        # Convolution Layer
        conv2 = conv2d(conv1, [5, 5, 32, 64], mode=mode)
        # Max Pooling (down-sampling)
        conv2 = maxpool2d(conv2, k=2)

    with tf.variable_scope("fc1"):
        # Fully connected layer
        # Reshape conv2 output to fit fully connected layer input
        weights = tf.get_variable("weights", [7*7*64, 1024], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable("biases", [1024], initializer=tf.constant_initializer(0.0))
        fc1 = tf.reshape(conv2, [-1, weights.get_shape().as_list()[0]])
        fc1 = tf.add(tf.matmul(fc1, weights), biases)
        fc1 = tf.nn.relu(fc1, name='relu')

        # Apply Dropout
        fc1 = tf.nn.dropout(fc1, dropout, name='dropout')

    with tf.variable_scope("output"):
        # Output, class prediction
        weights = tf.get_variable("weights", [1024, n_classes], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable("biases", [n_classes], initializer=tf.constant_initializer(0.0))
        out = tf.add(tf.matmul(fc1, weights), biases)
    return out

with tf.name_scope('Model'):
    # Construct model
    pred = conv_net(x, keep_prob, mode)

with tf.name_scope('Loss'):
    # Define loss and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))

    vars = tf.trainable_variables()
    l2_regularization = tf.add_n([tf.nn.l2_loss(v) for v in vars if any(x in v.name for x in ['weights', 'biases'])])
    for v in vars:
        if any(x in v.name for x in ['weights', 'biases']):
            print(v.name + '-included!')
        else:
            print(v.name)

    cost += l2_regularization_strength*l2_regularization

with tf.name_scope('Optimizer'):
    # Define optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

    # Op to calculate every variable gradient
    grads = tf.gradients(cost, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))

    # Op to update all variables according to their gradient
    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)

with tf.name_scope('Accuracy'):
    # Evaluate model
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()

# Create a summary to monitor cost tensor
tf.scalar_summary('cost', cost)
# Create a summary to monitor l2_regularization tensor
tf.scalar_summary('l2_regularization', l2_regularization)
# Create a summary to monitor accuracy tensor
tf.scalar_summary('accuracy', accuracy)
# Create summaries to visualize weights
for var in tf.trainable_variables():
    tf.histogram_summary(var.name, var)
for var in tf.all_variables():
    if 'bn_pop' in var.name:
        tf.histogram_summary(var.name, var)
# Summarize all gradients
for grad, var in grads:
    tf.histogram_summary(var.name + '/gradient', grad)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()

# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1

    # op to write logs to Tensorboard
    summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())

    # Keep training until reach max epoch
    while step * batch_size < training_epoch * mnist.train.num_examples:
        start_time = time.time()
        # Get batch
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(apply_grads, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout, mode: 0})
        duration = time.time() - start_time
        if step % display_step == 0:
            # Calculate batch loss and accuracy
            loss, acc, summary = sess.run([cost, accuracy, merged_summary_op],
                                          feed_dict={x: batch_x,
                                                     y: batch_y,
                                                     keep_prob: 1.,
                                                     mode: 1})

            # Write logs at every iteration
            summary_writer.add_summary(summary, step)

            # Calculate number of samples per sec
            samples_per_sec = batch_size / duration
            format_str = ('%s: Iter %d, Epoch %d, (%.1f examples/sec; %.3f sec/batch), Minibatch Loss = %.5f , Training Accuracy=%.5f')
            print(format_str % (datetime.now(), step*batch_size, int(step*batch_size/mnist.train.num_examples) + 1, samples_per_sec, float(duration), loss, acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 5000 mnist test images
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: mnist.test.images[:5000],
                                      y: mnist.test.labels[:5000],
                                      keep_prob: 1.,
                                      mode: 2}))

    # Save model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)

When I open TensorBoard and look at the histogram and distribution tabs, 'bn_pop_mean' and 'bn_pop_var' under 'conv1' and 'conv2' are never updated (they stay at their initial values).

Although I reach about 97% accuracy after training, I cannot tell whether the batch normalization is actually taking effect.
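One way to double-check, as a minimal sketch that is not part of the original post, is to read the population statistics straight from the session inside the training loop instead of relying on the TensorBoard histograms; if the moving-average updates are running, the values should drift away from their 0/1 initializers:

# Minimal sketch (not in the original code): print the batch-norm population
# statistics directly to see whether they move during training.
for v in tf.all_variables():
    if 'bn_pop' in v.name:
        print(v.name, sess.run(v)[:5])  # should drift away from 0 / 1 over time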

Best answer

In your conv_net function, you did not set the 'reuse' argument of tf.variable_scope(). The default value of 'reuse' is None, so every time the conv2d function is called, 'bn_pop_mean' and 'bn_pop_var' are re-initialized.
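For illustration only, and as an assumption rather than code taken from the answer, one way to act on this is to thread an explicit reuse flag through conv_net so that a later call to the function shares the variables created by the first call instead of re-creating them:

# Hypothetical sketch: pass a `reuse` flag down to each variable scope.
def conv_net(x, dropout, mode, reuse=None):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    with tf.variable_scope("conv1", reuse=reuse):
        conv1 = conv2d(x, [5, 5, 1, 32], mode=mode)
        conv1 = maxpool2d(conv1, k=2)
    # ... the "conv2", "fc1" and "output" scopes would take the same reuse flag ...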

Regarding python - TensorFlow batch normalization, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/40166236/
