Below is the code I have been using while learning to program in TensorFlow.
from __future__ import print_function
from datetime import datetime
import time, os
import tensorflow as tf
# Import data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_epoch = 5
batch_size = 128
display_step = 10
model_path = "./output/model.ckpt"
logs_path = './logs'
directory = os.path.dirname(model_path)
if not os.path.exists(directory):
    os.makedirs(directory)
directory = os.path.dirname(logs_path)
if not os.path.exists(directory):
    os.makedirs(directory)
# Network Parameters
n_input = 784 # data input
n_classes = 10 # classes
dropout = 0.5 # Dropout, probability to keep units
l2_regularization_strength = 0.0005 #l2 regularization strength
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input], name='InputData')
y = tf.placeholder(tf.float32, [None, n_classes], name='LabelData')
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
mode = tf.placeholder(tf.int32)
# Create some wrappers for simplicity
def conv2d(x, kernel_shape, strides=1, mode=0):
    # Conv2D wrapper, with batch normalization and relu activation
    weights = tf.get_variable('weights', kernel_shape, initializer=tf.contrib.layers.xavier_initializer())
    x = tf.nn.conv2d(x, weights, strides=[1, strides, strides, 1], padding='SAME')
    pop_mean = tf.get_variable('bn_pop_mean', [x.get_shape()[-1]], initializer=tf.constant_initializer(0), trainable=False)
    pop_var = tf.get_variable('bn_pop_var', [x.get_shape()[-1]], initializer=tf.constant_initializer(1), trainable=False)
    scale = tf.get_variable('bn_scale', [x.get_shape()[-1]], initializer=tf.constant_initializer(1))
    beta = tf.get_variable('bn_beta', [x.get_shape()[-1]], initializer=tf.constant_initializer(0))
    epsilon = 1e-3
    decay = 0.999
    if mode == 0:
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            bn = tf.nn.batch_normalization(x, batch_mean, batch_var, beta, scale, epsilon, name='bn')
    else:
        bn = tf.nn.batch_normalization(x, pop_mean, pop_var, beta, scale, epsilon, name='bn')
    return tf.nn.relu(bn, name='relu')
def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name='maxpool')
# Create model
def conv_net(x, dropout, mode):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    with tf.variable_scope("conv1"):
        # Convolution Layer
        conv1 = conv2d(x, [5, 5, 1, 32], mode=mode)
        # Max Pooling (down-sampling)
        conv1 = maxpool2d(conv1, k=2)
    with tf.variable_scope("conv2"):
        # Convolution Layer
        conv2 = conv2d(conv1, [5, 5, 32, 64], mode=mode)
        # Max Pooling (down-sampling)
        conv2 = maxpool2d(conv2, k=2)
    with tf.variable_scope("fc1"):
        # Fully connected layer
        # Reshape conv2 output to fit fully connected layer input
        weights = tf.get_variable("weights", [7*7*64, 1024], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable("biases", [1024], initializer=tf.constant_initializer(0.0))
        fc1 = tf.reshape(conv2, [-1, weights.get_shape().as_list()[0]])
        fc1 = tf.add(tf.matmul(fc1, weights), biases)
        fc1 = tf.nn.relu(fc1, name='relu')
        # Apply Dropout
        fc1 = tf.nn.dropout(fc1, dropout, name='dropout')
    with tf.variable_scope("output"):
        # Output, class prediction
        weights = tf.get_variable("weights", [1024, n_classes], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable("biases", [n_classes], initializer=tf.constant_initializer(0.0))
        out = tf.add(tf.matmul(fc1, weights), biases)
    return out
with tf.name_scope('Model'):
    # Construct model
    pred = conv_net(x, keep_prob, mode)
with tf.name_scope('Loss'):
    # Define loss and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
    vars = tf.trainable_variables()
    l2_regularization = tf.add_n([tf.nn.l2_loss(v) for v in vars if any(x in v.name for x in ['weights', 'biases'])])
    for v in vars:
        if any(x in v.name for x in ['weights', 'biases']):
            print(v.name + '-included!')
        else:
            print(v.name)
    cost += l2_regularization_strength * l2_regularization
with tf.name_scope('Optimizer'):
    # Define optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    # Op to calculate every variable gradient
    grads = tf.gradients(cost, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)
with tf.name_scope('Accuracy'):
    # Evaluate model
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
# Create a summary to monitor cost tensor
tf.scalar_summary('cost', cost)
# Create a summary to monitor l2_regularization tensor
tf.scalar_summary('l2_regularization', l2_regularization)
# Create a summary to monitor accuracy tensor
tf.scalar_summary('accuracy', accuracy)
# Create summaries to visualize weights
for var in tf.trainable_variables():
    tf.histogram_summary(var.name, var)
for var in tf.all_variables():
    if 'bn_pop' in var.name:
        tf.histogram_summary(var.name, var)
# Summarize all gradients
for grad, var in grads:
    tf.histogram_summary(var.name + '/gradient', grad)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # op to write logs to Tensorboard
    summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
    # Keep training until reach max epoch
    while step * batch_size < training_epoch * mnist.train.num_examples:
        start_time = time.time()
        # Get batch
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(apply_grads, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout, mode: 0})
        duration = time.time() - start_time
        if step % display_step == 0:
            # Calculate batch loss and accuracy
            loss, acc, summary = sess.run([cost, accuracy, merged_summary_op],
                                          feed_dict={x: batch_x, y: batch_y, keep_prob: 1., mode: 1})
            # Write logs at every iteration
            summary_writer.add_summary(summary, step)
            # Calculate number of samples per sec
            samples_per_sec = batch_size / duration
            format_str = ('%s: Iter %d, Epoch %d, (%.1f examples/sec; %.3f sec/batch), Minibatch Loss = %.5f, Training Accuracy = %.5f')
            print(format_str % (datetime.now(), step * batch_size, int(step * batch_size / mnist.train.num_examples) + 1, samples_per_sec, float(duration), loss, acc))
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for 5000 mnist test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: mnist.test.images[:5000],
                                        y: mnist.test.labels[:5000],
                                        keep_prob: 1.,
                                        mode: 2}))
    # Save model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)
When I open TensorBoard and look at the histograms and distributions tabs, 'bn_pop_mean' and 'bn_pop_var' in 'conv1' and 'conv2' are never updated (they keep their initial values).
Although my accuracy reaches about 97% after training, I don't know whether batch normalization is actually doing anything.
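A direct way to double-check this, independent of TensorBoard, is to read the population statistics back from the session. This is only a minimal sketch, meant to be run inside the with tf.Session() as sess: block above; if the update ops fire, the printed values should drift away from their initializers of 0 and 1 as training proceeds:

for v in tf.all_variables():
    if 'bn_pop' in v.name:
        # print the first few entries of each population statistic
        print(v.name, sess.run(v).ravel()[:5])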
Best answer
In your conv_net function, you do not set the 'reuse' argument of tf.variable_scope(). The default for 'reuse' is None, so every time the conv2d function is called, 'bn_pop_mean' and 'bn_pop_var' are re-initialized.
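A minimal sketch of what making reuse explicit could look like (TF 1.x-era API; the helper name build_block and the scope name demo_conv are illustrative, not part of the original code). Constructing the same block a second time with reuse=True fetches the existing variables instead of initializing new ones:

def build_block(inp, mode, reuse=None):
    # reuse=None creates demo_conv/weights, demo_conv/bn_pop_mean, etc.;
    # reuse=True looks up the already-created variables instead
    with tf.variable_scope("demo_conv", reuse=reuse):
        return conv2d(inp, [5, 5, 1, 32], mode=mode)

images = tf.reshape(x, [-1, 28, 28, 1])
train_out = build_block(images, mode)             # first call: variables are created
eval_out = build_block(images, mode, reuse=True)  # second call: same variables, nothing re-initialized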
Regarding python - tensorflow batch normalization, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/40166236/