I tried to create a class using the batch normalization layer from tf 2.0, but it gave me an error that gradients do not exist for some variables. I tried using batch normalization directly as well, and it gave me the same error. It looks like it is not training the variables associated with the batch normalization step.
I tried using model.trainable_variables instead of model.variables, but that didn't work either.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import ndimage
learning_rate = 0.001
training_epochs = 15
batch_size = 100
tf.random.set_seed(777)
cur_dir = os.getcwd()
ckpt_dir_name = 'checkpoints'
model_dir_name = 'minst_cnn_best'
checkpoint_dir = os.path.join(cur_dir, ckpt_dir_name, model_dir_name)
os.makedirs(checkpoint_dir, exist_ok=True)
checkpoint_prefix = os.path.join(checkpoint_dir, model_dir_name)
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.astype(np.float32) / 255.
test_images = test_images.astype(np.float32) / 255.
print(train_images.shape, test_images.shape)
train_images = np.expand_dims(train_images, axis=-1)
test_images = np.expand_dims(test_images, axis=-1)
print(train_images.shape, test_images.shape)
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
train_dataset = tf.data.Dataset.from_tensor_slices(
    (train_images, train_labels)).shuffle(buffer_size=100000).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices(
    (test_images, test_labels)).batch(batch_size)
class ConvBNRelu(tf.keras.Model):
    def __init__(self, filters, kernel_size=3, strides=1, padding='SAME'):
        super(ConvBNRelu, self).__init__()
        self.conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size,
                                        strides=strides, padding=padding,
                                        kernel_initializer='glorot_normal')
        self.batchnorm = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        layer = self.conv(inputs)
        layer = self.batchnorm(layer)
        layer = tf.nn.relu(layer)
        return layer
class DenseBNRelu(tf.keras.Model):
    def __init__(self, units):
        super(DenseBNRelu, self).__init__()
        self.dense = keras.layers.Dense(units=units, kernel_initializer='glorot_normal')
        self.batchnorm = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        layer = self.dense(inputs)
        layer = self.batchnorm(layer)
        layer = tf.nn.relu(layer)
        return layer
class MNISTModel(tf.keras.Model):
    def __init__(self):
        super(MNISTModel, self).__init__()
        self.conv1 = ConvBNRelu(filters=32, kernel_size=[3, 3], padding='SAME')
        self.pool1 = keras.layers.MaxPool2D(padding='SAME')
        self.conv2 = ConvBNRelu(filters=64, kernel_size=[3, 3], padding='SAME')
        self.pool2 = keras.layers.MaxPool2D(padding='SAME')
        self.conv3 = ConvBNRelu(filters=128, kernel_size=[3, 3], padding='SAME')
        self.pool3 = keras.layers.MaxPool2D(padding='SAME')
        self.pool3_flat = keras.layers.Flatten()
        self.dense4 = DenseBNRelu(units=256)
        self.drop4 = keras.layers.Dropout(rate=0.4)
        self.dense5 = keras.layers.Dense(units=10, kernel_initializer='glorot_normal')

    def call(self, inputs, training=False):
        net = self.conv1(inputs)
        net = self.pool1(net)
        net = self.conv2(net)
        net = self.pool2(net)
        net = self.conv3(net)
        net = self.pool3(net)
        net = self.pool3_flat(net)
        net = self.dense4(net)
        net = self.drop4(net)
        net = self.dense5(net)
        return net
# Build an ensemble of independent CNN models
models = []
num_models = 5
for m in range(num_models):
    models.append(MNISTModel())

def loss_fn(model, images, labels):
    logits = model(images, training=True)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                                  labels=labels))
    return loss

def grad(model, images, labels):
    with tf.GradientTape() as tape:
        loss = loss_fn(model, images, labels)
    return tape.gradient(loss, model.variables)
def evaluate(models, images, labels):
    # Ensemble prediction: sum the logits of all models
    predictions = np.zeros_like(labels)
    for model in models:
        logits = model(images, training=False)
        predictions += logits
    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)

checkpoints = []
for m in range(num_models):
    checkpoints.append(tf.train.Checkpoint(cnn=models[m]))

for epoch in range(training_epochs):
    avg_loss = 0.
    avg_train_acc = 0.
    avg_test_acc = 0.
    train_step = 0
    test_step = 0

    for images, labels in train_dataset:
        # Train each model in the ensemble on the batch
        for model in models:
            grads = grad(model, images, labels)
            optimizer.apply_gradients(zip(grads, model.variables))
            loss = loss_fn(model, images, labels)
            avg_loss += loss / num_models
        acc = evaluate(models, images, labels)
        avg_train_acc += acc
        train_step += 1
    avg_loss = avg_loss / train_step
    avg_train_acc = avg_train_acc / train_step

    for images, labels in test_dataset:
        acc = evaluate(models, images, labels)
        avg_test_acc += acc
        test_step += 1
    avg_test_acc = avg_test_acc / test_step

    print('Epoch:', '{}'.format(epoch + 1), 'loss =', '{:.8f}'.format(avg_loss),
          'train accuracy = ', '{:.4f}'.format(avg_train_acc),
          'test accuracy = ', '{:.4f}'.format(avg_test_acc))

    for idx, checkpoint in enumerate(checkpoints):
        checkpoint.save(file_prefix=checkpoint_prefix + '-{}'.format(idx))

print('Learning Finished!')
W0727 20:27:05.344142 140332288718656 optimizer_v2.py:982] Gradients do not exist for variables ['mnist_model/conv_bn_relu/batch_normalization/moving_mean:0', 'mnist_model/conv_bn_relu/batch_normalization/moving_variance:0', 'mnist_model/conv_bn_relu_1/batch_normalization_1/moving_mean:0', 'mnist_model/conv_bn_relu_1/batch_normalization_1/moving_variance:0', 'mnist_model/conv_bn_relu_2/batch_normalization_2/moving_mean:0', 'mnist_model/conv_bn_relu_2/batch_normalization_2/moving_variance:0', 'mnist_model/dense_bn_relu/batch_normalization_3/moving_mean:0', 'mnist_model/dense_bn_relu/batch_normalization_3/moving_variance:0'] when minimizing the loss.
W0727 20:27:05.407717 140332288718656 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py:460: BaseResourceVariable.constraint (from tensorflow.python.ops.resource_variable_ops) is deprecated and will be removed in a future version. Instructions for updating: Apply a constraint manually following the optimizer update step.
W0727 20:27:05.499249 140332288718656 optimizer_v2.py:982] Gradients do not exist for variables ['mnist_model_1/conv_bn_relu_3/batch_normalization_4/moving_mean:0', 'mnist_model_1/conv_bn_relu_3/batch_normalization_4/moving_variance:0', 'mnist_model_1/conv_bn_relu_4/batch_normalization_5/moving_mean:0', 'mnist_model_1/conv_bn_relu_4/batch_normalization_5/moving_variance:0', 'mnist_model_1/conv_bn_relu_5/batch_normalization_6/moving_mean:0', 'mnist_model_1/conv_bn_relu_5/batch_normalization_6/moving_variance:0', 'mnist_model_1/dense_bn_relu_1/batch_normalization_7/moving_mean:0', 'mnist_model_1/dense_bn_relu_1/batch_normalization_7/moving_variance:0'] when minimizing the loss.
...
Best answer
You are computing the gradient of the loss with respect to model.variables: this collection contains not only the trainable variables (the model weights), but also non-trainable variables such as the moving mean and moving variance computed by the batch normalization layers.
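As a quick way to see the difference (a minimal standalone sketch, not part of the original answer), you can list the trainable and non-trainable variables of a BatchNormalization layer:

import tensorflow as tf

bn = tf.keras.layers.BatchNormalization()
bn.build((None, 4))  # creates gamma, beta, moving_mean, moving_variance

print([v.name for v in bn.trainable_variables])
# ['batch_normalization/gamma:0', 'batch_normalization/beta:0']
print([v.name for v in bn.non_trainable_variables])
# ['batch_normalization/moving_mean:0', 'batch_normalization/moving_variance:0']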
You have to compute the gradient with respect to trainable_variables. In short, change the lines
return tape.gradient(loss, model.variables)
and
optimizer.apply_gradients(zip(grads, model.variables))
to
return tape.gradient(loss, model.trainable_variables)
and
optimizer.apply_gradients(zip(grads, model.trainable_variables))
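Putting it together, the fixed grad function and update step look like this (a sketch of only the changed parts of the question's code; the moving mean and variance are updated by the BatchNormalization layers themselves during the forward pass in training mode, not by gradient descent):

def grad(model, images, labels):
    with tf.GradientTape() as tape:
        loss = loss_fn(model, images, labels)
    # Only the trainable weights (conv/dense kernels, biases, gamma, beta)
    # receive gradients; moving_mean / moving_variance are excluded.
    return tape.gradient(loss, model.trainable_variables)

# ...and inside the training loop:
grads = grad(model, images, labels)
optimizer.apply_gradients(zip(grads, model.trainable_variables))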
Regarding "python-3.x - TF 2.0 error: Gradients does not exist for variables during training using gradienttape", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/57235909/