I am new to TensorFlow and am trying to implement a generative adversarial network. I am following this tutorial, in which we try to use a generative model to produce images resembling the MNIST dataset. However, the code seems to be written for an older version of TensorFlow (< 1.0), so it raises the following error:
line: trainerD = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)
ValueError: Variable d_wconv1/Adam/ does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
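For context, here is a minimal sketch of the scope behavior behind this message (my own illustration, assuming TensorFlow 1.x; it is not code from the tutorial): once reuse_variables() is called on the root variable scope, every later tf.get_variable() in that scope is treated as a lookup of an existing variable, so a request for a brand-new name fails:

import tensorflow as tf

tf.reset_default_graph()
v = tf.get_variable('v', [1])              # created normally; reuse is still off
tf.get_variable_scope().reuse_variables()  # what discriminator(reuse=True) does below
v_again = tf.get_variable('v', [1])        # OK: 'v' exists, so it is simply fetched
try:
    w = tf.get_variable('w', [1])          # a brand-new name in a reused scope
except ValueError as e:
    print(e)  # "Variable w does not exist, or was not created with tf.get_variable()..."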
The code is as follows:
import tensorflow as tf
import random
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x_train = mnist.train.images[:55000,:]
#print (x_train.shape)
#randomNum = random.randint(0,55000)
#image = x_train[randomNum].reshape([28,28])
#plt.imshow(image, cmap=plt.get_cmap('gray_r'))
#plt.show()
def conv2d(x, W):
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME')

def avg_pool_2x2(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def discriminator(x_image, reuse=False):
    if (reuse):
        tf.get_variable_scope().reuse_variables()
    #First Conv and Pool Layers
    W_conv1 = tf.get_variable('d_wconv1', [5, 5, 1, 8], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_conv1 = tf.get_variable('d_bconv1', [8], initializer=tf.constant_initializer(0))
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = avg_pool_2x2(h_conv1)
    #Second Conv and Pool Layers
    W_conv2 = tf.get_variable('d_wconv2', [5, 5, 8, 16], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_conv2 = tf.get_variable('d_bconv2', [16], initializer=tf.constant_initializer(0))
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = avg_pool_2x2(h_conv2)
    #First Fully Connected Layer
    W_fc1 = tf.get_variable('d_wfc1', [7 * 7 * 16, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_fc1 = tf.get_variable('d_bfc1', [32], initializer=tf.constant_initializer(0))
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*16])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    #Second Fully Connected Layer
    W_fc2 = tf.get_variable('d_wfc2', [32, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_fc2 = tf.get_variable('d_bfc2', [1], initializer=tf.constant_initializer(0))
    #Final Layer
    y_conv = (tf.matmul(h_fc1, W_fc2) + b_fc2)
    return y_conv
def generator(z, batch_size, z_dim, reuse=False):
    if (reuse):
        tf.get_variable_scope().reuse_variables()
    g_dim = 64 #Number of filters of first layer of generator
    c_dim = 1 #Color dimension of output (MNIST is grayscale, so c_dim = 1 for us)
    s = 28 #Output size of the image
    s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16) #We want to slowly upscale the image, so these values will help
    #make that change gradual.
    h0 = tf.reshape(z, [batch_size, s16+1, s16+1, 25])
    h0 = tf.nn.relu(h0)
    #Dimensions of h0 = batch_size x 2 x 2 x 25
    #First DeConv Layer
    output1_shape = [batch_size, s8, s8, g_dim*4]
    W_conv1 = tf.get_variable('g_wconv1', [5, 5, output1_shape[-1], int(h0.get_shape()[-1])],
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    b_conv1 = tf.get_variable('g_bconv1', [output1_shape[-1]], initializer=tf.constant_initializer(.1))
    H_conv1 = tf.nn.conv2d_transpose(h0, W_conv1, output_shape=output1_shape, strides=[1, 2, 2, 1], padding='SAME')
    H_conv1 = tf.contrib.layers.batch_norm(inputs = H_conv1, center=True, scale=True, is_training=True, scope="g_bn1")
    H_conv1 = tf.nn.relu(H_conv1)
    #Dimensions of H_conv1 = batch_size x 3 x 3 x 256
    #Second DeConv Layer
    output2_shape = [batch_size, s4 - 1, s4 - 1, g_dim*2]
    W_conv2 = tf.get_variable('g_wconv2', [5, 5, output2_shape[-1], int(H_conv1.get_shape()[-1])],
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    b_conv2 = tf.get_variable('g_bconv2', [output2_shape[-1]], initializer=tf.constant_initializer(.1))
    H_conv2 = tf.nn.conv2d_transpose(H_conv1, W_conv2, output_shape=output2_shape, strides=[1, 2, 2, 1], padding='SAME')
    H_conv2 = tf.contrib.layers.batch_norm(inputs = H_conv2, center=True, scale=True, is_training=True, scope="g_bn2")
    H_conv2 = tf.nn.relu(H_conv2)
    #Dimensions of H_conv2 = batch_size x 6 x 6 x 128
    #Third DeConv Layer
    output3_shape = [batch_size, s2 - 2, s2 - 2, g_dim*1]
    W_conv3 = tf.get_variable('g_wconv3', [5, 5, output3_shape[-1], int(H_conv2.get_shape()[-1])],
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    b_conv3 = tf.get_variable('g_bconv3', [output3_shape[-1]], initializer=tf.constant_initializer(.1))
    H_conv3 = tf.nn.conv2d_transpose(H_conv2, W_conv3, output_shape=output3_shape, strides=[1, 2, 2, 1], padding='SAME')
    H_conv3 = tf.contrib.layers.batch_norm(inputs = H_conv3, center=True, scale=True, is_training=True, scope="g_bn3")
    H_conv3 = tf.nn.relu(H_conv3)
    #Dimensions of H_conv3 = batch_size x 12 x 12 x 64
    #Fourth DeConv Layer
    output4_shape = [batch_size, s, s, c_dim]
    W_conv4 = tf.get_variable('g_wconv4', [5, 5, output4_shape[-1], int(H_conv3.get_shape()[-1])],
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    b_conv4 = tf.get_variable('g_bconv4', [output4_shape[-1]], initializer=tf.constant_initializer(.1))
    H_conv4 = tf.nn.conv2d_transpose(H_conv3, W_conv4, output_shape=output4_shape, strides=[1, 2, 2, 1], padding='VALID')
    H_conv4 = tf.nn.tanh(H_conv4)
    #Dimensions of H_conv4 = batch_size x 28 x 28 x 1
    return H_conv4
sess = tf.Session()
z_dimensions = 100
z_test_placeholder = tf.placeholder(tf.float32, [None, z_dimensions])
sample_image = generator(z_test_placeholder, 1, z_dimensions)
test_z = np.random.normal(-1, 1, [1,z_dimensions])
sess.run(tf.global_variables_initializer())
temp = (sess.run(sample_image, feed_dict={z_test_placeholder: test_z}))
my_i = temp.squeeze()
#plt.imshow(my_i, cmap='gray_r')
#plt.show()
batch_size = 16
tf.reset_default_graph() #Since we changed our batch size (from 1 to 16), we need to reset our Tensorflow graph
sess = tf.Session()
x_placeholder = tf.placeholder("float", shape = [None,28,28,1]) #Placeholder for input images to the discriminator
z_placeholder = tf.placeholder(tf.float32, [None, z_dimensions]) #Placeholder for input noise vectors to the generator
Dx = discriminator(x_placeholder) #Dx will hold discriminator prediction probabilities for the real MNIST images
Gz = generator(z_placeholder, batch_size, z_dimensions) #Gz holds the generated images
Dg = discriminator(Gz, reuse=True) #Dg will hold discriminator prediction probabilities for generated images
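#Note: the reuse=True call above invokes tf.get_variable_scope().reuse_variables(),
#which switches the root variable scope into reuse mode for the rest of this graph.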
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.ones_like(Dg)))
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dx, labels=tf.ones_like(Dx)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.zeros_like(Dg)))
d_loss = d_loss_real + d_loss_fake
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]
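#The two minimize() calls below are where the posted ValueError surfaces: Adam goes
#through the variable-scope machinery to create new slot variables such as
#'d_wconv1/Adam', which the reused root scope no longer permits.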
trainerD = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)
trainerG = tf.train.AdamOptimizer().minimize(g_loss, var_list=g_vars)
sess.run(tf.global_variables_initializer())
iterations = 3000
for i in range(iterations):
    z_batch = np.random.normal(-1, 1, size=[batch_size, z_dimensions])
    real_image_batch = mnist.train.next_batch(batch_size)
    real_image_batch = np.reshape(real_image_batch[0], [batch_size,28,28,1])
    _,dLoss = sess.run([trainerD, d_loss], feed_dict={z_placeholder:z_batch, x_placeholder:real_image_batch}) #Update the discriminator
    _,gLoss = sess.run([trainerG, g_loss], feed_dict={z_placeholder:z_batch}) #Update the generator
sample_image = generator(z_placeholder, 1, z_dimensions)
z_batch = np.random.normal(-1, 1, size=[1, z_dimensions])
temp = (sess.run(sample_image, feed_dict={z_placeholder: z_batch}))
my_i = temp.squeeze()
plt.imshow(my_i, cmap='gray_r')
plt.show()
It seems there should be a trivial fix, but unfortunately I cannot figure it out. Any help would be greatly appreciated.
Best Answer
Please modify your code as follows:
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
    trainerD = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)
    trainerG = tf.train.AdamOptimizer().minimize(g_loss, var_list=g_vars)
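This works because discriminator(Gz, reuse=True) calls tf.get_variable_scope().reuse_variables() and leaves the root variable scope in reuse mode; opening a fresh scope with reuse=False lets AdamOptimizer create its slot variables again. An alternative is to confine reuse to a named scope so it never leaks into the rest of the graph. The following is a sketch of such a restructuring (my own, not from the tutorial or the accepted answer); variable names keep the 'd_' prefix so the d_vars filter in the question still matches:

def discriminator(x_image, reuse=False):
    # Hypothetical variant: reuse applies only inside the 'disc' scope,
    # so the root scope is never switched into reuse mode.
    with tf.variable_scope('disc', reuse=reuse):
        W_conv1 = tf.get_variable('d_wconv1', [5, 5, 1, 8],
                                  initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_conv1 = tf.get_variable('d_bconv1', [8], initializer=tf.constant_initializer(0))
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        # ...build the remaining layers exactly as in the question's code...
        return h_conv1  # truncated sketch; the full function would return y_conv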
Regarding python - TensorFlow ValueError: Variable does not exist, or was not created with tf.get_variable(), we found a similar question on Stack Overflow: https://stackoverflow.com/questions/43183850/