
python - Why does tf.cond() treat a tf.bool as a Python bool instead of a tf.bool?


Training a neural network to learn XOR

I am trying to use batch normalization, so I wrote a batch-normalization layer function, batch_norm1.

import tensorflow as tf
import numpy as np


def batch_norm1(x, dim_of_x, is_training, scope_name='bn1', decay=0.7):
    """
    Batch normalization on convolutional maps.
    Args:
        x: Tensor, batch_size x dim_of_x
    """
    with tf.variable_scope(scope_name):
        beta = tf.Variable(tf.constant(0.0, shape=[dim_of_x]),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[dim_of_x]),
                            name='gamma', trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')

        shadow_mean = tf.Variable(tf.constant(0.0, shape=[dim_of_x]),
                                  name='shadow_mean', trainable=False)
        shadow_var = tf.Variable(tf.constant(0.0, shape=[dim_of_x]),
                                 name='shadow_var', trainable=False)

        def mean_var_update():
            with tf.control_dependencies(
                    [tf.assign(shadow_mean, tf.mul(shadow_mean, decay) + tf.mul(batch_mean, 1. - decay)),
                     tf.assign(shadow_var, tf.mul(shadow_var, decay) + tf.mul(batch_var, 1. - decay))]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(is_training, mean_var_update, lambda: tf.identity(shadow_mean), tf.identity(shadow_var))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
        return normed


def xavier_initializer(shape):
    dim_sum = np.sum(shape)
    if len(shape) == 1:
        dim_sum += 1
    bound = np.sqrt(6.0 / dim_sum)

    return tf.random_uniform(shape, minval=-bound, maxval=bound)


with tf.Session() as sess:

    phase_train = tf.placeholder(dtype=tf.bool, shape=[])

    x_ = tf.placeholder(tf.float32, shape=[None, 2])
    y_ = tf.placeholder(tf.float32, shape=[None, 1], name="y-input")

    BN0 = batch_norm1(x_, 2, is_training=phase_train, scope_name='bn0')

    W_fc1 = tf.Variable(xavier_initializer(shape=[2, 100]), name="W1")
    mul_1 = tf.matmul(BN0, W_fc1)

    BN1 = batch_norm1(mul_1, 100, is_training=phase_train, scope_name='bn1')
    h_fc1 = tf.nn.relu(BN1)

    W_fc2 = tf.Variable(xavier_initializer(shape=[100, 100]), name="W2")
    mul_2 = tf.matmul(h_fc1, W_fc2)

    BN2 = batch_norm1(mul_2, 100, phase_train, scope_name='bn2')
    h_fc2 = tf.nn.relu(BN2)

    W_fc3 = tf.Variable(xavier_initializer(shape=[100, 100]), name="W3")
    mul_3 = tf.matmul(h_fc2, W_fc3)

    BN3 = batch_norm1(mul_3, 100, phase_train, scope_name='bn3')
    h_fc3 = tf.nn.relu(BN3)

    W_fc4 = tf.Variable(xavier_initializer(shape=[100, 100]), name="W4")
    mul_4 = tf.matmul(h_fc3, W_fc4)

    BN4 = batch_norm1(mul_4, 100, phase_train, scope_name='bn4')
    h_fc4 = tf.nn.relu(BN4)

    W_fc5 = tf.Variable(xavier_initializer(shape=[100, 1]), name="W5")
    Bias1 = tf.Variable(tf.zeros([1]), name="bias1")
    y = tf.matmul(h_fc4, W_fc5) + Bias1

    loss = tf.reduce_mean(tf.square(y - y_))

    train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)

    XOR_X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    XOR_Y = np.array([[0.0], [1.0], [1.0], [0.0]])
    data = np.append(XOR_X, XOR_Y, axis=1)

    print data

    sess.run(tf.global_variables_initializer())

    for i in range(60000):

        feed_dictionary = {x_: data[:, 0:2].reshape([-1, 2]), y_: data[:, 2].reshape([-1, 1]), phase_train: True}
        feed_dictionary_predict = {x_: data[:, 0:2].reshape([-1, 2]), y_: data[:, 2].reshape([-1, 1]), phase_train: False}
        sess.run(train_step, feed_dict=feed_dictionary)

        print('cost ', sess.run(loss, feed_dict=feed_dictionary_predict))

        for vv in tf.global_variables():
            if vv.name == "bn0/shadow_mean:0":
                print("shadow_mean_of_x_ : {0} ".format(10000000.0 * sess.run(vv)))

        print()

When I run the code above, I get the following error:

Traceback (most recent call last):
File "/home/minho/PycharmProjects/new_RL_algorithm/real_batch_normalization_test.py", line 120, in <module>
BN0=batch_norm1(x_, 2, is_training=phase_train, scope_name='bn0')
File "/home/minho/PycharmProjects/new_RL_algorithm/real_batch_normalization_test.py", line 89, in batch_norm1
mean , var = tf.cond(is_training , mean_var_update , lambda :tf.identity(shadow_mean), tf.identity(shadow_var))
File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 1756, in cond
with ops.name_scope(name, "cond", [pred]) as name:
File "/home/minho/anaconda2/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 4056, in name_scope
with g.as_default(), g.name_scope(n) as scope:
File "/home/minho/anaconda2/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2742, in name_scope
if name:
File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 547, in __nonzero__
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
TypeError: Using a `tf.Tensor` as a Python `bool` is not allowed. Use `if t is not None:` instead of `if t:` to test if a tensor is defined, and use TensorFlow ops such as tf.cond to execute subgraphs conditioned on the value of a tensor.

I used

phase_train=tf.placeholder(dtype=tf.bool,shape=[])

, which has dtype tf.bool, as the input to the batch_norm1 function. However, the error says I am using a Python bool, not a tf.bool. Can anyone tell me what is going wrong in this code?
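For reference, here is a minimal sketch (assuming TF 1.x graph mode; the names and constants are only illustrative) showing that a scalar tf.bool placeholder is accepted as the pred of tf.cond, while truth-testing the same tensor in Python raises exactly the TypeError from the traceback:

import tensorflow as tf

flag = tf.placeholder(dtype=tf.bool, shape=[])

# Truth-testing the tensor in Python triggers the same TypeError as in the traceback.
try:
    if flag:
        pass
except TypeError as e:
    print(e)  # "Using a `tf.Tensor` as a Python `bool` is not allowed. ..."

# Passing the same tensor to tf.cond as `pred` is fine.
out = tf.cond(flag, lambda: tf.constant(1.0), lambda: tf.constant(0.0))

with tf.Session() as sess:
    print(sess.run(out, feed_dict={flag: True}))   # 1.0
    print(sess.run(out, feed_dict={flag: False}))  # 0.0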

Best Answer

Try tf.cast(is_training, tf.bool). That should work. So your code would become:

mean, var = tf.cond(tf.cast(is_training, tf.bool), mean_var_update, lambda: tf.identity(shadow_mean), tf.identity(shadow_var))

Let me know if this works.
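One more detail worth noting about that line: in the TensorFlow version shown in the traceback, tf.cond has the signature cond(pred, fn1, fn2, name=None), i.e. it takes exactly two branch callables, so the fourth positional argument tf.identity(shadow_var) is bound to the name parameter, which is why the `if name:` check in the traceback ends up seeing a Tensor. Below is a stand-alone sketch (assuming TF 1.x; the shapes and variable initializers are only illustrative) of packaging both moving averages into a single false branch:

import tensorflow as tf

is_training = tf.placeholder(dtype=tf.bool, shape=[])
x = tf.placeholder(tf.float32, shape=[None, 2])

batch_mean, batch_var = tf.nn.moments(x, [0])
shadow_mean = tf.Variable(tf.zeros([2]), trainable=False)
shadow_var = tf.Variable(tf.ones([2]), trainable=False)

# Both branches are single zero-argument callables returning the same
# (mean, var) structure, so nothing spills over into the `name` argument.
mean, var = tf.cond(tf.cast(is_training, tf.bool),
                    lambda: (batch_mean, batch_var),
                    lambda: (tf.identity(shadow_mean), tf.identity(shadow_var)))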

Regarding "python - Why does tf.cond() treat a tf.bool as a Python bool instead of a tf.bool?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/42066512/
