Why does tf.cond() treat my tf.bool as a Python bool instead of a tf.bool?

Asked: 2017-02-06 11:20:08

Tags: python tensorflow deep-learning

Training an NN to learn XOR

I am experimenting with batch normalization, so I wrote a batch-normalization layer function, 'batch_norm1'.

    import tensorflow as tf 
    import numpy as np



    def batch_norm1(x, dim_of_x, is_training, scope_name='bn1', decay=0.7):
        """
        Batch normalization on convolutional maps.
        Args:
            x: Tensor, batch_size x dim_of_x
        """
        with tf.variable_scope(scope_name):

            beta = tf.Variable(tf.constant(0.0, shape=[dim_of_x]),
                               name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=[dim_of_x]),
                                name='gamma', trainable=True)
            batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')

            shadow_mean = tf.Variable(tf.constant(0.0, shape=[dim_of_x]),
                                      name='shadow_mean', trainable=False)

            shadow_var = tf.Variable(tf.constant(0.0, shape=[dim_of_x]),
                                     name='shadow_var', trainable=False)

            def mean_var_update():
                with tf.control_dependencies(
                        [tf.assign(shadow_mean, tf.mul(shadow_mean, decay) + tf.mul(batch_mean, 1. - decay)),
                         tf.assign(shadow_var, tf.mul(shadow_var, decay) + tf.mul(batch_var, 1. - decay))]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean , var = tf.cond(is_training , mean_var_update , lambda :tf.identity(shadow_mean), tf.identity(shadow_var))
            normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
        return normed


    def xavier_initializer(shape):
        dim_sum = np.sum(shape)
        if len(shape) == 1:
            dim_sum += 1
        bound = np.sqrt(6.0 / dim_sum)

        return tf.random_uniform(shape, minval=-bound, maxval=bound)




    with tf.Session() as sess:

       phase_train=tf.placeholder(dtype=tf.bool,shape=[])

       x_=tf.placeholder(tf.float32,shape=[None,2])
       y_=tf.placeholder(tf.float32, shape=[None,1], name="y-input")

       BN0=batch_norm1(x_, 2, is_training=phase_train, scope_name='bn0')

       W_fc1=tf.Variable(xavier_initializer(shape=[2,100]),name="W1")

       mul_1=tf.matmul(BN0,W_fc1)


       BN1=batch_norm1(mul_1, 100, is_training=phase_train, scope_name='bn1')

       h_fc1=tf.nn.relu(BN1)



       W_fc2=tf.Variable(xavier_initializer(shape=[100,100]),name="W2")

       mul_2=tf.matmul(h_fc1,W_fc2)


       BN2=batch_norm1(mul_2, 100, phase_train, scope_name='bn2')

       h_fc2=tf.nn.relu(BN2)


       W_fc3=tf.Variable(xavier_initializer(shape=[100,100]),name="W3")

       mul_3=tf.matmul(h_fc2,W_fc3)


       BN3=batch_norm1(mul_3, 100, phase_train, scope_name='bn3')

       h_fc3=tf.nn.relu(BN3)


       W_fc4=tf.Variable(xavier_initializer(shape=[100,100]),name="W4")

       mul_4=tf.matmul(h_fc3,W_fc4)


       BN4=batch_norm1(mul_4, 100, phase_train, scope_name='bn4')

       h_fc4=tf.nn.relu(BN4)



       W_fc5=tf.Variable(xavier_initializer(shape=[100,1]),name="W5")
       Bias1=tf.Variable(tf.zeros([1]),name="bias1")
       y=tf.matmul(h_fc4,W_fc5)+Bias1

       loss=tf.reduce_mean(tf.square(y-y_))



       train_step=tf.train.AdamOptimizer(1e-3).minimize(loss)

       XOR_X=np.array([[0.0,0.0],[0.0,1.0],[1.0,0.0],[1.0,1.0]])
       XOR_Y=np.array([[0.0],[1.0],[1.0],[0.0]])
       data=np.append(XOR_X,XOR_Y,axis=1)

       print data

       sess.run(tf.global_variables_initializer())

       for i in range(60000):

           feed_dictionary={x_:data[:,0:2].reshape([-1,2]),y_:data[:,2].reshape([-1,1]),phase_train:True}
           feed_dictionary_predict={x_:data[:,0:2].reshape([-1,2]),y_:data[:,2].reshape([-1,1]),phase_train:False}
           sess.run(train_step, feed_dict=feed_dictionary)


           print('cost   ',sess.run(loss, feed_dict=feed_dictionary_predict))

           for vv in tf.global_variables():
               if vv.name == "bn0/shadow_mean:0":
                   print ("shadow_mean_of_x_ : {0} ".format(10000000.0* sess.run(vv)))

           print()

When I run the code above, I get the following error:

    Traceback (most recent call last):
      File "/home/minho/PycharmProjects/new_RL_algorithm/real_batch_normalization_test.py", line 120, in <module>
        BN0=batch_norm1(x_, 2, is_training=phase_train, scope_name='bn0')
      File "/home/minho/PycharmProjects/new_RL_algorithm/real_batch_normalization_test.py", line 89, in batch_norm1
        mean , var = tf.cond(is_training , mean_var_update , lambda :tf.identity(shadow_mean), tf.identity(shadow_var))
      File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 1756, in cond
        with ops.name_scope(name, "cond", [pred]) as name:
      File "/home/minho/anaconda2/lib/python2.7/contextlib.py", line 17, in __enter__
        return self.gen.next()
      File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 4056, in name_scope
        with g.as_default(), g.name_scope(n) as scope:
      File "/home/minho/anaconda2/lib/python2.7/contextlib.py", line 17, in __enter__
        return self.gen.next()
      File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2742, in name_scope
        if name:
      File "/home/minho/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 547, in __nonzero__
        raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
    TypeError: Using a `tf.Tensor` as a Python `bool` is not allowed. Use `if t is not None:` instead of `if t:` to test if a tensor is defined, and use TensorFlow ops such as tf.cond to execute subgraphs conditioned on the value of a tensor.

I used

    phase_train=tf.placeholder(dtype=tf.bool,shape=[])

whose type is tf.bool, as the input to the function 'batch_norm1'. However, the error says that I am using a Python bool, not a tf.bool. Can anyone tell me what is going on in this code?
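
For reference, here is a minimal sketch (the variable names here are my own, not from the model above) confirming that a scalar tf.bool placeholder is accepted as the predicate of tf.cond() when exactly two branch functions are passed:

    import tensorflow as tf

    # A scalar tf.bool placeholder works as the predicate of tf.cond()
    # as long as only a true branch and a false branch are supplied.
    flag = tf.placeholder(dtype=tf.bool, shape=[])
    out = tf.cond(flag, lambda: tf.constant(1.0), lambda: tf.constant(0.0))

    with tf.Session() as sess:
        print(sess.run(out, feed_dict={flag: True}))   # 1.0
        print(sess.run(out, feed_dict={flag: False}))  # 0.0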

2 Answers:

Answer 0 (score: 2):

Try tf.cast(is_training, tf.bool). That should work, so your code would become:

    mean , var = tf.cond(tf.cast(is_training, tf.bool) , mean_var_update , lambda :tf.identity(shadow_mean), tf.identity(shadow_var))

Let me know if it works.

Answer 1 (score: 1):

This line does not look right to me:

    mean , var = tf.cond(is_training , mean_var_update , lambda :tf.identity(shadow_mean), tf.identity(shadow_var))

In particular, tf.identity(shadow_var) is being treated as the fourth argument to tf.cond() (i.e., the name argument) rather than as the second return value of the lambda (which is what I think you intended). Adding parentheses to build a tuple of the two tf.identity() tensors fixes the problem:

    mean, var = tf.cond(is_training,
                        mean_var_update,
                        lambda: (tf.identity(shadow_mean), tf.identity(shadow_var)))
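
For completeness, here is a minimal, self-contained sketch (the dummy tensors and names are mine, not from the question) showing that once both branches of tf.cond() return the same (mean, var) tuple structure, the graph builds and runs:

    import tensorflow as tf

    # Both branches return a two-element tuple, so no extra tensor spills
    # into tf.cond()'s `name` argument.
    is_training = tf.placeholder(tf.bool, shape=[])
    batch_mean = tf.constant([1.0, 2.0])
    batch_var = tf.constant([0.5, 0.5])
    shadow_mean = tf.Variable([0.0, 0.0], trainable=False)
    shadow_var = tf.Variable([1.0, 1.0], trainable=False)

    mean, var = tf.cond(is_training,
                        lambda: (tf.identity(batch_mean), tf.identity(batch_var)),
                        lambda: (tf.identity(shadow_mean), tf.identity(shadow_var)))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run([mean, var], feed_dict={is_training: True}))   # batch statistics
        print(sess.run([mean, var], feed_dict={is_training: False}))  # running (shadow) statistics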