Variables uninitialized despite running global_variables_initializer

Asked: 2018-04-22 20:46:44

Tags: python variables tensorflow machine-learning deep-learning

I am new to TensorFlow; I previously worked with Caffe. I am trying to implement http://cvlab.cse.msu.edu/pdfs/Tai_Yang_Liu_CVPR2017.pdf in TensorFlow.

I am having a problem with variables in TensorFlow: they are reported as uninitialized even though I have initialized them. I tried tf.get_variable instead of tf.Variable, but that did not work. Setting initializer=tf.contrib.layers.xavier_initializer() did nothing either.

My code:

import tensorflow as tf
import sys, os
import numpy as np

# xseed seeds the conv-kernel initializers in get_model; it is assigned below

def get_model(inp, train):
    #create architecture
    #db input, 128 batch size
    with tf.name_scope('input'):
        inl = tf.reshape(inp, [-1, 31, 31, 1])

    with tf.name_scope('batchnorm_scale_relu'):
        #batchnorm,scale,relu
        normed = tf.contrib.layers.batch_norm(inl, is_training=train)

        alpha = tf.Variable(tf.truncated_normal((1,), stddev=0.1), trainable=True, name="alpha")
        beta = tf.Variable(tf.truncated_normal((1,), stddev=0.1), trainable=True, name="beta")
        scaled = alpha * normed + beta

        relud = tf.nn.relu(scaled, name="relu1")

    with tf.name_scope('conv1'):
        # is this padding correct? (1-pixel zero pad + 3x3 VALID conv behaves like SAME)
        padded_input = tf.pad(relud, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        # MSRA (He) weight initialization
        convolved = tf.layers.conv2d(inputs=padded_input, filters=128, kernel_size=(3, 3), strides=(1, 1), padding="VALID",
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(seed=xseed))

    with tf.name_scope('batchnorm_scale_relu_1a'):
        #batchnorm,scale,relu
        normed_1a = tf.contrib.layers.batch_norm(convolved, is_training=train)

        alpha_1a = tf.Variable(tf.truncated_normal((1,), stddev=0.1), trainable=True, name="alpha_1a")
        beta_1a = tf.Variable(tf.truncated_normal((1,), stddev=0.1), trainable=True, name="beta_1a")
        scaled_1a = alpha_1a * normed_1a + beta_1a

        relud_1a = tf.nn.relu(scaled_1a, name="relu1_1a")

    with tf.name_scope('conv1_1a'):
        padded_input_1a = tf.pad(relud_1a, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        # MSRA (He) weight initialization
        convolved_1a = tf.layers.conv2d(inputs=padded_input_1a, filters=128, kernel_size=(3, 3), strides=(1, 1), padding="VALID",
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(seed=xseed))
    return convolved_1a

xseed = None
with tf.device("cpu"):
    inp = tf.placeholder(tf.float32, [None, 31, 31, 1])
    init_op = tf.global_variables_initializer()
    init_op2 = tf.local_variables_initializer()

    with tf.Session(config=tf.ConfigProto()) as sess:
        m = get_model(inp, True)
        sess.run(init_op)
        sess.run(init_op2)
        print(sess.run(tf.report_uninitialized_variables()))
        res = sess.run(m, feed_dict={ inp: np.zeros((1, 31, 31, 1))})

It reports these uninitialized variables:

['BatchNorm/beta' 'BatchNorm/moving_mean' 'BatchNorm/moving_variance' 'batchnorm_scale_relu/alpha' 'batchnorm_scale_relu/beta' 'conv2d/kernel' 'conv2d/bias' 'BatchNorm_1/beta' 'BatchNorm_1/moving_mean' 'BatchNorm_1/moving_variance' 'batchnorm_scale_relu_1a/alpha_1a' 'batchnorm_scale_relu_1a/beta_1a' 'conv2d_1/kernel' 'conv2d_1/bias']

It then raises an exception when the convolved tensor is evaluated:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value batchnorm_scale_relu_1a/alpha_1a
     [[Node: batchnorm_scale_relu_1a/alpha_1a/read = Identity[T=DT_FLOAT, _class=["loc:@batchnorm_scale_relu_1a/alpha_1a"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](batchnorm_scale_relu_1a/alpha_1a)]]

Please help.

1 answer:

Answer 0 (score: -1)

Solved it myself. I replaced this:

with tf.device("cpu"):
    inp = tf.placeholder(tf.float32, [None, 31, 31, 1])
    init_op = tf.global_variables_initializer()
    init_op2 = tf.local_variables_initializer()

    with tf.Session(config=tf.ConfigProto()) as sess:
        m = get_model(inp, True)
        sess.run(init_op)
        sess.run(init_op2)
        print(sess.run(tf.report_uninitialized_variables()))
        res = sess.run(m, feed_dict={ inp: np.zeros((1, 31, 31, 1))})

with this:

with tf.device("cpu"):
    inp = tf.placeholder(tf.float32, [None, 31, 31, 1])

    with tf.Session(config=tf.ConfigProto()) as sess:
        m = get_model(inp, True)
        sess.run(tf.initialize_all_variables())
        res = sess.run(tf.report_uninitialized_variables())
        #print(res) -- outputs [] (none)
        res = sess.run(m, feed_dict={ inp: np.zeros((1, 31, 31, 1))})
        print(res)

The whole point appears to be tf.initialize_all_variables() instead of tf.global_variables_initializer(), but tf.initialize_all_variables() is just a deprecated alias for tf.global_variables_initializer(). The change that actually matters is the ordering: the initializer op is now created after get_model has added its variables to the graph. An initializer op only covers the variables that exist at the moment it is created, so the original init_op, built before any variables existed, initialized nothing.
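
A minimal sketch of the same fix with the non-deprecated API, assuming the get_model defined in the question: simply create tf.global_variables_initializer() after the graph has been built.

with tf.device("cpu"):
    inp = tf.placeholder(tf.float32, [None, 31, 31, 1])

    with tf.Session(config=tf.ConfigProto()) as sess:
        m = get_model(inp, True)
        # build the initializer only after get_model has created the variables
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        print(sess.run(tf.report_uninitialized_variables()))  # outputs [] (none)
        res = sess.run(m, feed_dict={ inp: np.zeros((1, 31, 31, 1))})

Either spelling works; the rule in graph-mode TensorFlow 1.x is always: build the graph first, then create and run the initializer.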