TensorFlow (Python): Why does a neural network produce different outputs when the input is a tf.Variable instead of a tf.constant?

Posted: 2017-05-24 02:17:21

Tags: python machine-learning tensorflow

I am using a fixed neural network model, VGG16. After loading the network's weights and biases, I feed an image into the network as input.

If I pass the input as a tf.constant, I get the same output (the response of a given layer) every time I run the forward pass. However, if I make the input image a tf.Variable, the output differs from the result I got with tf.constant, and it changes every time I run it.

Here are some code samples:

vgg.py

import numpy as np
import tensorflow as tf

VGG_MEAN = [103.939, 116.779, 123.68]

class Vgg16:
    def __init__(self, vgg16_npy_path=None):
        # Load the pretrained weights and biases from the .npy file into a dict.
        self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
        print("npy file loaded")

        self.layers = {}

    def build(self, rgb):
        """
        load variable from npy to build the VGG
        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        """

        rgb_scaled = rgb * 255.0
        # Convert RGB to BGR
        red, green, blue = tf.split(rgb_scaled, 3, 3)
        assert red.get_shape().as_list()[1:] == [224, 224, 1]
        assert green.get_shape().as_list()[1:] == [224, 224, 1]
        assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        bgr = tf.concat([
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2],
        ], 3)
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]

        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')
        self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
        self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
        self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')
        self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
        self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
        self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
        self.pool5 = self.max_pool(self.conv5_3, 'pool5')
        self.fc6 = self.fc_layer(self.pool5, "fc6")
        assert self.fc6.get_shape().as_list()[1:] == [4096]
        self.relu6 = tf.nn.relu(self.fc6)
        self.fc7 = self.fc_layer(self.relu6, "fc7")
        self.relu7 = tf.nn.relu(self.fc7)
        self.fc8 = self.fc_layer(self.relu7, "fc8")
        self.prob = tf.nn.softmax(self.fc8, name="prob")

    def avg_pool(self, bottom, name):
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        filt = self.get_conv_filter(name)
        conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
        conv_biases = self.get_bias(name)
        bias = tf.nn.bias_add(conv, conv_biases)
        relu = tf.nn.relu(bias)
        return relu

    def fc_layer(self, bottom, name):
        shape = bottom.get_shape().as_list()
        dim = 1
        for d in shape[1:]:
            dim *= d
        x = tf.reshape(bottom, [-1, dim])

        weights = self.get_fc_weight(name)
        biases = self.get_bias(name)

        fc = tf.nn.bias_add(tf.matmul(x, weights), biases)

        return fc

    def get_conv_filter(self, name):
        # The pretrained parameters are wrapped in tf.constant, so the
        # network weights themselves are fixed; only the input differs
        # between the runs in test.py below.
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")

    def get_fc_weight(self, name):
        return tf.constant(self.data_dict[name][0], name="weights")

test.py

import tensorflow as tf
import utils
import vgg

img = utils.load_image(img_path).reshape(1, 224, 224, 3).astype('float32')
model = vgg.Vgg16(vgg16_npy_path)
sess = tf.Session()

input_tensor = tf.constant(img)
model.build(input_tensor)
res1 = sess.run(model.fc8)

input_var = tf.Variable(img)
model.build(input_var)
init_op = tf.global_variables_initializer()
sess.run(init_op)
res2 = sess.run(model.fc8)

input_tensor2 = tf.constant(img)
model.build(input_tensor2)
res3 = sess.run(model.fc8)

print(res1 == res2) # false
print(res1 == res3) # true

In test.py, res1 and res2 are different, which makes no sense, since both are produced from the same input image.
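Note also that == on NumPy arrays compares elementwise and prints an array rather than a single boolean. A minimal sketch of a more informative check, assuming res1 and res2 are the NumPy arrays returned by sess.run above:

import numpy as np

print(np.array_equal(res1, res2))           # True only if every element matches
print(np.allclose(res1, res2, atol=1e-5))   # True if equal up to float tolerance
print(np.abs(res1 - res2).max())            # magnitude of the largest deviation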

How can I fix this? Any help would be appreciated!

1 Answer:

Answer 0 (score: 0)

This could be an interaction between constant folding and non-deterministic floating-point math. Does the problem still occur if you disable constant folding by creating your session as follows?

sess = tf.Session(config=tf.ConfigProto(
    graph_options=tf.GraphOptions(
        optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0))))

Some floating-point non-determinism is expected in TensorFlow models because of parallelism.
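A minimal sketch of how this suggestion could be applied to the test script above, reusing img and vgg16_npy_path from the question (opt_level L0 is the TF 1.x setting that switches off graph optimizations, including constant folding; if the remaining gap is only parallelism noise, the tolerance check should pass):

import numpy as np
import tensorflow as tf
import vgg

# Create a session with graph optimizations (constant folding included) disabled.
opts = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
config = tf.ConfigProto(graph_options=tf.GraphOptions(optimizer_options=opts))
sess = tf.Session(config=config)

model = vgg.Vgg16(vgg16_npy_path)

input_tensor = tf.constant(img)
model.build(input_tensor)
res_const = sess.run(model.fc8)

input_var = tf.Variable(img)
model.build(input_var)   # note: this rebuild overwrites model.fc8 with a new graph
sess.run(tf.global_variables_initializer())
res_var = sess.run(model.fc8)

# With constant folding off, any remaining difference should be small float noise.
print(np.abs(res_const - res_var).max())
print(np.allclose(res_const, res_var, atol=1e-4))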