'No gradients provided for any variable' when training a convolutional autoencoder

Date: 2017-07-08 16:52:17

Tags: python tensorflow neural-network autoencoder

I'm trying to build a convolutional autoencoder, but I've run into a problem. Here is the code:

import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

img = mpimg.imread('data.jpg')

x = (img-np.mean(img))/np.std(img)
y = img

epochs = 500

def autoencoder(x, weights):
    global output
    output = tf.nn.conv2d([x], weights[0], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[1], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[2], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[3], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)

    output = tf.nn.conv2d(output, weights[4], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [50, 38])
    output = tf.nn.conv2d(output, weights[5], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [100, 76])
    output = tf.nn.conv2d(output, weights[6], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [200, 152])
    output = tf.nn.conv2d(output, weights[7], strides=[1,1,1,1],padding='SAME')

weights = [tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3]))]

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for e in range(epochs):
        print('epoch:',e+1)
        autoencoder(tf.cast(x,tf.float32), weights)
        plt.imshow(output.eval()[0])
        plt.savefig(str(e+1)+'.png')
        cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output.eval()[0],y)))
        tf.train.AdamOptimizer().minimize(cost)

This is the error:

Traceback (most recent call last):
  File "D:\Kay\Tensorflow\Session 3\Autoencoder.py", line 56, in <module>
    tf.train.AdamOptimizer().minimize(cost)
  File "C:\Users\Katharina\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\training\optimizer.py", line 276, in minimize
    ([str(v) for _, v in grads_and_vars], loss))
ValueError: No gradients provided for any variable, check your graph for ops that do not support gradients, between variables ['Tensor("Variable/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_1/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_2/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_3/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_4/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_5/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_6/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_7/read:0", shape=(5, 5, 3, 3), dtype=float32)'] and loss Tensor("Mean_1:0", shape=(), dtype=float32).

Can anyone help me?

1 Answer:

Answer 0 (score: 0)

Your code doesn't even run as it stands, but with a bit of rewriting you can get code that actually does. (The error itself arises because the cost is built from output.eval()[0], which is a plain NumPy array, so the loss tensor has no connection in the graph to the weight variables and TensorFlow cannot compute gradients for them.)

import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import scipy.misc


img = scipy.misc.imresize(scipy.misc.face(), [200, 152])[None, :]

x = (img-np.mean(img))/np.std(img)
y = img

epochs = 500

def autoencoder(x, weights):
    output = tf.nn.conv2d(x, weights[0], strides=[1,1,1,1],padding='SAME')  # x already has a batch dimension
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[1], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[2], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[3], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)

    output = tf.nn.conv2d(output, weights[4], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [50, 38])
    output = tf.nn.conv2d(output, weights[5], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [100, 76])
    output = tf.nn.conv2d(output, weights[6], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [200, 152])
    output = tf.nn.conv2d(output, weights[7], strides=[1,1,1,1],padding='SAME')
    return output

weights = [tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3]))]

output = autoencoder(tf.cast(x, tf.float32), weights)

cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, y)))

train_op = tf.train.AdamOptimizer().minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for e in range(epochs):
        print('epoch:',e+1)

        output_result, _ = sess.run([output, train_op])

Even so, it returns garbage values and doesn't work very well.

You have several mistakes:

  • You create a new optimizer on every iteration
  • You use eval instead of sess.run
  • Your filters are very small, and only 3 channels per layer is far too few
  • You have no bias terms (a minimal sketch of adding one follows this list)
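
To illustrate the last point, here is a minimal sketch (my own illustration, not part of the original answer) of a single layer with an explicit bias variable in the question's tf.nn.conv2d style. The tf.layers.conv2d calls in the working version below already add one bias per filter by default (use_bias=True).

import tensorflow as tf

# hypothetical single layer: explicit filter weights plus a bias term
x_in = tf.placeholder(tf.float32, [None, 200, 152, 3])

w = tf.Variable(tf.random_normal([5, 5, 3, 3], stddev=0.1))  # filter weights
b = tf.Variable(tf.zeros([3]))                               # one bias per output channel

conv = tf.nn.conv2d(x_in, w, strides=[1, 1, 1, 1], padding='SAME')
conv = tf.nn.bias_add(conv, b)   # add the bias before the nonlinearity
out = tf.nn.relu(conv)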

In any case, a short working version of the code is given below, though there is still a lot of room for improvement.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc


img = scipy.misc.imresize(scipy.misc.face(), [200, 152])[None, :]

x = (img-np.mean(img))/np.std(img)
y = img

epochs = 50000


def apply_conv(x, strides=1, filters=32, activation=tf.nn.relu):
    return tf.layers.conv2d(x, strides=strides, filters=filters, kernel_size=3, padding='SAME',
                            kernel_initializer=tf.contrib.layers.xavier_initializer(),
                            activation=activation)  # use the passed-in activation so the final layer can be linear

def autoencoder(x):
    output = apply_conv(x, strides=1)
    output = apply_conv(output, strides=2)
    output = apply_conv(output, strides=2)
    output = apply_conv(output, strides=2)
    output = apply_conv(output, strides=1)

    output = tf.image.resize_images(output, [50, 38])
    output = apply_conv(output, strides=1)

    output = tf.image.resize_images(output, [100, 76])
    output = apply_conv(output, strides=1)

    output = tf.image.resize_images(output, [200, 152])
    output = apply_conv(output, strides=1, filters=3, activation=None)

    return output

output = autoencoder(tf.cast(x, tf.float32))

cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, y)))

train_op = tf.train.AdamOptimizer().minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for e in range(epochs):
        print('epoch:',e+1)

        output_result, cost_result, _ = sess.run([output, cost, train_op])
        print('cost = {}'.format(cost_result))

        if e % 20 == 0:
            plt.imshow(output_result[0].astype('uint8'))
            plt.pause(0.0001)  # wait for plot to show
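
If you prefer to write the reconstructions to disk every few epochs, as the original code did with plt.savefig, a small helper along these lines could replace the plt.imshow/plt.pause pair inside the loop (a sketch; the helper name is my own):

import numpy as np
import matplotlib.pyplot as plt

def save_reconstruction(epoch, output_result):
    # save the first (and only) image in the batch as e.g. '21.png'
    plt.imsave(str(epoch + 1) + '.png', output_result[0].astype(np.uint8))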