Getting the output of a convolutional autoencoder in TensorFlow

Asked: 2017-03-30 08:35:22

Tags: python tensorflow neural-network deep-learning

I have a convolutional autoencoder in TensorFlow. I want to feed its encoded output to an LSTM, but I don't know how to connect the two networks. One approach I have considered is saving the encoder's outputs to a file and having a separate LSTM read them from there.

If I have an input x, how do I feed it to the network and get back the encoded version? And likewise, how would I decode it?

Would encoding look something like this?

new_batch = sess.run([hidden2], feed_dict={x:x_batch})
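
For reference, a minimal sketch of both directions, assuming the graph defined below and an initialized session. In TensorFlow 1.x a feed_dict key may be any feedable tensor, not just a placeholder, so the decoder half can be driven from a precomputed encoding:

# Encode: run the graph up to the bottleneck tensor (hidden2 below).
encoded = sess.run(hidden2, feed_dict={x: x_batch})

# Decode: feed the encoding directly into hidden2 and run the rest of
# the graph; any feedable tensor may serve as a feed_dict key in TF 1.x.
decoded = sess.run(outputs, feed_dict={hidden2: encoded})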

Is there a better way, i.e. building the RNN directly on top of the encoder? I can't seem to find any examples/tutorials on this. A sketch of what that might look like is shown below.
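
One possible direct wiring, as a hedged sketch only: flatten the bottleneck activations of the encoder defined below and feed them to tf.nn.dynamic_rnn as a [batch, time, features] tensor. The timestep grouping and the n_timesteps / n_lstm_units values are assumptions, not part of the original code:

# Hypothetical: treat each encoded frame as one LSTM timestep.
# Assumes the incoming batch holds a multiple of n_timesteps frames.
n_timesteps = 10   # assumed sequence length
n_lstm_units = 64  # assumed LSTM size

code = tf.reshape(hidden2, [-1, n_timesteps, 24 * 12 * n_hidden2])
cell = tf.contrib.rnn.BasicLSTMCell(n_lstm_units)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, code, dtype=tf.float32)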

import tensorflow as tf

n_inputs = 12 * 24
n_hidden1 = 150
n_hidden2 = 80
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 1e-4

def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')


initializer = tf.contrib.layers.variance_scaling_initializer(mode="FAN_AVG", uniform=True)

x = tf.placeholder(tf.float32, [None, n_inputs], name='input_placeholder') # x: batch_size x n_inputs
batch_size = tf.shape(x)[0]
print("x shape:", x.get_shape())

x_image = tf.reshape(x, shape=[-1, 24, 12, 1])
print("x_image shape:", x_image.get_shape())

weight1 = tf.Variable(initializer([3, 3, 1, n_hidden1]))
print("weight1 shape:", weight1.get_shape())
bias1 = tf.Variable(tf.zeros(n_hidden1))
print("bias1 shape:", bias1.get_shape())
hidden1 = tf.nn.relu(conv2d(x_image, weight1)+bias1)
print("hidden1 shape:", hidden1.get_shape())

weight2 = tf.Variable(initializer([3, 3, n_hidden1, n_hidden2]))
print("weight2 shape:", weight2.get_shape())
bias2 = tf.Variable(tf.zeros(n_hidden2))
print("bias2 shape:", bias2.get_shape())
hidden2 = tf.nn.relu(conv2d(hidden1, weight2)+bias2)
print("hidden2 shape:", hidden2.get_shape())

weight3 = tf.Variable(initializer([3, 3, n_hidden2, n_hidden3]))
print("weight3 shape:", weight3.get_shape())
bias3 = tf.Variable(tf.zeros(n_hidden3))
print("bias3 shape:", bias3.get_shape())
hidden3 = tf.nn.relu(conv2d(hidden2, weight3)+bias3)
print("hidden3 shape:", hidden3.get_shape())

hidden3_reshaped = tf.reshape(hidden3, shape=[-1, n_outputs * n_hidden3])
print("hidden3 reshaped:", hidden3_reshaped.get_shape())

weight4 = tf.Variable(initializer([n_outputs * n_hidden3, n_outputs]))
print("weight4 shape:", weight4.get_shape())
bias4 = tf.Variable(tf.zeros(n_outputs))
print("bias4 shape:", bias4.get_shape())
outputs = tf.matmul(hidden3_reshaped, weight4)+bias4
print("outputs shape:", outputs.get_shape())

mse = tf.reduce_mean(tf.square(outputs - x))
loss = mse

optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)

#init = tf.global_variables_initializer()
#saver = tf.train.Saver()


#def train_network(g, num_epochs):
with tf.Session() as sess:
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    # gen_epochs is a user-defined generator that yields one batch iterator per epoch
    for idx, batch in enumerate(gen_epochs(3)):
        epoch_loss = 0
        print("epoch", idx)
        step = 0
        for x_batch in batch:
            step += 1
            _, b_loss = sess.run([training_op, loss], feed_dict={x: x_batch})
            print("batch", step, ", batch loss:", b_loss)
            epoch_loss += b_loss
        print("epoch average loss:", epoch_loss / step)

    # saving the session to a file
    saver.save(sess, "saves/autoencoder")
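
To pursue the save-to-file approach mentioned above, the trained encoder could be restored and its bottleneck activations dumped for a separate LSTM to consume. A sketch under assumptions: numpy is available, x_batch holds the frames to encode, and "encodings.npy" is an invented file name:

import numpy as np

with tf.Session() as sess:
    # Restore the checkpoint written by saver.save() above.
    saver.restore(sess, "saves/autoencoder")
    # Run only the encoder half and flatten each frame's activations.
    encoded = sess.run(hidden2, feed_dict={x: x_batch})
    np.save("encodings.npy", encoded.reshape(encoded.shape[0], -1))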

0 Answers:

No answers yet.