I am experimenting with an autoencoder in TensorFlow, and I want to extract the hidden layer to reduce the dimensionality of my original features.
I defined a basic autoencoder as follows; its "encoding" hidden layer has size 35, starting from 151 input features:
import tensorflow as tf

num_inputs = 151   # number of input features
num_hid1 = 75
num_hid2 = 35      # size of the "encoding" layer
num_hid3 = num_hid1
num_output = num_inputs
lr = 0.01
actf = tf.nn.relu

X = tf.placeholder(tf.float32, shape=[None, num_inputs])
initializer = tf.variance_scaling_initializer()

w1 = tf.Variable(initializer([num_inputs, num_hid1]), dtype=tf.float32)
w2 = tf.Variable(initializer([num_hid1, num_hid2]), dtype=tf.float32)
w3 = tf.Variable(initializer([num_hid2, num_hid3]), dtype=tf.float32)
w4 = tf.Variable(initializer([num_hid3, num_output]), dtype=tf.float32)

b1 = tf.Variable(tf.zeros(num_hid1))
b2 = tf.Variable(tf.zeros(num_hid2))
b3 = tf.Variable(tf.zeros(num_hid3))
b4 = tf.Variable(tf.zeros(num_output))

# encoder
hid_layer1 = actf(tf.matmul(X, w1) + b1)
hid_layer2 = actf(tf.matmul(hid_layer1, w2) + b2)   # the 35-dim encoding
# decoder
hid_layer3 = actf(tf.matmul(hid_layer2, w3) + b3)
output_layer = actf(tf.matmul(hid_layer3, w4) + b4)

# reconstruction loss
loss = tf.reduce_mean(tf.square(output_layer - X))
optimizer = tf.train.AdamOptimizer(lr)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

num_epoch = 5
# batch_size = 150
num_test_images = 10

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epoch):
        X_batch = data   # my full dataset, defined elsewhere (full-batch training)
        sess.run(train, feed_dict={X: X_batch})
        train_loss = loss.eval(feed_dict={X: X_batch})
        print("epoch {} loss {}".format(epoch, train_loss))
    results = output_layer.eval(feed_dict={X: data[:num_test_images]})
How do I get the hidden layer? I want the corresponding vector of length 35 for every sample.
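My assumption is that, since hid_layer2 is just another tensor in the graph, I can evaluate it the same way I evaluate output_layer, inside the same session after training. A minimal sketch of what I have in mind (data is my full dataset, as above):

# inside the same `with tf.Session() as sess:` block, after the training loop
codes = hid_layer2.eval(feed_dict={X: data})   # expected shape: (num_samples, 35)
print(codes.shape)

Is this the correct way to extract the length-35 encodings, or do I need to do something else?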