TensorFlow error: Failed to convert object of type <class 'dict'> to Tensor

Date: 2018-06-08 19:57:31

Tags: python tensorflow

I am trying to build and train an autoencoder with the code below. When I train the model, I get an error saying a dict cannot be converted to a Tensor. I think the problem is in my model, but I cannot find the mistake. Can anyone help me with this? Thanks.

X = tf.placeholder("float", [None, num_input])

weights = {
    'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input]))}

biases = {
    'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([num_input])),}

# Building the encoder
def encoder(x):
    # Encoder hidden layer with ReLU activation #1
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    # Encoder hidden layer with ReLU activation #2
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['encoder_h2']),biases['encoder_b2']))
    return layer_2

# Building the decoder
def decoder(x):
    # Decoder hidden layer with ReLU activation #1
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    # Decoder hidden layer with ReLU activation #2
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
    return layer_2

# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)

# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X

# Define loss and optimizer, minimize the squared error
loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)


# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# Start Training
# Start a new TF session
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graph', sess.graph)
    # Run the initializer
    sess.run(init)
    saver.save(sess,'./mark2',global_step = 1)
    # Training
    for i in range(0, num_steps):
        # Prepare Data
        fname = "md_0_2_new."+str(i)
        train_batch = np.reshape(minibatch(fname)[:,:,0],[10,num_input],order="f")
        print(np.shape(train_batch))
        for j in range(0,10):
            _, l = sess.run([optimizer, loss], feed_dict={X:np.reshape(train_batch[j],[-1,num_input])})

        tf.summary.scalar('loss', l)
        tf.summary.scalar('weights',weights)
        if i % display_step == 0:
            print('Step %i: Minibatch Loss: %f' % (i, l))

Error message:

TypeError: Failed to convert object of type <class 'dict'> to Tensor. Contents: {'encoder_h1': <tf.Variable 'Variable:0' shape=(33876, 256) dtype=float32_ref>, 'encoder_h2': <tf.Variable 'Variable_1:0' shape=(256, 128) dtype=float32_ref>, 'decoder_h1': <tf.Variable 'Variable_2:0' shape=(128, 256) dtype=float32_ref>, 'decoder_h2': <tf.Variable 'Variable_3:0' shape=(256, 33876) dtype=float32_ref>}. Consider casting elements to a supported type.

1 Answer:

Answer 0 (score: 1):

The error comes from this line: tf.summary.scalar('weights', weights). The input to tf.summary.scalar should be a tensor, not a dict. So to record your weights, you need to do something like:

tf.summary.scalar('weights_h1',weights['encoder_h1'])
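
Two related caveats, offered as a sketch rather than a definitive fix: tf.summary.scalar only accepts a scalar tensor, and weights['encoder_h1'] is a matrix, so tf.summary.histogram is usually the appropriate op for a whole weight tensor. Also, in TF 1.x the summary ops should be created once outside the training loop, merged, evaluated in sess.run, and written with the FileWriter. A minimal sketch, assuming TensorFlow 1.x and using illustrative names such as merged_summary and batch:

# Create the summary ops once, before the training loop
loss_summary = tf.summary.scalar('loss', loss)
for name, var in weights.items():
    tf.summary.histogram(name, var)   # histograms accept full weight matrices
merged_summary = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graph', sess.graph)
    sess.run(init)
    for step in range(num_steps):
        batch = ...  # your minibatch, reshaped to [-1, num_input]
        # Evaluate the merged summary together with the train op and loss
        _, l, summary = sess.run([optimizer, loss, merged_summary],
                                 feed_dict={X: batch})
        # Write the serialized summary so TensorBoard can display it
        writer.add_summary(summary, global_step=step)

With this structure the summaries are plain tensors, so the original "Failed to convert object of type <class 'dict'> to Tensor" error no longer occurs.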