Simple RNN network - ValueError: setting an array element with a sequence

Asked: 2017-07-20 09:17:01

Tags: python tensorflow deep-learning rnn

I have completely lost patience with TF and Python; I can't get this to work. Calling sess.run raises "ValueError: setting an array element with a sequence." on testx.

I've tried many different things... it's as if TF is broken. Can anyone help?

import tensorflow as tf
import numpy as np

nColsIn = 1
nSequenceLen = 4
nBatches = 8
nColsOut = 1
rnn_size = 228

modelx = tf.placeholder("float",[None,nSequenceLen,1])
modely = tf.placeholder("float",[None,nColsOut])

testx = [tf.convert_to_tensor(np.zeros([nColsIn,nBatches])) for b in range(nSequenceLen)]
testy = np.zeros([nBatches, nColsOut])

layer = {
    'weights': tf.Variable(tf.random_normal([rnn_size, nColsOut],dtype=tf.float64),),
    'biases': tf.Variable(tf.random_normal([nColsOut],dtype=tf.float64))}

lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size, forget_bias=1.0)
outputs, states = tf.nn.static_rnn(lstm_cell,modelx ,dtype=tf.float64)
prediction = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=modely))
optimizer = tf.train.AdamOptimizer().minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(modely, 1));
    accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

    _, epoch_loss = sess.run([optimizer, cost], feed_dict={modelx: testx, modely: testy})
    print('Epoch Loss: ',epoch_loss,' Accuracy: ', accuracy.eval({modelx: testx, modely: testy}))

1 Answer:

Answer 0 (score: 1):

This is probably what you want. You will find some remarks in the comments inside the code.

import tensorflow as tf
import numpy as np

nColsIn = 1
nSequenceLen = 4
nBatches = 8
nColsOut = 1
rnn_size = 228

# Since static_rnn is used, the input has to be a list of nSequenceLen placeholders, one per time step
modelx = [tf.placeholder(tf.float64,[nBatches, nColsIn]) for _ in range(nSequenceLen)]
modely = tf.placeholder(tf.float64,[None,nColsOut])

# testx should be a list of numpy arrays; feed values are not part of the graph
testx = [np.zeros([nBatches,nColsIn]) for _ in range(nSequenceLen)]
testy = np.zeros([nBatches, nColsOut])

layer = {
    'weights': tf.Variable(tf.random_normal([rnn_size, nColsOut],dtype=tf.float64),),
    'biases': tf.Variable(tf.random_normal([nColsOut],dtype=tf.float64))}

lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size, forget_bias=1.0)
# Replaced testx by modelx
outputs, states = tf.nn.static_rnn(lstm_cell,modelx, dtype=tf.float64)
# outputs is a list of nSequenceLen tensors, each of shape (nBatches, rnn_size);
# you probably want the last one in the sequence direction
prediction = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=modely))
optimizer = tf.train.AdamOptimizer().minimize(cost)

if __name__ == '__main__':
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(modely, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        feed_dict = {k: v for k,v in zip(modelx, testx)}
        feed_dict[modely] = testy
        _, epoch_loss = sess.run([optimizer, cost], feed_dict=feed_dict)
        print('Epoch Loss: ',epoch_loss,' Accuracy: ', accuracy.eval(feed_dict))
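
Side note (my own addition, not part of the answer above): if you would rather keep a single 3-D placeholder as in the original question, tf.nn.dynamic_rnn accepts a [batch, time, features] tensor directly, so testx can stay one numpy array instead of a list of per-step arrays. A minimal sketch under that assumption (TF 1.x):

import tensorflow as tf
import numpy as np

nColsIn, nSequenceLen, nBatches, nColsOut, rnn_size = 1, 4, 8, 1, 228

# One 3-D placeholder: [batch, time, features]
modelx = tf.placeholder(tf.float64, [None, nSequenceLen, nColsIn])
modely = tf.placeholder(tf.float64, [None, nColsOut])

# Feed values are plain numpy arrays
testx = np.zeros([nBatches, nSequenceLen, nColsIn])
testy = np.zeros([nBatches, nColsOut])

layer = {
    'weights': tf.Variable(tf.random_normal([rnn_size, nColsOut], dtype=tf.float64)),
    'biases': tf.Variable(tf.random_normal([nColsOut], dtype=tf.float64))}

lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size, forget_bias=1.0)
# dynamic_rnn consumes the 3-D tensor directly; outputs has shape [batch, time, rnn_size]
outputs, states = tf.nn.dynamic_rnn(lstm_cell, modelx, dtype=tf.float64)
# Use only the last time step for the prediction
prediction = tf.matmul(outputs[:, -1, :], layer['weights']) + layer['biases']

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=modely))
optimizer = tf.train.AdamOptimizer().minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, epoch_loss = sess.run([optimizer, cost],
                             feed_dict={modelx: testx, modely: testy})
    print('Epoch Loss:', epoch_loss)

With dynamic_rnn the feed_dict stays as simple as {modelx: testx, modely: testy}, which is the feed style the original question attempted.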