TensorFlow RNN placeholder ValueError

Time: 2018-03-01 20:09:38

Tags: python-3.x tensorflow

I have been trying to follow along with an RNN tutorial using TensorFlow. It is a simple program, but for some reason it won't run on TF v1.5.

I have tried to follow the tutorial exactly and have looked through the comments section, but to no avail. Somehow I can't figure out how to solve the problem.

Here is my code:

import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt

get_ipython().magic('matplotlib inline')


print('Tensorflow Version: %s' % tf.__version__)


# number of iterations 
num_epochs = 1000

total_series_length = 50000

truncated_backprop_length = 15

state_size = 4 

num_classes = 2 

echo_step = 3

batch_size = 5

num_batches = total_series_length//batch_size//truncated_backprop_length


def generate_data():
    x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
    y = np.roll(x, echo_step)

    x = x.reshape(batch_size, -1)
    y = y.reshape(batch_size, -1)

    return x, y


batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(tf.int64, [batch_size, truncated_backprop_length])


# RNN state, saved output from the previous run
init_state = tf.placeholder(tf.float32, [batch_size, state_size])


W = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)
b = tf.Variable(np.zeros((1, state_size)), dtype=tf.float32)


W2 = tf.Variable(np.random.rand(state_size, num_classes), dtype=tf.float32)
b2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)


input_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1)


# forward pass
current_state = init_state

state_series = []

for current_input in input_series:
    current_input = tf.reshape(current_input, [batch_size, 1])
    # Increasing number of columns
    input_and_state_concatenated = tf.concat(values=[current_input, current_state], axis=1)


    # Broadcasted addition 
    next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b)
    state_series.append(next_state)
    current_state = next_state



# Broadcast addition
logits_series = [tf.matmul(state, W2) + b2 for state in state_series]
prediction_series = [tf.nn.softmax(logits) for logits in logits_series]

losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series, labels_series)]
total_loss = tf.reduce_mean(losses)

train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)




def plot(loss_list, prediction_series, batchX, batchY):
    plt.subplot(2, 3, 1)
    plt.cla()
    plt.plot(loss_list)

    for batch_series_idx in range(5):
        one_hot_output_series = np.array(prediction_series)[:,batch_series_idx, :]
        single_output_series = np.array([(1 if out[0]<0.5 else 0) for out in one_hot_output_series])

        plt.subplot(2, 3, batch_series_idx + 2)
        plt.cla()
        plt.axis([0, truncated_backprop_length, 0, 2])
        left_offset = range(truncated_backprop_length)
        plt.bar(left_offset, batchX[batch_series_idx, :], width=1, color='blue')
        plt.bar(left_offset, batchY[batch_series_idx, :] * 0.5, width=1, color='red')
        plt.bar(left_offset, single_output_series * 0.3, width=1, color='green')

    plt.draw()
    plt.pause(0.0001)



with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    plt.ion()
    plt.figure()
    plt.show()
    loss_list = []


    for epoch_idx in range(num_epochs):
        x, y = generate_data()
        _current_state = np.zeros((batch_size, state_size))

        print('New data, epoch', epoch_idx)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * truncated_backprop_length
            end_idx = start_idx * truncated_backprop_length

            batchX = x[:,start_idx:end_idx]
            batchY = y[:,start_idx:end_idx]

            _total_loss, _train_step, _current_state, _predictions_series = sess.run(
            [total_loss, train_step, current_state, prediction_series],
            feed_dict={
                batchX_placeholder:batchX,
                batchY_placeholder:batchY,
                init_state:_current_state
            })

            loss_list.append(_total_loss)

            if batch_idx%100 == 0:
                print('Step ', batch_idx, ' Loss ', _total_loss)
                plot(loss_list, _predictions_series, batchX, batchY)

plt.ioff()
plt.show()

I am struggling with the following error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-45-1c187b75429b> in <module>()
     25                 batchX_placeholder:batchX,
     26                 batchY_placeholder:batchY,
---> 27                 init_state:_current_state
     28             })
     29 

~\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    893     try:
    894       result = self._run(None, fetches, feed_dict, options_ptr,
--> 895                          run_metadata_ptr)
    896       if run_metadata:
    897         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1102                 'Cannot feed value of shape %r for Tensor %r, '
   1103                 'which has shape %r'
-> 1104                 % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
   1105           if not self.graph.is_feedable(subfeed_t):
   1106             raise ValueError('Tensor %s may not be fed.' % subfeed_t)

ValueError: Cannot feed value of shape (5, 0) for Tensor 'Placeholder_6:0', which has shape '(5, 15)'

1 Answer:

Answer 0 (score: 0)

The reason is that the shape of batchX (and likewise batchY) does not match the shape of batchX_placeholder (batchY_placeholder). You can verify this by printing both shapes just before the sess.run call:
print(np.shape(batchX)) 
print(np.shape(batchX_placeholder))
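
On the very first batch this prints (5, 0) for batchX against the placeholder's (5, 15), exactly the two shapes in the ValueError. The empty slice almost certainly comes from the end index in the training loop, which is multiplied instead of offset: with batch_idx = 0, start_idx is 0, so end_idx = start_idx * truncated_backprop_length is also 0 and x[:, 0:0] is empty. A minimal sketch of the corrected slicing, assuming each batch is meant to cover one window of truncated_backprop_length columns:

for batch_idx in range(num_batches):
    start_idx = batch_idx * truncated_backprop_length
    end_idx = start_idx + truncated_backprop_length  # add the window length, do not multiply

    batchX = x[:, start_idx:end_idx]  # now (5, 15), matching batchX_placeholder
    batchY = y[:, start_idx:end_idx]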

Hope this helps.