Cannot feed np.zeros to tf.placeholder

Time: 2016-10-13 04:57:28

Tags: python numpy tensorflow

I have recently been learning TensorFlow, and I am unable to feed numeric data of the corresponding shape and type into a placeholder in TensorFlow. This code is adapted from https://github.com/SullyChen/Nvidia-Autopilot-TensorFlow

My model file:

import tensorflow as tf
import scipy
from tensorflow.python.ops import rnn, rnn_cell


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W, stride):
    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='VALID')



class AutopilotRCNModel(object):
    """
    Use Recurrent Neural Networks in combination with Convolutional Neural
    Networks to predict steering wheel angle.
    """
    def __init__(self, n_hidden=50):

        # Create a state variable to be passed in through feed_dict
        self.c_state = tf.placeholder(tf.float32, shape=[1, n_hidden])
        self.h_state = tf.placeholder(tf.float32, shape=[1, n_hidden])

        # Do the same with the input x and the target variable y_
        self.x = tf.placeholder(tf.float32, shape=[None, 66, 200, 3])
        self.y_ = tf.placeholder(tf.float32, shape=[None, 1])

        x_image = self.x

        # First convolutional layer.
        W_conv1 = weight_variable([5, 5, 3, 24])
        b_conv1 = bias_variable([24])

        # Stride is 2.
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, 2) + b_conv1)

        # Second convolutional layer.
        W_conv2 = weight_variable([5, 5, 24, 36])
        b_conv2 = bias_variable([36])

        # Stride still set to 2.
        h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2, 2) + b_conv2)

        # Third convolutional layer.
        W_conv3 = weight_variable([5, 5, 36, 48])
        b_conv3 = bias_variable([48])

        # Stride still 2.
        h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 2) + b_conv3)

        # Fourth convolutional layer.
        W_conv4 = weight_variable([3, 3, 48, 64])
        b_conv4 = bias_variable([64])

        # Stride now set to 1.
        h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4, 1) + b_conv4)

        # Fifth convolutional layer.
        W_conv5 = weight_variable([3, 3, 64, 64])
        b_conv5 = bias_variable([64])

        # Stride of 1.
        h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5, 1) + b_conv5)

        # Fully connected layer 1.
        W_fc1 = weight_variable([1152, 1164])
        b_fc1 = bias_variable([1164])

        # Requires flattening out the activations.
        h_conv5_flat = tf.reshape(h_conv5, [-1, 1152])
        h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)

        self.keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Fully connected layer 2.
        W_fc2 = weight_variable([1164, 100])
        b_fc2 = bias_variable([100])

        h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

        h_fc2_drop = tf.nn.dropout(h_fc2, self.keep_prob)

        print(h_fc2_drop.get_shape())


        # LSTM layer 1

        # Create weight matrix and bias to map from output of LSTM
        # to steering wheel angle in radians.

        # Input gate input weights, recurrent weights, and bias.
        W_i = weight_variable([100, n_hidden])
        U_i = weight_variable([n_hidden, n_hidden])
        b_i = bias_variable([n_hidden])

        # Forget gate input weights, recurrent weights, and bias.
        W_f = weight_variable([100,n_hidden])
        U_f = weight_variable([n_hidden, n_hidden])
        b_f = bias_variable([n_hidden])

        # Candidate gate input weights, recurrent weights, and bias.
        W_c = weight_variable([100, n_hidden])
        U_c = weight_variable([n_hidden,n_hidden])
        b_c = bias_variable([n_hidden])

        # Output gate input weights, recurrent weights and bias.
        W_o = weight_variable([100, n_hidden])
        U_o = weight_variable([n_hidden, n_hidden])
        b_o = bias_variable([1])

        V_o = weight_variable([n_hidden, n_hidden])

        ingate = tf.nn.sigmoid(tf.matmul(h_fc2_drop, W_i) + tf.matmul(self.h_state, U_i) + b_i)

        cgate = tf.nn.tanh(tf.matmul(h_fc2_drop, W_c) + tf.matmul(self.h_state, U_c) + b_c)

        fgate = tf.nn.sigmoid(tf.matmul(h_fc2_drop, W_f) + tf.matmul(self.h_state, U_f) + b_f)

        self.c_state = tf.mul(ingate, cgate) + tf.mul(fgate, self.c_state)

        h_rnn1 = tf.nn.sigmoid(tf.matmul(h_fc2_drop, W_o) + \
        tf.matmul(self.h_state, U_o) + tf.matmul(self.c_state, V_o) + b_o)

        self.h_state = tf.mul(h_rnn1, tf.nn.tanh(self.c_state))

        W_out = weight_variable([n_hidden,1])
        b_out = bias_variable([1])

        self.y = tf.mul(tf.atan(tf.matmul(h_rnn1, W_out) + b_out), 2)

        self.loss = tf.square(tf.sub(self.y, self.y_))

The first test script is here:

import model
import driving_data

import tensorflow as tf
import numpy as np

n_hidden = 65

rcn = model.AutopilotRCNModel(n_hidden=n_hidden)

xs, ys = driving_data.LoadTrainBatch(1)

c_state = np.zeros((1,n_hidden),dtype=np.float32)
h_state = np.zeros((1,n_hidden),dtype=np.float32)

sess = tf.InteractiveSession()

train_step = tf.train.AdamOptimizer().minimize(rcn.loss)

sess.run(tf.initialize_all_variables())

train_step.run(
    feed_dict = {
        rcn.x: xs,
        rcn.y_: ys,
        rcn.keep_prob: 0.8,
        rcn.c_state: c_state,
        rcn.h_state: h_state
        }
)

I got this error:

Traceback (most recent call last):
  File "test.py", line 28, in <module>
    rcn.h_state: h_state
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1621, in run
    _run_using_default_session(self, feed_dict, self.graph, session)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3804, in _run_using_default_session
    session.run(operation, feed_dict)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 717, in run
    run_metadata_ptr)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 915, in _run
    feed_dict_string, options, run_metadata)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 965, in _do_run
    target_list, options, run_metadata)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 985, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [1,65]
     [[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[1,65], _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
     [[Node: _recv_add_13_0 = _Recv[client_terminated=true, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=7747556615882019196, tensor_name="add_13:0", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]

Caused by op u'Placeholder', defined at:
  File "test.py", line 9, in <module>
    rcn = model.AutopilotRCNModel(n_hidden=n_hidden)
  File "/home/thomas/projects/lstm_autopilot/model.py", line 27, in __init__
    self.c_state = tf.placeholder(tf.float32, shape=[1, n_hidden])
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 1330, in placeholder
    name=name)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1762, in _placeholder
    name=name)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 748, in apply_op
    op_def=op_def)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2388, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/home/thomas/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1300, in __init__
    self._traceback = _extract_stack()

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [1,65]
     [[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[1,65], _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
     [[Node: _recv_add_13_0 = _Recv[client_terminated=true, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=7747556615882019196, tensor_name="add_13:0", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]

My actual numeric feed_dict data matches the shape and type that the placeholders require, so I'm not sure what is wrong here. I would think that np.zeros((1, n_hidden), dtype=np.float32) should feed tf.placeholder(tf.float32, shape=[1, n_hidden]) just fine.
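For reference, feeding a NumPy array of matching shape and dtype into a standalone placeholder does work, so the feed mechanism itself is not the problem. The following is a minimal sketch using the same TF 0.x-era API as above; the names state_ph and doubled are made up for illustration:

import numpy as np
import tensorflow as tf

n_hidden = 65

# A standalone placeholder with the same shape/dtype as c_state.
state_ph = tf.placeholder(tf.float32, shape=[1, n_hidden])
doubled = state_ph * 2

with tf.Session() as sess:
    # Feeding np.zeros of the matching shape works fine here.
    result = sess.run(doubled,
                      feed_dict={state_ph: np.zeros((1, n_hidden), dtype=np.float32)})
    print(result.shape)  # (1, 65)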

1 Answer:

Answer 0 (score: 1)

My problem was that I was updating the placeholders inside the class, which is a definite no-no. I just changed

self.c_state = tf.mul(ingate, cgate) + tf.mul(fgate, self.c_state)

to

self.new_c_state = tf.mul(ingate, cgate) + tf.mul(fgate, self.c_state)

and

self.h_state = tf.mul(h_rnn1, tf.nn.tanh(self.c_state))

to

self.new_h_state = tf.mul(h_rnn1, tf.nn.tanh(self.new_c_state))

!!! It all makes sense now. Placeholders are placeholders for data. Don't run update operations on them, or things get tricky.
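For completeness, here is a minimal sketch of how the calling side can then thread the LSTM state through successive steps, assuming the class exposes new_c_state and new_h_state as above. The loop length and variable names are illustrative, not from the original repo:

import numpy as np
import tensorflow as tf
import model
import driving_data

n_hidden = 65
rcn = model.AutopilotRCNModel(n_hidden=n_hidden)

train_step = tf.train.AdamOptimizer().minimize(rcn.loss)

sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())

# Start from zero states, then feed back the states returned by the graph.
c_state = np.zeros((1, n_hidden), dtype=np.float32)
h_state = np.zeros((1, n_hidden), dtype=np.float32)

for step in range(100):
    xs, ys = driving_data.LoadTrainBatch(1)
    # Fetch the new states alongside the train op; the placeholders
    # themselves are only ever fed, never reassigned.
    _, c_state, h_state = sess.run(
        [train_step, rcn.new_c_state, rcn.new_h_state],
        feed_dict={
            rcn.x: xs,
            rcn.y_: ys,
            rcn.keep_prob: 0.8,
            rcn.c_state: c_state,
            rcn.h_state: h_state,
        })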