InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float

Asked: 2018-03-20 07:43:04

Tags: python tensorflow

I am getting the error "InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float." I don't understand why it happens.

I want to build a CNN model for sentence classification. For now I am using random data as the training data. Because the input is text, the reshaped data is not a square matrix like MNIST data; instead it is a 10 × 100 matrix. Maybe that is the problem with my code.
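For reference, here is a quick shape check with NumPy (a minimal sketch; the zero array is just a stand-in for the random training data used below):

import numpy as np

# Each 1000-dimensional sentence vector becomes one 10 x 100 single-channel "image".
batch = np.zeros((8, 1000), dtype=np.float32)
print(batch.reshape(-1, 10, 100, 1).shape)  # (8, 10, 100, 1)

The full code: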

import tensorflow as tf
import numpy as np
from numpy.random import RandomState

# sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[None,1000], name='x_input')
y_ = tf.placeholder(tf.float32, shape=[None,2], name='y_input')


# x = tf.image.convert_image_dtype(x,tf.float32)  



# Define helper functions.
def weight_variable(shape):
    initial = tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1,shape=shape)
    return tf.Variable(initial)


def conv2d(x,W,m,n):
    return tf.nn.conv2d(x,W,strides=[1,m,n,1],padding = 'SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')


x_1 = tf.reshape(x, [-1,10,100,1])


# Convolution Layer #1
W_conv1 = weight_variable([2,20,1,25])
b_conv1 = bias_variable([25])
h_conv1 = tf.nn.relu(conv2d(x_1,W_conv1,1,1)+b_conv1)

# Pooling Layer #1
h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,20,1],strides=[1,1,1,1],padding='VALID')

# Convolution Layer #2
W_conv2 = weight_variable([3,9,25,100])
b_conv2 = bias_variable([100])
h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2,3,9)+b_conv2)

# Pooling Layer #2
h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,3,3,1],strides=[1,3,3,1],padding='VALID')


# Dense Layer 
pool_shape = h_pool2.get_shape().as_list()
nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]

W_fc1 = weight_variable([nodes,200])
b_fc1 = bias_variable([200])

h_pool2_flat = tf.reshape(h_pool2,[-1,nodes])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)

# Dropout regularization
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)


# Logits Layer
W_fc2 = weight_variable([200,2])
b_fc2 = bias_variable([2])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)



# Back propagation
cross_entropy = -tf.reduce_mean(
        y_ * tf.log(tf.clip_by_value(y_conv,1e-10,1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)


# -----------------------------------------------------------------------------
# Initialization
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size,1000)
Y = [[int(x1<0.5), 1-int(x1<0.5)] for x1 in X[:,1]]


# Create a Session
with tf.Session() as sess:
    init_op = tf.initialize_all_variables()
    sess.run(init_op)

    steps = 1000
    batch_size = 8
    for i in range(steps):
        start = (i*batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)

        sess.run(train_step,feed_dict={x:X[start:end], y_:Y[start:end]})

        if i % 1000 == 0:
            total_cross_entropy = sess.run(
                    cross_entropy, feed_dict={x:X,y_:Y})
            print("After %d training step(s), cross entropy on all data is %g"%
                  (i,total_cross_entropy))

Thanks for reading. :)

How can I solve this?

1 Answer:

Answer 0 (score: 0):

You need to feed a value for keep_prob, which is a placeholder with dtype float. Your feed_dict only supplies x and y_, so the dropout placeholder keep_prob never receives a value when the graph runs, which is exactly what the error message is complaining about.
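For example (a minimal sketch based on the code above; 0.5 for training and 1.0 for evaluation are typical dropout choices, not values taken from the question):

# Inside the training loop, feed keep_prob along with x and y_:
sess.run(train_step,
         feed_dict={x: X[start:end], y_: Y[start:end], keep_prob: 0.5})

# When evaluating the loss, disable dropout by feeding 1.0:
total_cross_entropy = sess.run(
        cross_entropy, feed_dict={x: X, y_: Y, keep_prob: 1.0})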
