您必须使用dtype int32和shape [26,8]输入占位符张量'feat_cate_feature'的值

时间:2018-07-11 03:23:17

标签: python tensorflow

我正在尝试使用张量流构造DeepFM。 但是,tensorflow用以下问题提醒我

You must feed a value for placeholder tensor 'feat_cate_feature' with dtype     int32 and shape [26,8]
 [[Node: feat_cate_feature = Placeholder[dtype=DT_INT32, shape=[26,8], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]

我检查了提要值和占位符,提要值和占位符的尺寸匹配,所以dtype也是如此。我是使用tensorflow的新手。有人可以帮我吗?非常感谢!!! / p>

import tensorflow as tf
from sklearn.cross_validation import train_test_split
from dataInput import int_input,cate_input,y_label,dataInput
import numpy as np

graph = tf.Graph()


with graph.as_default():
dataInput()

deep_layers = [12,39,312]                   #DNN  three layers
k = 12 + 26   #Dimensions of Vi(  len(int_feature) + len(cate_feature)  )
embeddings_output_size = 39*8              #coding cate_feature to [1,39*8]
tf.set_random_seed(2018)                   #random seed
embeddings = []                            #save embedding results
#Generate Embedding Layer
weights_mat = dict()

#embeddings                               
weights_mat["feature_embeddings"] = tf.Variable(tf.random_normal([26,16,39],0.0,0.01),name="feature_embeddings")#every feature is of 8*k after embeddings
#
#
weights_mat["feature_bias"] = tf.Variable(tf.random_uniform([26,1],0.0,1.0),name="feature_bias")


#              deep  layers
num_layer = len(deep_layers)
input_size = 12                                #input int_features
glorot = np.sqrt(2.0 / 12)



#              layer_0
weights_mat["layer_0"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size = (input_size,deep_layers[0])),
                                       dtype=np.float32)
weights_mat["bias_0"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size=(1,deep_layers[0])),
                                     dtype=np.float32)
#              layer_1
weights_mat["layer_1"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size = (deep_layers[0],deep_layers[1])),
                                       dtype=np.float32)
weights_mat["bias_1"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size=(1,deep_layers[1])),
                                     dtype=np.float32)
#              layer_2
weights_mat["layer_2"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size = (deep_layers[1],deep_layers[2])),
                                       dtype=np.float32)
weights_mat["bias_2"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size=(1,deep_layers[2])),
                                     dtype=np.float32)


#final concat layer  ------concat   embeddings' outputs and deep layers' outputs
input_size = deep_layers[-1] + embeddings_output_size
weights_mat["concat_weight"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
                                            dtype=np.float32)
weights_mat["concat_bias"] = tf.Variable(tf.constant(0.01),dtype=np.float32)


#######################################################################
##                                                                   ##
##                                                                   ##
##                        DeepFM  Model                              ##
##                                                                   ##
##                                                                   ##
#######################################################################


feat_int_feature = tf.placeholder(tf.int32,shape=[1,12],name="feat_int_feature")
feat_cate_feature = tf.placeholder(tf.int32,shape=[26,8],name="feat_cate_feature")

feat_label = tf.placeholder(tf.float32,name="feat_label")

###                     Models                 ###

#          Cate_feature     embeddings
i = 0
while i < 26:
    embeddings.append(tf.reshape(tf.nn.embedding_lookup(weights_mat["feature_embeddings"][i],feat_cate_feature[i]),[1,8*39]))
    i += 1
embeddings_output = embeddings                      ##Output of embedding layer                [26,[1,8*39(k)]]

#           FM     Layer

#-----------First Order-----------------#
 # None * F * 1
y_first_coma = tf.Variable(np.random.normal(size=(26,1)))
print(weights_mat["feature_bias"])
while i < 26:
    #y_first_order = tf.nn.embedding_lookup(weights_mat["feature_bias"], [i])
    y_first_coma[i] = (tf.add(tf.multiply(weights_mat["feature_bias"][i],embeddings_output[i]),y_first_coma))

y_first_order_out = y_first_coma  # x^1
print(y_first_coma)
#----------Second Order----------------#
# sum_square part
summed_features_emb = tf.reduce_sum(embeddings_output, 1)  # None * K
summed_features_emb_square = tf.square(summed_features_emb)  # None * K

# square_sum part
squared_features_emb = tf.square(embeddings_output)
squared_sum_features_emb = tf.reduce_sum(squared_features_emb, 1)  # None * K

# second order
y_second_order_out = 0.5 * tf.subtract(summed_features_emb_square, squared_sum_features_emb)  # None * K



#          Int_feature      Deep     Process
y_deep = feat_int_feature                               ##Input Layer in DeepLayer Partion
for i in range(0, len(deep_layers)):
    y_deep = tf.add(tf.matmul(tf.cast(y_deep,dtype=tf.float32), weights_mat["layer_%d" %i]), weights_mat["bias_%d"%i]) # None * layer[i] * 1

deepLayer_output = tf.nn.relu(y_deep)               ##Output of DeepLayer-----Calculate the Int_features    [13,[1,26]]


#-------------Deep   +   FM-------------------
#print(y_first_order_out.get_shape())
y_first_order_cast = tf.cast(y_first_order_out,dtype=tf.float64)
y_second_order_cast = tf.cast(y_second_order_out,dtype=tf.float64)
deepLayer_cast = tf.cast(deepLayer_output,dtype=tf.float64)

#concat_input = tf.concat([y_first_order_cast, y_second_order_out, deepLayer_output],axis=1)
#calculating results of the three partions
weights_mat["first_order_weight"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size = (1,26)))
weights_mat["second_order_weight"]  = tf.Variable(np.random.normal(loc = 0,scale = glorot,size = (26,312)))
weights_mat["deep_order_weight"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size = (312,1)))

weights_mat["first_order_bias"] = tf.Variable(0.01,dtype=tf.float64)
weights_mat["second_order_bias"] = tf.Variable(np.random.normal(loc = 0,scale = glorot,size = (26,1)))
weights_mat["deep_order_bias"] = tf.Variable(0.01,dtype=tf.float64)

result_of_2_array = []

result_of_1 = tf.add(tf.matmul(weights_mat["first_order_weight"],y_first_order_cast),weights_mat["first_order_bias"])
while i < 26:    
    result_of_2_array.append(tf.add(tf.matmul(tf.expand_dims(y_second_order_cast[i],0),
                                              tf.expand_dims(weights_mat["second_order_weight"][i],1)),
                                    weights_mat["second_order_bias"]))
    i += 1
result_of_2 = tf.reduce_sum(result_of_2_array)
result_of_3 = tf.add(tf.matmul(weights_mat["deep_order_weight"],deepLayer_cast),weights_mat["deep_order_bias"])

model_output = tf.add(tf.add(result_of_1,result_of_2),result_of_3)
#model_output = tf.add(tf.matmul(concat_input, weights_mat["concat_weight"]), weights_mat["concat_bias"])

#-------------Loss----------------------------
model_output = tf.nn.sigmoid(model_output)
loss = tf.losses.log_loss(feat_label, model_output)

#------------Optimizer------------------------
optimizer = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999,
                                   epsilon=1e-8).minimize(loss)



with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = 50
    accurate = 0
    print(np.array(int_input[0]).shape)
    print(np.array(cate_input[0]).shape)
    for i in range(2000):
        sess.run(optimizer,feed_dict={feat_int_feature:int_input[i],feat_cate_feature:cate_input[i],feat_label:y_label[i]})

        temp = 0
        if sess.run(model_output) - 0.5 >0:
            temp = 1

        if temp == y_label[i]:
            accurate += 1

        if i%50 == 0:
            print("    Accuracy =  \n")
            print(accurate/batch)

0 个答案:

没有答案