import tensorflow as tf
import numpy as np
# Constant Declaration
LEARNING_RATE = 0.05
LEARNING_TIME = 10000
FILE_NAME = 'xor'
# Input Data Declaration
xy = np.loadtxt(FILE_NAME + '_data_set.txt', unpack=True, dtype='float32', delimiter=',')
x_data = np.transpose(xy[0:-1])
y_data = np.transpose(xy[-1])
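# For reference, xor_data_set.txt is assumed (the file itself is not shown
# in this post) to hold the four XOR cases as comma-separated "x1,x2,y" lines:
#   0,0,0
#   0,1,1
#   1,0,1
#   1,1,0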
print(x_data)
# Declaration Part
X = tf.placeholder(dtype=tf.float32, name="X-input")
Y = tf.placeholder(dtype=tf.float32, name="Y-input")
W1 = tf.Variable(tf.random_uniform([2, 2], -1.0, 1.0), name="Weight_1")
W2 = tf.Variable(tf.random_uniform([2, 1], -1.0, 1.0), name="Weight_2")
b1 = tf.Variable(tf.zeros([2]), name='Bias1')
b2 = tf.Variable(tf.zeros([1]), name='Bias2')
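# Shapes: W1 maps the 2 inputs to 2 hidden units, W2 maps those 2 hidden
# units to the single output; note that X and Y are declared without an
# explicit shape argument.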
# Formula Part
with tf.name_scope("Layer1") as scope:
L1 = tf.sigmoid(tf.matmul(X,W1) + b1)
with tf.name_scope("Layer2") as scope:
hypothesis = tf.sigmoid(tf.matmul(L1,W2) + b2)
with tf.name_scope("Cost") as scope:
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
    cost_summ = tf.summary.scalar("cost", cost)
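    # The cost above is the standard binary cross-entropy; the "cost"
    # summary makes it show up as a scalar curve in TensorBoard.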
# Minimizing Part
a = tf.Variable(LEARNING_RATE)
with tf.name_scope("train") as scope:
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
# Initializing Part
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('./logs/' + FILE_NAME, sess.graph)
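# The run can then be inspected with TensorBoard, e.g.: tensorboard --logdir=./logs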
# Running Part
for step in range(LEARNING_TIME):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W2))
        summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
        writer.add_summary(summary, step)
# Test Part
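# floor(hypothesis + 0.5) rounds the sigmoid output to the nearest integer,
# i.e. thresholds the prediction at 0.5.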
correction = tf.equal(tf.floor(hypothesis + 0.5), Y)
accuracy = tf.reduce_mean(tf.cast(correction, 'float'))
print(sess.run([hypothesis, tf.floor(hypothesis + 0.5), correction, accuracy], feed_dict={X: x_data, Y: y_data}))
The above is my TensorFlow code for solving the XOR problem. The problem is that the accuracy is only 50%, and the cost settles at 0.69321, which is almost exactly ln 2 = -log(0.5), i.e. the cross-entropy a sigmoid output stuck at 0.5 would produce, so the network seems to be predicting at chance level. I have looked at many XOR implementations in TensorFlow and I cannot find what is wrong with mine.
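One thing I have not been able to rule out is a shape mismatch between the label array and the network output, since silent NumPy/TensorFlow broadcasting inside the cost would not raise any error. This is the small shape check I would run against the session above (a diagnostic sketch; the broadcasting concern is only my guess):

print(y_data.shape)                                           # shape of the NumPy label array fed into Y
print(sess.run(tf.shape(hypothesis), feed_dict={X: x_data}))  # shape of the network output
print(sess.run(tf.shape(Y * tf.log(hypothesis)),
               feed_dict={X: x_data, Y: y_data}))             # shape of one cost term after broadcasting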
Below is an image showing how my code runs.