XOR multilayer perceptron outputs "None" as the final result?

Date: 2018-08-18 05:06:29

Tags: python neural-network

I'm not sure whether I've defined everything correctly, but for some reason I can't get it to stop printing "None" instead of an actual value as the final output. I even tried a ReLU function instead of Sigmoid, but that was just a wild guess. Other than that, the code looks fairly straightforward.

#imports
import random
import numpy as np
import tensorflow as tf
#Variables
hidden_layer1_node= 2
hidden_layer2_node= 1
output = 1

x = tf.placeholder(tf.float32,[4,2])
y = tf.placeholder(tf.float32,[4,1])
#neural model
def neural_model():
    #2 inputs by 2 neurons, 2 biases for the two neurons
    layer1_weight = {'weight':tf.Variable(tf.random_normal([2,hidden_layer1_node])),
                'bias':tf.Variable(tf.random_normal([hidden_layer1_node]))}
    #2 inputs by 1 neuron, 1 bias for the single neuron
    layer2_weight = {'weight':tf.Variable(tf.random_normal([2,hidden_layer2_node])),
                'bias':tf.Variable(tf.random_normal([hidden_layer2_node]))}

    #z value for first layer
    zl1 = tf.add(tf.matmul(x,layer1_weight['weight']), layer1_weight['bias'])
    #prediction for first layer
    prediction1 = tf.sigmoid(zl1)

    #z value for second layer
    zl2 = tf.add(tf.matmul(zl1,layer2_weight['weight']), layer2_weight['bias'])
    #prediction for second layer
    prediction2 = tf.sigmoid(zl2)

    return prediction2


#cost function
def cost_function():
    prediction = neural_model()
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    return loss

#Optimization
loss = cost_function()
training = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

#training stage
train_x = [[0,0],[0,1],[1,0],[1,1]]
train_y = [[0],[1],[1],[0]]

initialize = tf.global_variables_initializer()

#forward pass + backprop: 4 epochs of 5000 steps each
epoch = 4

with tf.Session() as sess:
    #init all tf variable
    sess.run(initialize)

    for i in range(epoch):
        for _ in range(5000):
            c = sess.run(training, feed_dict={x:train_x,y:train_y})

        print(c)    

#Output
#None
#None
#None
#None

1 Answer:

Answer 0 (score: 0)

The training op returned by minimize() has no output, so sess.run(training) always evaluates to None; to see progress you have to fetch the loss (or the prediction) tensor itself. The code below also feeds the first layer's activation prediction1 into the second layer instead of the pre-activation zl1, and replaces the softmax cross-entropy (which is degenerate for a single output unit) with a hand-written binary cross-entropy:

def neural_model():
    layer1_weight = {'weight':tf.Variable(tf.random_normal([2,hidden_layer1_node])),
                'bias':tf.Variable(tf.zeros([hidden_layer1_node]))}

    layer2_weight = {'weight':tf.Variable(tf.random_normal([2,hidden_layer2_node])),
                'bias':tf.Variable(tf.zeros([hidden_layer2_node]))}


    zl1 = tf.add(tf.matmul(x,layer1_weight['weight']), layer1_weight['bias'])
    prediction1 = tf.sigmoid(zl1)

    zl2 = tf.add(tf.matmul(prediction1,layer2_weight['weight']), layer2_weight['bias'])
    return tf.sigmoid(zl2)

prediction = neural_model()

#cost function
def cost_function():
    loss = tf.reduce_mean(-1*((y*tf.log(prediction))+((1-y)*tf.log(1.0-prediction))))
    return loss

#Optimization
loss = cost_function()
training = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

#training stage
train_x = [[0,0],[0,1],[1,0],[1,1]]
train_y = [[0],[1],[1],[0]]

initialize = tf.global_variables_initializer()
epoch = 4

with tf.Session() as sess:
    sess.run(initialize)

    for i in range(epoch):
        for _ in range(5000):
            sess.run(training, feed_dict={x:train_x,y:train_y})

        print(sess.run(loss,feed_dict={x:train_x,y:train_y}))

    print(sess.run(prediction,feed_dict={x:train_x,y:train_y}))
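
A minimal sketch (not part of the original answer, reusing the x, y, training, loss and prediction tensors defined above) of fetching the loss in the same sess.run call as the training op, which avoids printing None without a separate evaluation:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(20000):
        #the optimizer op itself has no value, so its fetch result is discarded;
        #the second fetch returns the current loss
        _, current_loss = sess.run([training, loss], feed_dict={x: train_x, y: train_y})
        if step % 5000 == 0:
            print(step, current_loss)
    print(sess.run(prediction, feed_dict={x: train_x, y: train_y}))

As a side note, if neural_model() also returned the pre-sigmoid value zl2, tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=zl2) would be a numerically safer alternative to the hand-written cross-entropy, since tf.log blows up when the sigmoid output saturates to exactly 0 or 1.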