I am writing a neural network program. The code runs, but the accuracy of my network is always 1.0. I tried changing the activation function, but nothing changed. What is wrong with my code?
I use the following function to pick a training sample at random:
import math
import random
import numpy as np
import tensorflow as tf

# randomly choose one sample from the loaded data
def random_data():
    num = random.randint(0, len(result) - 1)
    return result[num]

# recurrent "state" that is fed back into the network input
statue = [[0, 0, 0]]
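loadData and result are not shown here. For reference, each entry of result is assumed to hold a 9-value input vector and a single label, which matches the reshape(1, 9) / reshape(1, 1) calls in the training loop below; a minimal sketch under that assumption:
# hypothetical loader, shown only to illustrate the assumed layout of result;
# it is not part of the original program
result = []

def loadData(filename):
    # assumed file format: 9 input values followed by 1 label per line
    with open(filename) as f:
        for line in f:
            values = [float(v) for v in line.split()]
            result.append((values[:9], values[9:10]))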
The network is built as follows:
# network definition (We and bi are weight/bias dictionaries defined elsewhere, not shown)
def Net(X):
    global statue
    # drop the last 3 input columns and append the previous state instead
    X1, X2 = tf.split(X, [6, 3], 1)
    X = tf.concat([X1, statue], 1)
    # replace the first column of We['h3'] with constant ones
    We1, We2 = tf.split(We['h3'], [1, 3], 1)
    WW1, WW2 = tf.split(We1, [2, 2], 0)
    WW1 = [[1.], [1.]]
    WW2 = [[1.], [1.]]
    We1 = tf.concat([WW1, WW2], 0)
    We['h3'] = tf.concat([We1, We2], 1)
    # first layer, applied twice with the same weights, then concatenated
    Wx_plus_b00 = tf.matmul(X, We['h1']) + bi['b1']
    l00 = tf.nn.sigmoid(Wx_plus_b00)
    Wx_plus_b01 = tf.matmul(X, We['h1']) + bi['b1']
    l01 = tf.nn.sigmoid(Wx_plus_b01)
    l0 = tf.concat([l01, l00], 1)
    # second layer
    Weights1 = tf.Variable(tf.random_normal([16, 4]))
    biases1 = tf.Variable(tf.zeros([1, 4]) + 0.1)
    Wx_plus_b1 = np.array(tf.matmul(l0, Weights1) + biases1)
    # custom activation 2 / (1 + e^-x) - 1, which equals tanh(x / 2)
    N1act = 2 / (1 + pow(math.e, -Wx_plus_b1)) - 1
    l1 = tf.nn.sigmoid(N1act)
    # output layer: the first column is the prediction,
    # the remaining 3 columns become the new state
    Wx_plus_b3 = tf.matmul(l1, We['h3']) + bi['b3']
    Wx_plus_b31, Wx_plus_b32 = tf.split(Wx_plus_b3, [1, 3], 1)
    statue = Wx_plus_b32
    N3act = 2 / (1 + pow(math.e, -Wx_plus_b31)) - 1
    prediction = tf.nn.softsign(N3act)
    return prediction
xs = tf.placeholder(tf.float32, [1, 9])
ys = tf.placeholder(tf.float32, [1, 1])
myNet = Net(xs)
pred = myNet
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=pred))
optm = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
This is my accuracy function:
# accuracy
accr = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(ys, 1)), tf.float32))
init = tf.global_variables_initializer()
This is my main function:
# main function
with tf.Session() as sess:
    sess.run(init)
    loadData('ABCD.txt')
    step = 1
    for i in range(10000):
        batch = random_data()
        x_input0 = np.array(batch[0]).reshape(1, 9)
        y_output = np.array(batch[1]).reshape(1, 1)
        feeds = {xs: x_input0, ys: y_output}
        sess.run(optm, feed_dict=feeds)
        step = step + 1
        print('accuracy :', sess.run(accr, feed_dict={xs: x_input0, ys: y_output}))