After playing around with TensorFlow I finally got my code to run, but the loss appears to be negative, even after only a few epochs. Why is this happening? I have only made simple neural networks with plain NumPy before and never ran into this problem; can someone explain what I am doing wrong?
Here is the input file in case anyone wants to test it: input file
Additional info: the labels are not one-hot arrays. Could my loss function be wrong because of that? The tutorial I followed used one-hot arrays as labels.
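(In case it matters: my understanding is that one-hot labels would look like the sketch below. The labels array here is made-up example data with 4 classes, not my real data.)

import numpy as np

# Hypothetical example: integer class indices converted to one-hot rows.
labels = np.array([0, 2, 1, 3])                # made-up class indices
one_hot = np.eye(4, dtype=np.float64)[labels]  # row i is all zeros except a 1 at column labels[i]
print(one_hot)
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 0. 1.]]

Anyway, here is my full code: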
import tensorflow as tf
import numpy as np
data = np.load("test_data.npz")
trng_input = np.array(data['Input'], dtype=np.float64)
trng_output = np.array(data['Output'], dtype=np.float64)
nhl1 = 12
nhl2 = 8
n_classes = 4
x = tf.placeholder(dtype=tf.float64, shape=[len(trng_input),24])
y = tf.placeholder(dtype=tf.float64, shape=[len(trng_output),n_classes])
def NN(data):
    # two hidden layers and an output layer, all initialized from a standard normal
    hl1 = {"weights": tf.Variable(tf.random_normal([24, nhl1], dtype=tf.float64)),
           "biases": tf.Variable(tf.random_normal([nhl1], dtype=tf.float64))}
    hl2 = {"weights": tf.Variable(tf.random_normal([nhl1, nhl2], dtype=tf.float64)),
           "biases": tf.Variable(tf.random_normal([nhl2], dtype=tf.float64))}
    output_layer = {"weights": tf.Variable(tf.random_normal([nhl2, n_classes], dtype=tf.float64)),
                    "biases": tf.Variable(tf.random_normal([n_classes], dtype=tf.float64))}

    l1 = tf.add(tf.matmul(data, hl1["weights"]), hl1["biases"])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hl2["weights"]), hl2["biases"])
    l2 = tf.nn.relu(l2)

    # ReLU is also applied to the output, which is then used as the logits below
    output = tf.add(tf.matmul(l2, output_layer["weights"]), output_layer["biases"])
    output = tf.nn.relu(output)

    return output
def train(data, epochs):
    prediction = NN(data)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            _, c = sess.run([optimizer, cost], feed_dict={x: trng_input, y: trng_output})
            if not epoch % 20:
                print(F"Epoch {epoch} completed out of {epochs}. Loss: {c}")

        # evaluate training accuracy once training has finished
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, "float"))
        Eval = accuracy.eval({x: trng_input, y: trng_output})
        print(F"Accuracy: {Eval}")

train(trng_input, 200)
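For what it is worth, here is a quick sanity check I can run on the labels. As far as I understand, softmax_cross_entropy_with_logits_v2 computes -sum(labels * log(softmax(logits))), which can only go negative if some label entries are themselves negative:

import numpy as np

data = np.load("test_data.npz")
trng_output = np.array(data['Output'], dtype=np.float64)

# log(softmax(...)) is always <= 0, so the loss stays >= 0 as long as every
# label entry is >= 0; negative entries are the only way it can dip below zero
print("min label value:", trng_output.min())
print("first few row sums:", trng_output.sum(axis=1)[:5])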