This is the code I'm using. I'm trying to get 1's and 0's (or, ideally, probabilities) out of a real test set. When I just split off part of the training set and ran on that, I got about 93% accuracy, but when I train the program and run it on the actual test set (the one without the 1's and 0's filled into column 1), it returns nothing but nan.
import tensorflow as tf
import numpy as np
from numpy import genfromtxt
import sklearn
# Convert to one hot
def convertOneHot(data):
    y=np.array([int(i[0]) for i in data])
    y_onehot=[0]*len(y)
    for i,j in enumerate(y):
        y_onehot[i]=[0]*(y.max() + 1)
        y_onehot[i][j]=1
    return (y,y_onehot)
data = genfromtxt('cs-training.csv',delimiter=',') # Training data
test_data = genfromtxt('cs-test-actual.csv',delimiter=',') # Actual test data
#This part is to get rid of the nan's at the start of the actual test data
g = 0
for i in test_data:
    i[0] = 1
    test_data[g] = i
    g += 1
x_train=np.array([ i[1::] for i in data])
y_train,y_train_onehot = convertOneHot(data)
x_test=np.array([ i[1::] for i in test_data])
y_test,y_test_onehot = convertOneHot(test_data)
A=data.shape[1]-1 # Number of features, Note first is y
B=len(y_train_onehot[0])
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))
# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)
# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))
saver = tf.train.Saver([tf_weight,tf_bias])
# Initialize and run
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print("...")
# Run the training
for i in range(1):
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
saver.save(sess, 'trained_csv_model')
ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
print ans
#Print accuracy
#result = sess.run(tf_accuracy, feed_dict={tf_in: x_test, tf_softmax_correct: y_test_onehot})
#print result
When I print ans, I get the following:
[[ nan nan]
[ nan nan]
[ nan nan]
...,
[ nan nan]
[ nan nan]
[ nan nan]]
I don't know what I'm doing wrong here. All I want is for ans to yield a 1, a 0, or ideally an array of probabilities where each entry in the array has length 2. I don't expect many people to be able to answer this for me, but please at least try. I've been stuck here waiting for a stroke of genius that hasn't come for two days now, so I figured I'd ask. Thanks!
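In other words, once the values are real numbers, I'd expect to post-process with something like this (just a sketch of what I'm after, assuming ans holds the softmax output):

import numpy as np
# Hypothetical post-processing: ans would be a (num_examples, 2) array of
# probabilities, and argmax over each row gives a hard 0/1 prediction.
predictions = np.argmax(ans, axis=1)
print(predictions[:10])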
test_data looks like this:
[[ 1.00000000e+00 8.85519080e-01 4.30000000e+01 ..., 0.00000000e+00
0.00000000e+00 0.00000000e+00]
[ 1.00000000e+00 4.63295269e-01 5.70000000e+01 ..., 4.00000000e+00
0.00000000e+00 2.00000000e+00]
[ 1.00000000e+00 4.32750360e-02 5.90000000e+01 ..., 1.00000000e+00
0.00000000e+00 2.00000000e+00]
...,
[ 1.00000000e+00 8.15963730e-02 7.00000000e+01 ..., 0.00000000e+00
0.00000000e+00 nan]
[ 1.00000000e+00 3.35456547e-01 5.60000000e+01 ..., 2.00000000e+00
1.00000000e+00 3.00000000e+00]
[ 1.00000000e+00 4.41841663e-01 2.90000000e+01 ..., 0.00000000e+00
0.00000000e+00 0.00000000e+00]]
The only reason the first entry in each row equals 1 is that I got rid of the nan's that filled that position, to avoid errors. Note that everything after the first column is a feature; the first column is what I'm trying to predict.
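(As a side note, the dump above still shows a nan in a later column; a quick NumPy check like the following sketch, using the arrays defined in the code above, shows whether any nan's remain in the feature columns.)

import numpy as np
# Sketch: count NaNs left in the features after the first-column fix;
# any nonzero count means NaNs still flow into the matmul.
print(np.isnan(x_train).sum())
print(np.isnan(x_test).sum())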
Edit:
I changed the code to the following:
import tensorflow as tf
import numpy as np
from numpy import genfromtxt
import sklearn
from sklearn.cross_validation import train_test_split
from tensorflow import Print
# Convert to one hot
def convertOneHot(data):
    y=np.array([int(i[0]) for i in data])
    y_onehot=[0]*len(y)
    for i,j in enumerate(y):
        y_onehot[i]=[0]*(y.max() + 1)
        y_onehot[i][j]=1
    return (y,y_onehot)
#buildDataFromIris()
data = genfromtxt('cs-training.csv',delimiter=',') # Training data
test_data = genfromtxt('cs-test-actual.csv',delimiter=',') # Test data
#for i in test_data[0]:
# print i
#print test_data
#print test_data
g = 0
for i in test_data:
    i[0] = 1.
    test_data[g] = i
    g += 1
#print 1, test_data
x_train=np.array([ i[1::] for i in data])
y_train,y_train_onehot = convertOneHot(data)
#print len(x_train), len(y_train), len(y_train_onehot)
x_test=np.array([ i[1::] for i in test_data])
y_test,y_test_onehot = convertOneHot(test_data)
#for u in y_test_onehot[0]:
# print u
#print y_test_onehot
#print len(x_test), len(y_test), len(y_test_onehot)
#print x_test[0]
#print '1'
# A number of features, 4 in this example
# B = 3 species of Iris (setosa, virginica and versicolor)
A=data.shape[1]-1 # Number of features, Note first is y
#print A
B=len(y_train_onehot[0])
#print B
#print y_train_onehot
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)
print tf_bias
print tf_weight
print tf_in
print matmul_result
# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))
print tf_softmax_correct
# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)
# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))
print tf_correct_prediction
print tf_accuracy
#saver = tf.train.Saver([tf_weight,tf_bias])
# Initialize and run
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print("...")
prediction = []
# Run the training
#probabilities = []
#print y_train_onehot
#print '-----------------------------------------'
for i in range(1):
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
    #saver.save(sess, 'trained_csv_model')
ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
print ans
After it prints, I can see that one of the objects is a boolean. I don't know if that's the problem, but take a look at the following and see if there's any way you can help.
Tensor("Print_16:0", shape=TensorShape([Dimension(2)]), dtype=float32)
Tensor("Print_17:0", shape=TensorShape([Dimension(10), Dimension(2)]), dtype=float32)
Tensor("Print_18:0", shape=TensorShape([Dimension(None), Dimension(10)]), dtype=float32)
Tensor("Print_19:0", shape=TensorShape([Dimension(None), Dimension(2)]), dtype=float32)
Tensor("Placeholder_9:0", shape=TensorShape([Dimension(None), Dimension(2)]), dtype=float32)
Tensor("Equal_4:0", shape=TensorShape([Dimension(None)]), dtype=bool)
Tensor("Mean_4:0", shape=TensorShape([]), dtype=float32)
...
[[ nan nan]
[ nan nan]
[ nan nan]
...,
[ nan nan]
[ nan nan]
[ nan nan]]
Answer 0 (score: 13)
I don't know the direct answer, but I do know how to debug it: tf.Print. It's an op that prints values while TensorFlow is executing and returns the tensor for further computation, so you can sprinkle these calls directly inline in your model.

Try dropping a few of them in. Instead of this line:
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
try:
tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)
This will show you what TensorFlow thinks the intermediate values are. If the NaNs show up earlier in the pipeline, it should give you a better idea of where the problem lies. Good luck! If you get some data out of this, feel free to follow up and we'll see if we can get you further.
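A complementary check (just a sketch, reusing the tensor names from your code): fetch the intermediate values with sess.run and test them for NaNs on the Python side, rather than reading console output:

import numpy as np
# Pull intermediates back into Python and test them for NaNs directly.
w, b = sess.run([tf_weight, tf_bias])
print(np.isnan(w).any())
print(np.isnan(b).any())
mm = sess.run(matmul_result, feed_dict={tf_in: x_test})
print(np.isnan(mm).any())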
Updated to add: here's a stripped-down debugging version, where I got rid of the input functions and just generate some random data:
import tensorflow as tf
import numpy as np
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
x_train=np.random.normal(0, 1, [50,10])
y_train=np.random.randint(0, 10, [50])
y_train_onehot = dense_to_one_hot(y_train, 10)
x_test=np.random.normal(0, 1, [50,10])
y_test=np.random.randint(0, 10, [50])
y_test_onehot = dense_to_one_hot(y_test, 10)
# A number of features, 4 in this example
# B = 3 species of Iris (setosa, virginica and versicolor)
A=10
B=10
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)
# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))
# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)
# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))
print tf_correct_prediction
print tf_accuracy
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1):
    print "Running the training step"
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
    #saver.save(sess, 'trained_csv_model')
    print "Running the eval step"
    ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
    print ans
You should see lines starting with "Bias: " and so on.
Answer 1 (score: 6)
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))
This was the problem on a project I was testing: specifically, it ended up computing 0 * log(0), which produces nan.
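You can see the mechanics directly in NumPy (a quick illustration):

import numpy as np
# 0 * log(0) is 0 * (-inf), which evaluates to nan:
p = np.float32(0.0)
print(np.log(p))      # -inf (with a divide-by-zero warning)
print(p * np.log(p))  # nan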
If you replace it with:
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax + 1e-50))
it should avoid the problem.
I also used reduce_mean instead of reduce_sum. If you double the batch size and use reduce_sum, the cost doubles (and so does the magnitude of the gradient); using reduce_mean keeps the cost comparable when you vary the batch size. (In addition, tf.Print prints to the console that TensorFlow was started from, which is handy here.)
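Here's a quick numeric sketch of that batch-size effect, with hypothetical per-example losses:

import numpy as np
losses = np.array([0.5, 0.7, 0.2])   # hypothetical per-example losses
doubled = np.tile(losses, 2)         # same examples, twice the batch size
print(doubled.sum())   # 2.8, double losses.sum() of 1.4 -- summed cost scales with batch
print(doubled.mean())  # ~0.467, same as losses.mean() -- mean cost stays comparable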
Specifically, this is what I'm using now when debugging:
cross_entropy = -tf.reduce_sum(y*tf.log(model + 1e-50)) ## avoid nan due to 0*log(0)
cross_entropy = tf.Print(cross_entropy, [cross_entropy], "cost") #print to the console tensorflow was started from