Poor accuracy with MNIST CSV data in TensorFlow

Time: 2017-04-25 13:14:32

Tags: csv tensorflow mnist

I am trying to follow the MNIST-for-beginners tutorial using CSV data. I got the CSV data from here and converted each label into a one-hot vector. Each row has 794 columns (columns 1~10 are the one-hot label, columns 11~794 are the pixels). Below is the code I wrote, which gives terrible accuracy.
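For reference, a minimal sketch of producing that layout from a raw label-plus-pixels CSV (this is only an illustration, not my actual preprocessing script; data/mnist_train.csv is a hypothetical raw file name):

import numpy as np

# hypothetical raw file: each row is "label, pix-1, ..., pix-784"
raw = np.genfromtxt("data/mnist_train.csv", delimiter=",")

labels = raw[:, 0].astype(int)   # digit 0-9 in the first column
pixels = raw[:, 1:]              # 784 pixel values

# one-hot encode the labels and place them in the first 10 columns
onehot = np.zeros((labels.size, 10))
onehot[np.arange(labels.size), labels] = 1

# 794 columns total: 10 one-hot label columns followed by 784 pixel columns
np.savetxt("data/mnist_train_onehot.csv", np.hstack([onehot, pixels]), delimiter=",")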

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys

import tensorflow as tf
import numpy      as np

FLAGS = None

def main(_):
  # Import data
  def csv_to_numpy_array(filepath, delimiter):
      return np.genfromtxt(filepath,delimiter=delimiter, dtype=None)

  def import_data():
      print("loading training data")
      traindata = csv_to_numpy_array("data/mnist_train_onehot.csv",delimiter=",")
      [trainY, trainX] = np.hsplit(traindata,[10]);
      print("loading test data")
      [testY, testX] = np.hsplit(testdata,[10]);
      return trainX, trainY, testX, testY

  x_train, y_train, x_test, y_test = import_data()

  numX = x_train.shape[1] #784
  numY = y_train.shape[1] #10

  # Prepare the placeholder 
  x = tf.placeholder(tf.float32, [None, numX]) #input box
  y_ = tf.placeholder(tf.float32, [None, numY]) #output box

  #define weight and biases
  w = tf.Variable(tf.zeros([numX,numY]))
  b = tf.Variable(tf.zeros([numY]))

  #create the model
  def model(X, w, b):
      pyx = tf.nn.softmax(tf.matmul(X, w) + b)
      return pyx

  y = model(x, w, b)

  #cost function 
  loss = -tf.reduce_sum(y_*tf.log(y))
  # the loss and acc
  cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y),reduction_indices=[1]))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  init = tf.initialize_all_variables()
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()

  # Train
  for i in range(1000):
      ind = np.random.choice(100,100)
      x_train_batch = x_train[ind]
      y_train_batch = y_train[ind]
        #run optimization op (backprop) and cost op (to get loss value)
      _,c = sess.run([train_step, loss], feed_dict={x: x_train_batch, y_: y_train_batch})
      if i % 50 == 0:
          train_acc = accuracy.eval({x: x_train_batch, y_: y_train_batch})
          print('step: %d, acc: %6.3f' % (i, train_acc) )

  # Test trained model
  print(sess.run(accuracy, feed_dict={x: x_test,
                                      y_: y_test}))

if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)

The accuracy comes out to 0.098. Could someone try this code and tell me what is wrong with it? Thank you very much in advance.

2 answers:

Answer 0 (score: 0)

Possible problems:

1 - Initialize the variables randomly instead of with zeros.

2 - You may be misinterpreting the .csv file format, which is laid out as label, pix-11, pix-12, pix-13, ...

3 - Try using tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( logits , labels )); the way you currently compute the loss is numerically unstable. Update: do not apply tf.nn.softmax in this case, because tf.nn.softmax_cross_entropy_with_logits already combines the softmax normalization and the cross-entropy internally (thanks to @ml4294's comment). A sketch combining suggestions 1 and 3 follows this list.
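A minimal sketch (not the answerer's code) of suggestions 1 and 3 together, reusing numX = 784 and numY = 10 from the question:

import tensorflow as tf

numX, numY = 784, 10  # feature / label sizes taken from the question

x  = tf.placeholder(tf.float32, [None, numX])
y_ = tf.placeholder(tf.float32, [None, numY])

# 1 - random initialization instead of zeros
w = tf.Variable(tf.truncated_normal([numX, numY], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[numY]))

# 3 - keep y as raw logits; no tf.nn.softmax here, because
# softmax_cross_entropy_with_logits applies the softmax internally
y = tf.matmul(x, w) + b
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)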

Answer 1 (score: 0)

Below is the code with the changes you need. Specifically, use tf.nn.softmax_cross_entropy_with_logits to compute the cross-entropy for you. The other improvement is to use loss = tf.reduce_mean(...) instead of loss = tf.reduce_sum(...). That makes each training correction the average of the errors in the batch rather than their sum. With a sum you get wild, uncontrolled swings during training that you then have to compensate for with a very small gradient-descent learning rate; if you find you need a rate above 1 or below 0.1, switching to reduce_mean usually fixes it. A short illustration follows, and then your code with the changes applied.
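For illustration (this snippet is not part of the original answer), the two formulations side by side, assuming y holds the raw logits and y_ the one-hot labels:

# sum over the batch: the loss scales with batch size, forcing a very small learning rate
loss_sum  = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
# mean over the batch: the loss is independent of batch size, so a rate like 0.5 stays stable
loss_mean = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))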

Here is your code with those changes:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys

import tensorflow as tf
import numpy      as np

FLAGS = None

def main(_):
  # Import data
  def csv_to_numpy_array(filepath, delimiter):
      return np.genfromtxt(filepath,delimiter=delimiter, dtype=None)

  def import_data():
      print("loading training data")
      traindata = csv_to_numpy_array("data/mnist_train_onehot.csv",delimiter=",")
      [trainY, trainX] = np.hsplit(traindata,[10]);
      print("loading test data")
      [testY, testX] = np.hsplit(testdata,[10]);
      return trainX, trainY, testX, testY

  x_train, y_train, x_test, y_test = import_data()

  numX = x_train.shape[1] #784
  numY = y_train.shape[1] #10

  # Prepare the placeholder 
  x = tf.placeholder(tf.float32, [None, numX]) #input box
  y_ = tf.placeholder(tf.float32, [None, numY]) #output box

  #define weight and biases
  w = tf.Variable(tf.zeros([numX,numY]))
  b = tf.Variable(tf.zeros([numY]))

  y = tf.matmul(x, w) + b

  # unused for this model
  keep_prob = tf.placeholder(tf.float32)

  loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  percent_correct = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()

  # Train
  for i in range(1000):
      ind = np.random.choice(x_train.shape[0],100)
      x_train_batch = x_train[ind]
      y_train_batch = y_train[ind]
        #run optimization op (backprop) and cost op (to get loss value)
      _,c = sess.run([train_step, loss], feed_dict={x: x_train_batch, y_: y_train_batch})
      if i % 50 == 0:
          train_acc = percent_correct.eval({x: x_train_batch, y_: y_train_batch})
          print('step: %d, acc: %6.3f' % (i, train_acc) )

  # Test trained model
  print(sess.run(percent_correct, feed_dict={x: x_test,
                                      y_: y_test}))

if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)

Here is a link to a script with several different MNIST TensorFlow architectures: https://github.com/panchishin/learn-to-tensorflow/blob/master/examples/mnist_model_comparison.py