Improving the accuracy of a TensorFlow neural network - Python

Date: 2017-03-15 08:32:27

Tags: python tensorflow neural-network classification

This is a continuation of my first question: Receiving random cost output on tensorflow regression- python

I am using a multilayer perceptron ANN to predict the phylum of bacterial samples from other observed data. Every time I run my code I get an accuracy of 0. The dataset isn't the best, since it contains many NaNs (which have been replaced with 0), but I would still expect something better than nothing. I'm looking for help debugging this and improving the accuracy.
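
For reference, the NaN-to-zero replacement described above is just a fillna on the loaded frame; a minimal sketch (assuming the export.csv linked below has been downloaded next to the script) looks like this:

import pandas as pd

# Load the dataset, drop the ID column, and replace every NaN with 0,
# the same preprocessing the full script below performs step by step
df = pd.read_csv('export.csv')
data_ = df.drop(['ID'], axis=1).fillna(value=0)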

The dataset I am currently using can be found here: https://github.com/z12332/tensorflow-test-1/blob/master/export.csv

Here is my current code:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.contrib import learn

from sklearn.pipeline import Pipeline
from sklearn import datasets, linear_model


df = pd.read_csv('/Users/zach/desktop/export.csv')
data_ = df.drop(['ID'], axis=1)


n_classes = data_["Phylum"].nunique()

inputY = pd.get_dummies(data_['Phylum'])

dim = 19
learning_rate = 0.000001
display_step = 50
n_hidden_1 = 500
n_hidden_2 = 500
n_hidden_3 = 500
n_hidden_4 = 500

X = tf.placeholder(tf.float32, [None, dim])


train_X = data_.iloc[:2000, :-1].as_matrix()
train_X = pd.DataFrame(data=train_X)
train_X = train_X.fillna(value=0).as_matrix()

train_Y = inputY.iloc[:2000].as_matrix()
train_Y = pd.DataFrame(data=train_Y)
train_Y = train_Y.fillna(value=0).as_matrix()

test_X = data_.iloc[2000:, :-1].as_matrix()
test_X = pd.DataFrame(data=test_X)
test_X = test_X.fillna(value=0).as_matrix()

test_Y = inputY.iloc[2000:].as_matrix()
test_Y = pd.DataFrame(data=test_Y)
test_Y = test_Y.fillna(value=0).as_matrix()

n_samples = train_Y.size
total_len = train_X.shape[0]
n_input = train_X.shape[1]
batch_size = 10


W = tf.Variable(tf.zeros([dim, n_classes]))
b = tf.Variable(tf.zeros([n_classes]))


def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)

    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)

    # Hidden layer with RELU activation
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)

    # Hidden layer with RELU activation
    layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
    layer_4 = tf.nn.relu(layer_4)

    # Output layer with linear activation
    out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
    return out_layer

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], 0, 0.1)),
    'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_hidden_4, n_classes], 0, 0.1))
}

biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
    'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
    'b3': tf.Variable(tf.random_normal([n_hidden_3], 0, 0.1)),
    'b4': tf.Variable(tf.random_normal([n_hidden_4], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}

# Construct model
pred = multilayer_perceptron(X, weights, biases)

y = tf.placeholder(tf.float32, [None, n_classes])
cost = -tf.reduce_sum(y*tf.log(tf.clip_by_value(pred,1e-10,1.0)))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
hm_epochs = 500

init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init) 

    for epoch in range(hm_epochs):
        avg_cost = 0
        total_batch = int(total_len/batch_size)
        for i in range(total_batch-1):
            batch_x = train_X[i*batch_size:(i+1)*batch_size]
            batch_y = train_Y[i*batch_size:(i+1)*batch_size]

            _, c, p = sess.run([optimizer, cost, pred],
                               feed_dict={X: batch_x, y: batch_y})
            avg_cost += c / total_batch

        label_value = batch_y
        estimate = p
        err = label_value - estimate

        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                  "{:.9f}".format(avg_cost))
            print("[*]----------------------------")
            for i in range(3):
                print("label value:", label_value[i],
                      "estimated value:", estimate[i])
            print("[*]============================")

    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({X: test_X, y: test_Y}))

My output is as follows:

Epoch: 0451 cost= 72.070914993
[*]----------------------------
label value: [0 1 0 0 0] estimated value: [ 1184.01843262 -1293.13989258    99.68536377   655.67803955  -833.19824219]
label value: [0 1 0 0 0] estimated value: [ 1183.1940918  -1273.7635498     95.80528259   656.42572021  -841.03656006]
label value: [0 1 0 0 0] estimated value: [ 1183.55383301 -1304.3470459     96.90409088   660.52886963  -838.37719727]
[*]============================
Optimization Finished!
Accuracy: 0.0

1 Answer:

Answer 0 (score: 0)

I figured it out! To improve the accuracy, it was necessary to split the training and test sets into random samples; otherwise the network is never exposed to the data it needs and fails. I accomplished this by rewriting the data-formatting section:

df = pd.read_csv('/Users/zach/desktop/export.csv')
data_ = df.drop(['ID','Species'], axis=1)


n_classes = data_["Phylum"].nunique()

dim = 18
learning_rate = 0.0001
display_step = 10
n_hidden_1 = 2000
n_hidden_2 = 1500
n_hidden_3 = 1000
n_hidden_4 = 500

X = tf.placeholder(tf.float32, [None, dim])

train_set = data_.sample(frac=0.75) #THIS ADDITION SPLITS THE DATA RANDOMLY AND TAKE 75% FOR TRAINING
test_set = data_.loc[~data_.index.isin(train_set.index)] #THIS TAKES THE REMAINING DATA FOR TESTING

train_size = train_set.size

inputY_test = pd.get_dummies(test_set['Phylum'])
inputY_train = pd.get_dummies(train_set['Phylum'])

train_X = train_set.iloc[:train_size, :-1].as_matrix()
train_X = pd.DataFrame(data=train_X)
train_X = train_X.fillna(value=0).as_matrix()

train_Y = inputY_train.as_matrix()
train_Y = pd.DataFrame(data=train_Y)
train_Y = train_Y.fillna(value=0).as_matrix()

test_X = test_set.iloc[:, :-1].as_matrix()
test_X = pd.DataFrame(data=test_X)
test_X = test_X.fillna(value=0).as_matrix()

test_Y = inputY_test.as_matrix()
test_Y = pd.DataFrame(data=test_Y)
test_Y = test_Y.fillna(value=0).as_matrix()

With these edits, running just 50 epochs, which takes about 2 minutes, predicts the correct results with an accuracy of 91.4%.
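
As a side note, the same random 75/25 split can also be done with scikit-learn's train_test_split. The sketch below assumes data_ and pd are defined as in the snippet above and that a scikit-learn version providing sklearn.model_selection is installed; the variable names are only illustrative:

from sklearn.model_selection import train_test_split

# Features are every column except the Phylum label; fill NaNs with 0 as before
features = data_.iloc[:, :-1].fillna(value=0).values
# One-hot encode the Phylum labels, mirroring pd.get_dummies above
labels = pd.get_dummies(data_['Phylum']).values

# Hold out 25% of the rows at random for testing (the same ratio as frac=0.75 above)
train_X, test_X, train_Y, test_Y = train_test_split(
    features, labels, test_size=0.25, random_state=42)

Fixing random_state simply makes the split reproducible between runs; dropping it gives a different random split each time, just as data_.sample does.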