TensorFlow neural network has high error even on a very simple dataset

Time: 2017-05-24 06:24:41

Tags: tensorflow neural-network

I am trying to implement a one-hidden-layer neural network for a regression problem. Even on very simple data, the loss stops improving after a few iterations and gets stuck at a very high error. Can someone help me find the bug? Here is my code:

import tensorflow as tf
import scipy.io as sio
import numpy as np
reuse_weights = 1
n_nodes_hl1 = 10
batch_size  = 200
hm_epochs = 20

# load input from MATLAB .mat files
input_training = sio.loadmat('xMat.mat')
input_training = input_training['xMat']
input_test = sio.loadmat('xMat.mat')
input_test = input_test['xMat']

# find number of measurements and input length
n_measurements = input_training.shape[0]
input_length = input_training.shape[1]

# current input
data_y = input_training[:, input_length - 1].astype(float)
data_x = input_training[:, 0 : input_length - 1].astype(float)
test_data_y = input_test[:, input_length - 1].astype(float)
test_data_x = input_test[:, 0 : input_length - 1].astype(float)

x = tf.placeholder('float32',[None, input_length - 1])
y = tf.placeholder('float32')

# placeholder for the dropout keep probability
keep_prob = tf.placeholder('float32')

def next_batch(data):
    """
    Return a total of `batch_size` samples from the array `data`. 
    """
    if len(data.shape) == 2:
        idx = np.arange(0, len(data[:,0]))  # get all possible indexes
    else:
        idx = np.arange(0, len(data))  # get all possible indexes

    np.random.shuffle(idx)  # shuffle indexes
    idx = idx[0:batch_size]  # use only `batch_size` random indexes

    if len(data.shape) == 2:
        data_shuffle = [data[i,:] for i in idx]  # get list of `batch_size` random samples
    else:
        data_shuffle = [data[i] for i in idx]  # get list of `batch_size` random samples

    data_shuffle = np.asarray(data_shuffle)  # get back numpy array
    return data_shuffle

def neural_network_model(data, weights, biases, keep_prob):
    # note: keep_prob is accepted for dropout, but no dropout layer is applied
    layer1 = tf.add(tf.matmul(data, weights['h1']), biases['b1'])
    layer1 = tf.nn.sigmoid(layer1)
    output = tf.add(tf.matmul(layer1, weights['out']), biases['out'])

    return output

if reuse_weights:
    weights = {
        'h1': tf.Variable(sio.loadmat('weights_h1.mat')['weights_h1'], name="weights_h1"),
        'out': tf.Variable(sio.loadmat('weights_out.mat')['weights_out'], name="weights_out")
    }
    biases = {
        'b1': tf.Variable(sio.loadmat('biases_b1.mat')['biases_b1'], name="biases_b1"),
        'out': tf.Variable(sio.loadmat('biases_out.mat')['biases_out'], name="biases_out")
    }
else:  # initialize weights
    weights = {
        'h1': tf.Variable(tf.random_normal([input_length - 1, n_nodes_hl1]), name="weights_h1"),
        'out': tf.Variable(tf.random_normal([n_nodes_hl1, 1]), name="weights_out")
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_nodes_hl1]), name="biases_b1"),
        'out': tf.Variable(tf.random_normal([1]), name="biases_out")
    }

def train_neural_network(x):
    prediction = neural_network_model(x, weights, biases, keep_prob)[:, 0]
    cost = tf.reduce_mean(tf.abs(prediction - y))  # mean absolute error (L1 loss)
    optimizer = tf.train.AdamOptimizer()
    opt = optimizer.minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(weights['h1'])

        for epoch in range(hm_epochs): #training
            epoch_loss = 0
            for _ in range(int(n_measurements/batch_size)):
                _, c, p = sess.run([opt, cost, prediction],
                                   feed_dict={x: next_batch(data_x),
                                              y: next_batch(data_y),
                                              keep_prob: 1.0})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'Average loss:', epoch_loss/int(n_measurements/batch_size))

        # evaluation: mean absolute error (printed below as "accuracy")
        accuracy = tf.reduce_mean(tf.abs(prediction - y))

        # Feed 1.0 for keep prob during testing
        print("Training data accuracy:", accuracy.eval({x: data_x, y: data_y, keep_prob : 1.0}))
        print("Training data predictions:", prediction.eval({x: data_x[0:5,:], keep_prob : 1.0}))
        print("Training data:",data_y[0:5])
        #print("Test data accuracy:", accuracy.eval({x: test_data_x, y: test_data_y, keep_prob : 1.0}))


        # save the trained weights and biases back to .mat files
        sio.savemat('weights_h1.mat', {'weights_h1': weights['h1'].eval()})
        sio.savemat('biases_b1.mat', {'biases_b1': biases['b1'].eval()})
        sio.savemat('weights_out.mat', {'weights_out': weights['out'].eval()})
        sio.savemat('biases_out.mat', {'biases_out': biases['out'].eval()})

train_neural_network(x)

1 Answer:

Answer 0 (score: 0)

Figured it out. The problem was the data shuffling: next_batch is called separately for data_x and data_y, and each call draws its own random shuffle, so the inputs and the responses were shuffled differently and the input rows in each batch no longer corresponded to their response values.
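A minimal sketch of one way to fix it, assuming data_x and data_y have the same number of rows: draw a single random permutation and apply it to both arrays, so every input row stays paired with its response. The helper next_batch_pair and its signature are illustrative, not from the original post.

import numpy as np

def next_batch_pair(data_x, data_y, batch_size):
    """
    Return one random batch of matching (x, y) pairs.
    One permutation is drawn and applied to both arrays,
    so each input row stays aligned with its response value.
    """
    idx = np.random.permutation(data_x.shape[0])[:batch_size]  # batch_size shared random row indices
    return data_x[idx], data_y[idx]

The training step then feeds both halves of the same batch:

batch_x, batch_y = next_batch_pair(data_x, data_y, batch_size)
_, c, p = sess.run([opt, cost, prediction],
                   feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})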