TensorFlow DNN always underfits my dataset

Asked: 2019-03-27 19:07:11

Tags: python tensorflow neural-network deep-learning

I'm trying to build a deep neural network with the low-level TensorFlow API, but when I train the model and test it, the loss and MAE on both the training set and the test set are very similar and very high compared to the other models I've tried (e.g. random forest and AdaBoost on decision trees). I even built the same DNN with Keras, and it gives me much better results. I don't understand the problem; I don't have much experience in machine learning.
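For reference, the kind of baseline comparison I mean looks roughly like the sketch below (scikit-learn defaults, not necessarily the exact hyperparameters I used):

# rough baseline sketch: scikit-learn defaults, not my exact setup
import numpy as np
from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor
from sklearn.metrics import mean_absolute_error

y_train_flat = np.array(y_train).reshape(-1)

rf = RandomForestRegressor(n_estimators=100)
rf.fit(X_train, y_train_flat)
print("Random forest test MAE:", mean_absolute_error(y_test, rf.predict(X_test)))

ada = AdaBoostRegressor()  # decision-tree base estimator by default
ada.fit(X_train, y_train_flat)
print("AdaBoost test MAE:", mean_absolute_error(y_test, ada.predict(X_test)))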

TensorFlow code

# load libraries
import numpy as np
import tensorflow as tf

# reset graph
tf.reset_default_graph()


# define network dimensions
n_features = X_train.shape[1]  # X_train/y_train come from my dataset (not shown)
n_hidden1 = 200
n_outputs = 1


with tf.device("/gpu:0"):
    X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
    y = tf.placeholder(tf.float32, shape=(None), name="y")

    with tf.name_scope("dnn"):
        hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
                                  activation=tf.nn.leaky_relu)
        logits = tf.layers.dense(hidden1, n_outputs, name="logits")

with tf.device("/cpu:0"):
    with tf.name_scope("loss"):
        loss = tf.reduce_mean(tf.abs(logits - y), name="loss")


with tf.device("cpu:0"):
    with tf.name_scope("learning_rate"):
        learning_rate = 0.001

with tf.device("/gpu:0"):
    with tf.name_scope("train"):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        training_op = optimizer.minimize(loss)

with tf.device("/gpu:0"):        
    with tf.name_scope("eval"):
        mae = tf.reduce_mean(tf.abs(logits - y), name="mae")

def shuffle_batch(X, y, batch_size):
    # yield shuffled mini-batches that together cover the full training set
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch

n_epochs = 200
batch_size = 1000
n_batches = int(np.ceil(X_train.shape[0] / batch_size))

# create graph variables initializer 
init = tf.global_variables_initializer()

# create model saver
saver = tf.train.Saver()

# set device to gpu
with tf.device("/gpu:0"):
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            print("Epoch:", str(epoch) + "/" + str(n_epochs))
            for X_batch, y_batch in shuffle_batch(X_train, np.array(y_train).reshape(-1), batch_size):
                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            acc_batch = mae.eval(feed_dict={X: X_batch, y: y_batch})
            acc_val = mae.eval(feed_dict={X: X_test, y: np.array(y_test).reshape(-1)})
            loss_batch = loss.eval(feed_dict={X: X_batch, y: y_batch})
            loss_val = loss.eval(feed_dict={X: X_test, y: np.array(y_test).reshape(-1)})
            print("Batch mae:", acc_batch, "Val mae:", acc_val)
            print("Batch loss:", loss_batch, "Val loss:", loss_val)

Keras code

import numpy as np
import tensorflow as tf
from keras import layers
from keras import models
from keras import optimizers
from keras import initializers
from keras import regularizers

network = models.Sequential()

network.add(layers.Dense(units=200, 
                         activation=tf.nn.leaky_relu, 
                         input_shape=(X_train.shape[1], )))

# output layer
network.add(layers.Dense(units=1))

network.compile(loss="mae", 
                optimizer=optimizers.Adam(lr=0.001),
                metrics=["mae"])

history = network.fit(X_train,
                      np.array(y_train).reshape(-1),
                      epochs=200, 
                      verbose=1,
                      batch_size=1000,
                      validation_data=(X_test, np.array(y_test).reshape(-1)),
                      shuffle=True)

0 Answers:

No answers yet