Each epoch takes more and more time

Posted: 2018-08-31 09:33:54

Tags: tensorflow

I may have a "bloated graph" (see: Why does tf.assign() slow the execution time?), because each epoch takes more and more time, but I can't see where in my code. Could you help me? I'm still a TensorFlow novice.

# NEURAL NETWORK
def MLP(x, weights, biases, is_training):

    # Hidden layer 1
    hLayer1 = tf.add(tf.matmul(x, weights["w1"]), biases["b1"])
    hLayer1 = tf.nn.sigmoid(hLayer1)
    bn1 = batch_norm_wrapper(hLayer1, gamma=weights["gamma1"], beta=weights["beta1"], is_training=is_training, name="1")
    hLayer1 = bn1


    # Hidden layer 2
    hLayer2 = tf.add(tf.matmul(hLayer1, weights["w2"]), biases["b2"])
    hLayer2 = tf.nn.sigmoid(hLayer2)
    bn2 = batch_norm_wrapper(hLayer2, gamma=weights["gamma2"], beta=weights["beta2"], is_training=is_training, name="2")
    hLayer2 = bn2

    # Output layer
    outLayer = tf.add(tf.matmul(hLayer2, weights["wOut"]), biases["bOut"], name="outLayer")

    return outLayer



# Weights and biases
weights = {
    "w1": tf.get_variable(shape=[n_input, n_hLayer1], initializer=tf.keras.initializers.he_normal(seed=5), name="w1", trainable=True),
    "w2": tf.get_variable(shape=[n_hLayer1, n_hLayer2], initializer=tf.keras.initializers.he_normal(seed=5), name="w2", trainable=True),
    "wOut": tf.get_variable(shape=[n_hLayer2, n_classes], initializer=tf.keras.initializers.he_normal(seed=5), name="wOut", trainable=True),

    "gamma1": tf.get_variable(shape=[n_hLayer1], initializer=tf.ones_initializer(), name="gamma1", trainable=True),
    "beta1": tf.get_variable(shape=[n_hLayer1], initializer=tf.zeros_initializer(), name="beta1", trainable=True),

    "gamma2":tf.get_variable(shape=[n_hLayer2], initializer=tf.ones_initializer(), name="gamma2", trainable=True),
    "beta2": tf.get_variable(shape=[n_hLayer2], initializer=tf.zeros_initializer(), name="beta2", trainable=True)
}

biases = {
    "b1": tf.get_variable(shape=[n_hLayer1], initializer=tf.zeros_initializer(), name="b1", trainable=True),
    "b2": tf.get_variable(shape=[n_hLayer2], initializer=tf.zeros_initializer(), name="b2", trainable=True),
    "bOut": tf.get_variable(shape=[n_classes], initializer=tf.zeros_initializer(), name="bOut", trainable=True)
}


def batch_norm_wrapper(inputs, gamma, beta, is_training, name, decay=0.999):

    # Population statistics: updated as an exponential moving average during
    # training, used directly at inference time.
    pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), name="pop_mean{}".format(name), trainable=False)
    pop_var = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), name="pop_var{}".format(name), trainable=False)

    if is_training:
        # Statistics of the current batch (over the batch dimension).
        batch_mean, batch_var = tf.nn.moments(inputs, [0])

        train_mean = tf.assign(pop_mean, pop_mean*decay + batch_mean*(1-decay))
        train_var = tf.assign(pop_var, pop_var*decay + batch_var*(1-decay))

        # Ensure the moving-average updates run whenever the normalized
        # output is evaluated.
        with tf.control_dependencies([train_mean, train_var]):
            return tf.nn.batch_normalization(x=inputs, mean=batch_mean, variance=batch_var, scale=gamma, offset=beta, variance_epsilon=0.001)

    else:
        return tf.nn.batch_normalization(x=inputs, mean=pop_mean, variance=pop_var, scale=gamma, offset=beta, variance_epsilon=0.001)


# Model
predictions = MLP(next_element[0], weights, biases, is_training=True)


# Loss function and regularization
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=predictions, labels=next_element[1]))
l1_regularizer = tf.reduce_sum(tf.abs(weights["w1"])) + tf.reduce_sum(tf.abs(weights["w2"])) + tf.reduce_sum(tf.abs(weights["wOut"]))
l2_regularizer = tf.reduce_mean(tf.nn.l2_loss(weights["w1"]) + tf.nn.l2_loss(weights["w2"]) + tf.nn.l2_loss(weights["wOut"]))
loss = loss + r*alpha1*l1_regularizer + (1-r)*alpha2*l2_regularizer

# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)



# LAUNCH THE GRAPH
with tf.Session() as sess:

    sess.run(init_op)

    # Training
    for trainEpoch in range(training_epochs):

        sess.run(training_iterator_op)

        while True:

            try:
                value = sess.run(next_element)
                sess.run([loss, optimizer])

            except tf.errors.OutOfRangeError:
                break

I use the Dataset API to feed my training data.
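
The snippet above does not show how next_element, training_iterator_op and init_op are defined. The following is a minimal sketch of how they might be wired up with an initializable iterator over an in-memory dataset; train_x, train_y and batch_size are hypothetical stand-ins for my actual data and settings, and n_input / n_classes are the same sizes used for the weight matrices above.

import tensorflow as tf
import numpy as np

# Hypothetical in-memory training data.
train_x = np.random.rand(1000, n_input).astype(np.float32)
train_y = np.random.randint(0, 2, size=(1000, n_classes)).astype(np.float32)
batch_size = 32

dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))
dataset = dataset.shuffle(buffer_size=1000).batch(batch_size)

# Initializable iterator: running its initializer once per epoch restarts
# the data stream, which is what training_iterator_op does in the loop above.
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()          # (features, labels) fed to MLP
training_iterator_op = iterator.initializer

init_op = tf.global_variables_initializer()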

2 answers:

Answer 0 (score: 0)

A few reasons training can slow down over time:

  • It is most likely because your training loop is holding on to things it shouldn't. Also make sure you are not keeping temporary computations in an ever-growing list without clearing it.

  • If you are using a custom network/loss function, it is also possible that the computation gets more expensive as you approach the optimal solution. To find out, time the different parts separately: data loading, the forward pass of the network, the loss computation, the backward pass and the parameter update. Hopefully only one of them will be increasing, and you will get a better idea of what is going on (a rough sketch of these checks follows this list).
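
Here is a rough sketch (TF 1.x) of both checks applied to the loop in the question: sess.graph.finalize() makes any op accidentally created inside the loop raise an error instead of silently bloating the graph, and the timers separate per-epoch overhead from the train steps. It reuses loss, optimizer, init_op, training_iterator_op and training_epochs from the question; nothing else is assumed.

import time
import tensorflow as tf

with tf.Session() as sess:
    sess.run(init_op)

    # Freeze the graph: if anything inside the loops tries to add new ops
    # (a growing graph is a classic cause of ever-slower epochs), this
    # raises a RuntimeError immediately.
    sess.graph.finalize()
    print("ops in graph:", len(tf.get_default_graph().get_operations()))

    for trainEpoch in range(training_epochs):
        sess.run(training_iterator_op)
        epoch_start = time.time()
        step_time = 0.0
        steps = 0

        while True:
            try:
                # Only the train op is run here; running next_element in a
                # separate sess.run() would consume an extra batch per step.
                t0 = time.time()
                sess.run([loss, optimizer])
                step_time += time.time() - t0
                steps += 1
            except tf.errors.OutOfRangeError:
                break

        total = time.time() - epoch_start
        print("epoch %d: %d steps, total %.2fs, in train steps %.2fs"
              % (trainEpoch, steps, total, step_time))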

Reference: Training gets slow down by each batch slowly [in Pytorch]

Also read: training slow down [in Keras]

Answer 1 (score: 0)

I thought I would share my findings about a training problem related to your question in TensorFlow 2.x.x (2.4.1 in my case). Here is what I found after hours of research on the internet.