I am trying to retrain the last layer of a pre-trained model on the same dataset (the MNIST digits dataset), but the retrained model's accuracy is much worse than the original one. My original model reaches about 98% accuracy, while the retrained model's accuracy varies between 40% and 80% depending on the run. I get similar results when I don't train the first two layers at all.
And the code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
epochs1 = 150
epochs2 = 300
batch_size = 11000
learning_rate1 = 1e-3
learning_rate2 = 1e-4
# Base model
def base_model(input, reuse=False):
    with tf.variable_scope('base_model', reuse=reuse):
        layer1 = tf.contrib.layers.fully_connected(input, 300)
        features = tf.contrib.layers.fully_connected(layer1, 300)
        return features
mnist = input_data.read_data_sets('./mnist/', one_hot=True)
image = tf.placeholder(tf.float32, [None, 784])
label = tf.placeholder(tf.float32, [None, 10])
features1 = base_model(image, reuse=False)
features2 = base_model(image, reuse=True)
# Logits1 trained with the base model
with tf.variable_scope('logits1', reuse=False):
    logits1 = tf.contrib.layers.fully_connected(features1, 10, tf.nn.relu)
# Logits2 trained while the base model is frozen
with tf.variable_scope('logits2', reuse=False):
    logits2 = tf.contrib.layers.fully_connected(features2, 10, tf.nn.relu)
# Var Lists
var_list_partial1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='logits1')
var_list_partial2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='base_model')
var_list1 = var_list_partial1 + var_list_partial2
var_list2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='logits2')
# Sanity check
print("var_list1:", var_list1)
print("var_list2:", var_list2)
# Cross Entropy Losses
loss1 = tf.nn.softmax_cross_entropy_with_logits(logits=logits1, labels=label)
loss2 = tf.nn.softmax_cross_entropy_with_logits(logits=logits2, labels=label)
# Train the final logits layer
train1 = tf.train.AdamOptimizer(learning_rate1).minimize(loss1, var_list=var_list1)
train2 = tf.train.AdamOptimizer(learning_rate2).minimize(loss2, var_list=var_list2)
# Accuracy operations
correct_prediction1 = tf.equal(tf.argmax(logits1, 1), tf.argmax(label, 1))
correct_prediction2 = tf.equal(tf.argmax(logits2, 1), tf.argmax(label, 1))
accuracy1 = tf.reduce_mean(tf.cast(correct_prediction1, "float"))
accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, "float"))
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    batches = int(len(mnist.train.images) / batch_size)
    # Train base model and logits1
    for epoch in range(epochs1):
        for batch in range(batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train1, feed_dict={image: batch_xs, label: batch_ys})
    # Train logits2 keeping the base model frozen
    for epoch in range(epochs2):
        for batch in range(batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train2, feed_dict={image: batch_xs, label: batch_ys})
    # Print the accuracy of both models after training
    accuracy = sess.run(accuracy1, feed_dict={image: mnist.test.images, label: mnist.test.labels})
    print("Initial Model Accuracy After training final model:", accuracy)
    accuracy = sess.run(accuracy2, feed_dict={image: mnist.test.images, label: mnist.test.labels})
    print("Final Model Accuracy After Training:", accuracy)
Thanks!
Answer 0 (score: 1)
Try removing the nonlinearity from "logits1" and "logits2". tf.nn.softmax_cross_entropy_with_logits expects raw, unbounded logits because it applies the softmax itself; a ReLU on the final layer clamps every negative logit to zero, which throws away the ranking among the suppressed classes and kills their gradients, so training becomes unstable.
I changed your code to:
# Logits1 trained with the base model
with tf.variable_scope('logits1', reuse=False):
    # logits1 = tf.contrib.layers.fully_connected(features1, 10, tf.nn.relu)
    logits1 = tf.contrib.layers.fully_connected(features1, 10, None)
# Logits2 trained while the base model is frozen
with tf.variable_scope('logits2', reuse=False):
    # logits2 = tf.contrib.layers.fully_connected(features2, 10, tf.nn.relu)
    logits2 = tf.contrib.layers.fully_connected(features2, 10, None)
and the results changed to:
Initial Model Accuracy After training final model: 0.9805
Final Model Accuracy After Training: 0.9658
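A separate aside, unrelated to the fix above: tf.nn.softmax_cross_entropy_with_logits returns one loss value per example, so your loss1 and loss2 are vectors. TF1's minimize effectively sums the gradients of a non-scalar loss, so it still works, but the idiomatic pattern is to reduce to a scalar mean first. A minimal sketch against your code:

# Reduce the per-example cross-entropy to a scalar before minimizing;
# this is the usual TF1 pattern and keeps the effective learning rate
# comparable across batch sizes.
loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits1, labels=label))
loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits2, labels=label))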
P.S. 300 + 300 neurons is far more than an MNIST classifier needs, but I assume classifying MNIST isn't your actual goal :)
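For what it's worth, a minimal sketch of a slimmer base model (the 64-unit width is my illustrative pick, not something from your setup); a single small hidden layer typically still gets an MLP well above 95% on MNIST:

def small_base_model(input, reuse=False):
    # Deliberately small feature extractor; one 64-unit hidden layer
    # is already enough for a strong MNIST baseline.
    with tf.variable_scope('small_base_model', reuse=reuse):
        return tf.contrib.layers.fully_connected(input, 64)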