Getting a linear regression score from transfer learning

Asked: 2017-02-15 07:29:54

Tags: machine-learning classification conv-neural-network

My task is to assign each image a score between 0.0 and 1.0. For this I am using models already trained for the ImageNet classification competition, such as VGG, SqueezeNet, etc. On top of the output of the convolutional layers of such a model I add my own 2 or 3 dense (fully connected) layers, the earlier ones with some number 'x' of hidden units and the last one with just a single unit. The value coming out of that last single-unit layer is what I use as the score.
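Conceptually, the head I am adding looks roughly like this (a minimal sketch using tf.layers.dense with placeholder layer sizes; my actual code, with a hand-rolled fcLayer helper, follows further down):

import tensorflow as tf

# Flattened bottleneck features from the pretrained convnet
# (7*7*512 = 25088 for VGG16's pool5 output).
pool5_flat = tf.placeholder(tf.float32, shape=(None, 25088))

hidden = tf.layers.dense(pool5_flat, units=4096, activation=tf.nn.relu)  # dense layer with 'x' hidden units
hidden = tf.layers.dense(hidden, units=4096, activation=tf.nn.relu)      # optional second dense layer
score = tf.layers.dense(hidden, units=1, activation=None)                # single unit whose output is the score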

I am retraining all of the dense layers, but after training I get a nearly constant score of about 0.75 no matter which input I feed in. I have a decent training set of 50,000 images.

Can someone explain where I am going wrong with this approach? Also, any pointers on how to tackle this kind of problem would be very helpful.

The relevant part of the code:

import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle   # assumed source of the shuffle() used in the training loop
from tensorflow.python.ops import control_flow_ops

def fcLayer(images, weight, bias, should_activate = True):
    # Fully connected layer: matmul + bias, with an optional ReLU on top.
    fc = tf.matmul(images, weight)
    bias_add = tf.nn.bias_add(fc, bias)
    if not should_activate:
        return bias_add
    out = tf.nn.relu(bias_add)
    return out

# Pretrained VGG16 weights; only the fully connected layer weights are used below.
weights = np.load('../Data/vgg16_weights.npz')
def fc_VGG(pool5_flat):   # Feed the flattened pool5 bottleneck features directly.
    # fc6
    with tf.variable_scope('fc6'):
        fc6W = tf.get_variable('fc6_W', dtype = tf.float32, trainable = True,
                                initializer = weights['fc6_W'])
        fc6b = tf.get_variable('fc6_b', dtype = tf.float32, trainable = True,
                                 initializer = weights['fc6_b'])
        fc6 = fcLayer(pool5_flat, fc6W, fc6b)

    # fc7
    with tf.variable_scope('fc7'):
        fc7W = tf.get_variable('fc7_W', dtype = tf.float32, trainable = True,
                                initializer = weights['fc7_W'])
        fc7b = tf.get_variable('fc7_b', dtype = tf.float32, trainable = True,
                                 initializer = weights['fc7_b'])
        fc7 = fcLayer(fc6, fc7W, fc7b)
        # Dropout is applied only when is_train is fed as True.
        fc7 = tf.cond(is_train, lambda: tf.nn.dropout(fc7, keep_prob = 0.35), lambda: fc7)

    # fc8: freshly initialised single-unit output layer whose value is the score.
    with tf.variable_scope('fc8'):
        fc7_shape = int(np.prod(fc7.get_shape()[1:]))
        fc8W = tf.get_variable('fc8_W', dtype = tf.float32, trainable = True,
                                initializer = tf.random_normal((fc7_shape, new_output_units), stddev = 1e-1))
        fc8b = tf.get_variable('fc8_b', dtype = tf.float32, trainable = True,
                                 initializer = tf.ones((1)))
        fc8 = fcLayer(fc7, fc8W, fc8b, should_activate = False)

    return fc8

learning_rate = 0.0001
new_output_units = 1   # the last dense layer has a single unit, used as the score

tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (None, 25088))
y = tf.placeholder(tf.float32, shape = (None))
alpha = tf.constant(learning_rate, tf.float32)
is_train = tf.placeholder(tf.bool)

logits = fc_VGG(X)
loss = tf.reduce_mean(tf.abs(tf.subtract(logits, y)))
optimizer = tf.train.AdamOptimizer(learning_rate = alpha).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for i in range(EPOCHS):
        current_learning_rate = learning_rate * (1 - WEIGHT_DECAY)
        num_examples = len(y_train)

        X_train_files, y_train = shuffle(X_train_files, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x_files, batch_y = X_train_files[offset: end], y_train[offset: end]
            batch_x = load_batchX()   # loads the bottleneck features for these files (defined elsewhere)

            _, loss_val = sess.run([optimizer, loss], feed_dict = {X: batch_x, y: batch_y,
                                                                   alpha: current_learning_rate, is_train: True})
            loss_history.append(loss_val)
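
For reference, one way to see whether the predicted scores vary at all across inputs (a minimal sketch reusing the tensors defined above; X_val stands for a hypothetical held-out batch of bottleneck features, not a variable from my code):

    # Still inside the Session block: inspect the spread of predicted scores on a held-out batch.
    val_scores = sess.run(logits, feed_dict = {X: X_val, is_train: False})
    print('score min / max / mean:', val_scores.min(), val_scores.max(), val_scores.mean())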

0 Answers:

No answers yet.