使用Tensorflow,Triplet-Loss无法收敛

时间:2018-03-04 20:14:16

标签: python tensorflow

目前我正在尝试用 TensorFlow 实现 Triplet Loss(三元组损失),代码附在下面。

但我发现训练总是发散(无法收敛)。有人能帮我找出问题所在吗?

def compute_triplet_loss(anchor_features, positive_features, negative_features, margin=0.1):
    """Per-example triplet hinge loss on cosine similarity.

    loss_i = max(0, cos(anchor_i, negative_i) - cos(anchor_i, positive_i) + margin)

    Args:
        anchor_features, positive_features, negative_features: feature tensors
            for one triplet branch each (presumably (batch, dim) — TODO confirm
            against the DNN output shape).
        margin: float, required cosine-similarity gap between the positive and
            negative pairs before the loss reaches zero.

    Returns:
        A tensor of per-example hinge losses (not reduced).
    """
    with tf.name_scope("triplet_loss"):
        anchor_norm = compute_norm(anchor_features)
        positive_norm = compute_norm(positive_features)
        negative_norm = compute_norm(negative_features)

        # FIX: guard against zero-length feature vectors. A zero norm makes
        # the cosine similarity divide by zero, yielding NaN/Inf losses whose
        # gradients immediately diverge training.
        eps = 1e-8
        denom_pos = tf.maximum(tf.multiply(anchor_norm, positive_norm), eps)
        denom_neg = tf.maximum(tf.multiply(anchor_norm, negative_norm), eps)

        a_p_product = compute_dot_product(anchor_features, positive_features)
        a_n_product = compute_dot_product(anchor_features, negative_features)

        cos_pos = tf.divide(a_p_product, denom_pos)
        cos_neg = tf.divide(a_n_product, denom_neg)

        # Hinge: penalize only when the negative is not at least `margin`
        # less similar to the anchor than the positive.
        loss = tf.maximum(0., tf.add(tf.subtract(cos_neg, cos_pos), margin))
        return loss

def DNN(input_tensor, is_training, Reuse=True):
    """Build one branch of the shared utterance-embedding network.

    Three 1024-unit fully-connected layers, each followed by batch norm,
    projected to an 80-dim d-vector, average-pooled (window 2, stride 1 —
    presumably over a time axis; verify the input rank), dropped out, and
    mean-reduced along axis 1.

    Args:
        input_tensor: input feature tensor for this triplet branch.
        is_training: bool tensor toggling batch-norm/dropout training mode.
        Reuse: whether to reuse the shared 'uttr_vars' variables. Pass False
            only for the first branch, which creates them.

    Returns:
        The embedding tensor; also registered in the 'd-vec' collection.
    """
    tf.add_to_collection('input_tensor', input_tensor)
    with tf.name_scope('uttr_net', [input_tensor]):
        with tf.variable_scope('uttr_vars', reuse=Reuse):
            net = input_tensor
            # Three identical FC(1024) + batch-norm stages (fc_1/bn_1 .. fc_3/bn_3).
            for stage in (1, 2, 3):
                net = slim.fully_connected(net, 1024, scope='fc_%d' % stage)
                net = tf.layers.batch_normalization(
                    net, training=is_training, momentum=0.995, name='bn_%d' % stage)
            # Project down to the 80-dim d-vector and normalize.
            net = slim.fully_connected(net, 80, scope='d_vec')
            net = tf.layers.batch_normalization(
                net, training=is_training, momentum=0.995, name='bn_4')
            # Average pooling: lift to 4-D, pool with a length-2 window, drop
            # the dummy axis again.
            net = tf.expand_dims(net, 1)
            net = tf.nn.avg_pool(net, ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
            net = tf.squeeze(net, [1])
            net = slim.dropout(net, keep_prob=0.65, is_training=is_training, scope='final_drop')
            net = tf.reduce_mean(net, axis=1, name='uttr_mean')
            tf.add_to_collection('d-vec', net)
    return net

def build_graph(input_shape, learning_rate=0.001):
    """Assemble the triplet training graph.

    Creates placeholders for an (anchor, positive, negative) triple, runs the
    weight-shared DNN on each, and minimizes the summed triplet loss with Adam.

    Args:
        input_shape: shape for the three float32 input placeholders.
        learning_rate: Adam learning rate.

    Returns:
        (train_op, loss, input_positive_0, input_positive_1, input_negative,
         is_training)
    """
    input_positive_0 = tf.placeholder(tf.float32, shape=input_shape, name='input_positive_0')
    input_positive_1 = tf.placeholder(tf.float32, shape=input_shape, name='input_positive_1')
    input_negative = tf.placeholder(tf.float32, shape=input_shape, name='input_negative')
    is_training = tf.placeholder(tf.bool, [])
    tf.add_to_collection('input_positive_0', input_positive_0)
    tf.add_to_collection('input_positive_1', input_positive_1)
    tf.add_to_collection('input_negative', input_negative)
    tf.add_to_collection('is_training', is_training)
    # First call creates the shared 'uttr_vars' variables; the other two reuse them.
    DNN_0 = DNN(input_positive_0, is_training, False)
    DNN_1 = DNN(input_positive_1, is_training)
    DNN_2 = DNN(input_negative, is_training)

    loss = compute_triplet_loss(DNN_0, DNN_1, DNN_2)
    loss = tf.reduce_sum(loss)
    # FIX: tf.layers.batch_normalization registers its moving-mean/variance
    # update ops in GraphKeys.UPDATE_OPS; they do NOT run automatically. They
    # must be attached to the train op, otherwise the batch-norm statistics
    # are never updated — a classic cause of models that fail to converge or
    # behave wildly at evaluation time.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    return optimizer, loss, input_positive_0, input_positive_1, input_negative, is_training

0 个答案:

没有答案