I have built a CNN for detecting faces. My accuracy is very high right from the first epoch. What could be the reason?

Asked: 2017-01-31 09:51:38

Tags: python tensorflow deep-learning face-recognition

The training set consists of face images taken from the YouTube Faces database, labeled 1, and non-face images taken from the 256 Object Categories dataset; 25k images were selected for each of the positive and negative classes, so 50k images in total for training. Another 10k images, not overlapping with the training set, were taken from YouTube Faces and 256 Object Categories for testing.

The problem is that I get 99% accuracy after only 12k iterations, within the first epoch itself. I am also printing the cost value, and it starts from a very high value such as 596014.000. When tested against other face images, the model performs very poorly. (See the cost vs epoch graph.)

import tensorflow as tf
import read_data
from sklearn import metrics
import numpy as np
import os
import graph_plotter as gp

# Parameters
learning_rate = 0.001
epochs = 30
batch_size = 100
display_step = 5

# tf Graph input
input_data = tf.placeholder(tf.float32, [None, 27, 31, 3])
output_data = tf.placeholder(tf.float32, [None, 1])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

# Getting train and test data
train_data, train_label , test_data, test_label = read_data.getData()

def conv2d(x, w, bias, k=1):
    x = tf.nn.conv2d(x, w, strides=[1, k, k, 1], padding='SAME')
    x = tf.nn.bias_add(x, bias)
    return tf.nn.relu(x)


# Performs max pooling on the convolution layer output
def maxpool2d(x, k=2):
    return tf.nn.max_pool(x,
                          ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

# Weights generated randomly according to layer
weights = {

    # Conv 4*4 , 3 inputs (RGB channels) , 32 outputs
    'wc1': tf.Variable(tf.random_normal([4, 4, 3, 32])),

    # Conv 3*3 , 32 inputs , 64 outputs
    'wc2': tf.Variable(tf.random_normal([3, 3, 32, 64])),

    # Conv 5*6 , 64 inputs , 128 outputs
    'wc3': tf.Variable(tf.random_normal([5, 6, 64, 128])),

    # Conv 1*1 , 128 inputs , 256 outputs
    'wc4': tf.Variable(tf.random_normal([1, 1, 128, 256])),

    # Conv 1*1 , 256 inputs , 512 outputs
    'wc5': tf.Variable(tf.random_normal([1, 1, 256, 512])),

    # Output layer: 7*8*512 inputs and 1 output (face or non-face)
    'out': tf.Variable(tf.random_normal([7*8*512, 1]))
}

biases = {

    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bc4': tf.Variable(tf.random_normal([256])),
    'bc5': tf.Variable(tf.random_normal([512])),
    'out': tf.Variable(tf.random_normal([1]))
}

def model(x, weight, bias, dropout):

    # Layer 1
    conv1 = conv2d(x, weight['wc1'], bias['bc1'])
    conv1 = maxpool2d(conv1, k=2)

    # Layer 2
    conv2 = conv2d(conv1, weight['wc2'], bias['bc2'])
    conv2 = maxpool2d(conv2, k=2)

    # Layer 3
    conv3 = conv2d(conv2, weight['wc3'], bias['bc3'])

    # Layer 4
    conv4 = conv2d(conv3, weight['wc4'], bias['bc4'])

    # Layer 5
    conv5 = conv2d(conv4, weight['wc5'], bias['bc5'])

    # conv5 = tf.nn.dropout(conv5, dropout)

    # Flattening data
    intermediate = tf.reshape(conv5, shape=[-1, 7*8*512])

    # Output layer
    output = tf.add(tf.matmul(intermediate, weight['out']), bias['out'])
    return output

pred = model(input_data, weights, biases, keep_prob)

l2_loss = 0.001 * (
    tf.nn.l2_loss(weights.get('wc4')) +
    tf.nn.l2_loss(weights.get('wc5')) +
    tf.nn.l2_loss(weights.get('out')))

cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    pred, output_data)) + l2_loss

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
correct_pred = tf.equal(tf.greater(sigmoid_output, 0.5),
                        tf.greater(output_data, 0.5))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

y_p = tf.cast(tf.greater(sigmoid_output, 0.5), tf.int32)

saver = tf.train.Saver()
tf.add_to_collection('y_p', y_p)
tf.add_to_collection('pred', pred)
tf.add_to_collection('x', input_data)
tf.add_to_collection('y', output_data)

init = tf.global_variables_initializer()
with tf.device("/gpu:0"):
  with tf.Session() as sess:
    sess.run(init)
    train_data_minibatches = [train_data[k:k + batch_size]
                              for k in range(0, len(train_data), batch_size)]
    train_label_minibatches = [train_label[k:k + batch_size]
                               for k in range(0, len(train_label), batch_size)]
    step = 0
    batch_count = 0
    avg_cost_list = []
    avg_accuracy_list = []
    for epoch in range(epochs):
        print('Epoch ' + str(epoch))
        cost_list = []
        accuracy_list = []
        for batch_x, batch_y in zip(
            train_data_minibatches, train_label_minibatches):
            batch_count += 1
            sess.run(optimizer, feed_dict={input_data: batch_x,
                                           output_data: batch_y,
                                           keep_prob: 0.75})
            # if epoch % display_step == 0:
            loss, acc, output = sess.run([cost, accuracy, sig],
                                         feed_dict={input_data: batch_x,
                                                    output_data: batch_y,
                                                    keep_prob: 0.75})

            cost_list.append(loss)
            accuracy_list.append(acc)

            print("Iter " + str(step * batch_size) +" Loss "+ "{:.5f}".format(loss)+ ", Training Accuracy= " +
                  "{:.5f}".format(acc))
            step += 1
        average_cost = sum(cost_list) / len(cost_list)
        average_acc = sum(accuracy_list) / len(accuracy_list)

        avg_cost_list.append(average_cost)
        avg_accuracy_list.append(average_acc)

        if epoch % display_step == 0:
            test_acc, y_pred = sess.run([accuracy, y_p], feed_dict={input_data: test_data,
                                                        output_data: test_label,
                                                        keep_prob: 0.75})

            print(metrics.confusion_matrix(test_label, y_pred))
            print("Testing Accuracy : " + "{:.5f}".format(test_acc))
            print("Optimization finished !!")

    # Saving cost Vs epoch graph, and accuracy Vs epoch graphs.
    gp.cost_vs_epoch(avg_cost_list)
    gp.accuracy_vs_epoch(avg_accuracy_list)

    save_path = saver.save(sess=sess, save_path=save_path, write_meta_graph=True)

1 Answer:

Answer 0 (score: 1):

The line

correct_pred = tf.equal(tf.greater(sigmoid_output, 0.5),
                        tf.greater(output_data, 0.5))

is probably wrong. Your code does not seem to define sigmoid_output anywhere; it only has pred.
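
For reference, here is a minimal sketch of how that missing tensor could be defined from pred. The names sigmoid_output and sig simply match the identifiers your script already references; defining them this way is an assumption, not a confirmed fix:

# Minimal sketch, assuming `pred` holds the raw logits produced by model().
sigmoid_output = tf.sigmoid(pred)   # probabilities in [0, 1]
sig = sigmoid_output                # alias referenced in sess.run([cost, accuracy, sig], ...)

With those two lines in place, the correct_pred comparison above and the sess.run call inside the training loop both have a tensor to evaluate.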

If that is not the problem, I would look into train_data. How many "True" labels (> 0.5) do you have? How many labels that evaluate to False?
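
A quick way to check that, assuming train_label is a list or NumPy array of 0/1 labels shaped like the output_data placeholder (this snippet is only a sketch and is not part of the original code):

import numpy as np

labels = np.asarray(train_label).reshape(-1)   # flatten [N, 1] -> [N]
positives = int(np.sum(labels > 0.5))          # face images, labelled 1
negatives = int(np.sum(labels <= 0.5))         # non-face images, labelled 0
print("positives:", positives, "negatives:", negatives,
      "positive ratio: {:.3f}".format(positives / float(len(labels))))

If the ratio is far from 0.5, near-perfect accuracy can come from the network simply predicting the majority class.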