Getting different accuracies in a convolutional neural network with TensorFlow and TFLearn

Time: 2018-03-29 06:37:58

Tags: tensorflow conv-neural-network tflearn

So I built a CNN using tflearn and its accuracy is good, but then I trained the same type of network, with the same learning rate and other hyperparameters, using plain TensorFlow. For some reason I don't understand, the accuracy is lower when I use TensorFlow. Is there a reason for this?

Here are my neural network layers:

import tflearn

def cnn(x):
    # Wrap the externally created placeholder as a tflearn input layer
    x = tflearn.layers.core.input_data(shape=[None, 50, 50, 3], placeholder=x)

    # Five conv + max-pool stages
    conv_layer1 = tflearn.layers.conv.conv_2d(x, nb_filter=32, filter_size=5, activation='relu')
    out_layer_1 = tflearn.layers.max_pool_2d(conv_layer1, 5)

    conv_layer2 = tflearn.layers.conv.conv_2d(out_layer_1, nb_filter=64, filter_size=5, activation='relu')
    out_layer_2 = tflearn.layers.max_pool_2d(conv_layer2, 5)

    conv_layer3 = tflearn.layers.conv.conv_2d(out_layer_2, nb_filter=128, filter_size=5, activation='relu')
    out_layer_3 = tflearn.layers.max_pool_2d(conv_layer3, 5)

    conv_layer4 = tflearn.layers.conv.conv_2d(out_layer_3, nb_filter=64, filter_size=5, activation='relu')
    out_layer_4 = tflearn.layers.max_pool_2d(conv_layer4, 5)

    conv_layer5 = tflearn.layers.conv.conv_2d(out_layer_4, nb_filter=32, filter_size=5, activation='relu')
    out_layer_5 = tflearn.layers.max_pool_2d(conv_layer5, 5)

    # Fully connected head with dropout
    fc1 = tflearn.layers.core.fully_connected(out_layer_5, 1024, activation='relu', name="FC1")
    fc1_dropout = tflearn.layers.core.dropout(fc1, 0.5)

    output = tflearn.layers.core.fully_connected(fc1_dropout, 2, activation='softmax', name='output')
    return output
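One thing I only noticed while writing this up: `tf.nn.softmax_cross_entropy_with_logits_v2` expects unnormalized logits and applies softmax internally, while my output layer already uses `activation='softmax'`, so the loss in my training function below ends up applying softmax twice. A minimal sketch of the variant I would test (my assumption, not something I have verified yet):

    # Assumed variant: emit raw logits and let the loss apply softmax once.
    logits = tflearn.layers.core.fully_connected(fc1_dropout, 2, activation='linear', name='output')

    # The loss applies softmax internally, so it must receive raw logits.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y)

    # Apply softmax explicitly only where class probabilities are needed.
    probabilities = tf.nn.softmax(logits)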

And here is my training function:

import time

import numpy as np
import progressbar
import tensorflow as tf

import ClassifyData  # project-local helper for batch index bookkeeping

def train_model():
    x = tf.placeholder(tf.float32, shape=[None, 50, 50, 3], name="x")
    x_image = tf.reshape(x, [-1, 50, 50, 3])
    y = tf.placeholder(tf.float32, shape=[None, 2], name="y")
    y_cls = tf.argmax(y, dimension=1)

    y_pred = cnn(x_image)

    print "Importing Training Data..."
    x_train = np.load('data/CatOrDog/training_images.npy')
    y_train = np.load('data/CatOrDog/training_labels.npy')
    # One-hot encode the string labels: Dog -> [1, 0], Cat -> [0, 1]
    y_train = [[1, 0] if label == 'Dog' else [0, 1] for label in y_train]
    y_train = np.array(y_train)

    # Shuffle the training set
    randomer = np.arange(x_train.shape[0])
    np.random.shuffle(randomer)
    x_train = x_train[randomer]
    y_train = y_train[randomer]
    n_data = len(x_train)
    x_train = np.array(x_train, dtype='float32')

    print "Images Shape: ", x_train.shape, "\t", x_train.dtype
    print "\nImporting Testing Data..."
    x_test = np.load('data/CatOrDog/testing_images.npy')
    y_test = np.load('data/CatOrDog/testing_labels.npy')
    y_test = [[1, 0] if testing_label == 'Dog' else [0, 1] for testing_label in y_test]
    y_test = np.array(y_test)
    x_test = np.array(x_test, dtype='float32')

    # Shuffle the test set
    randomer = np.arange(x_test.shape[0])
    np.random.shuffle(randomer)
    x_test = x_test[randomer]
    y_test = y_test[randomer]

    n_data = len(x_train)
    n_test_data = len(x_test)

    '''divider = int(n_test_data / 2)

    x_test_data = x_test[0:divider]
    y_test_data = y_test[0:divider]

    x_validation_data = x_test[divider+1:n_test_data-1]
    y_validation_data = y_test[divider + 1:n_test_data - 1]'''

    with tf.variable_scope("Softmax"):
        y_pred_cls = tf.argmax(y_pred, dimension=1)

    with tf.name_scope("cross_ent"):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_pred, labels=y)
        cost = tf.reduce_mean(cross_entropy)

    with tf.name_scope("Optimizer"):
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

    with tf.name_scope("Accuracy"):
        correct_prediction = tf.equal(y_pred_cls, y_cls)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    writer = tf.summary.FileWriter("Training_FileWriter/")
    writer1 = tf.summary.FileWriter("Validation_FileWriter/")

    tf.summary.scalar('loss', cost)
    tf.summary.scalar('accuracy', accuracy)

    merged_summary = tf.summary.merge_all()

    num_epochs = 10
    batch_size = 300

    with tf.Session() as sess:
        # x = sess.graph.get_tensor_by_name('x')
        sess.run(tf.global_variables_initializer())

        writer.add_graph(sess.graph)

        for epoch in range(num_epochs):
            start_time = time.time()
            train_accuracy = 0
            cur_batch = int(n_data / batch_size)
            prev_index = 0
            bar = progressbar.ProgressBar(maxval=cur_batch)
            bar.start()
            for batch in range(0, cur_batch):
                # NB: prev_index is never advanced inside this loop
                start, end = ClassifyData.get_batch_array_indexes(previous_index=prev_index, batch_size=batch_size, n_data=n_data)
                if start == n_data:
                    break

                x_batch = x_train[start:end]
                y_true_batch = y_train[start:end]
                feed_dict_train = {x: x_batch, y: y_true_batch}

                sess.run(optimizer, feed_dict=feed_dict_train)

                train_accuracy += sess.run(accuracy, feed_dict=feed_dict_train)

                summ = sess.run(merged_summary, feed_dict=feed_dict_train)
                writer.add_summary(summ, epoch * int(n_data / batch_size) + batch)
                bar.update(batch)
            bar.finish()
            train_accuracy /= int(n_data / batch_size)

            summ, vali_accuracy = sess.run([merged_summary, accuracy],
                                           feed_dict={x: x_test, y: y_test})
            writer1.add_summary(summ, epoch)

            end_time = time.time()

            print "\nEpoch " + str(epoch + 1) + " completed : Time usage " + str(
                int(end_time - start_time)) + " seconds"
            print "\tAccuracy:"
            print "\t- Training Accuracy:\t{}".format(train_accuracy)
            print "\t- Validation Accuracy:\t{}".format(vali_accuracy)

P.S. I am using tflearn to build my network.
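For reference, the tflearn-only run that gives me the better accuracy uses tflearn's own regression head and trainer, roughly like this (the conv stack is the same as in `cnn()` above; this head is my reconstruction, not a verbatim copy):

    # Reconstruction of the tflearn-only training path, for comparison.
    net = tflearn.input_data(shape=[None, 50, 50, 3])
    # ... same conv_2d / max_pool_2d stack as in cnn() ...
    net = tflearn.fully_connected(net, 1024, activation='relu')
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net)
    model.fit(x_train, y_train, n_epoch=10, batch_size=300,
              validation_set=(x_test, y_test), show_metric=True)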
