In TensorFlow

Date: 2017-07-27 15:29:59

Tags: python-3.x tensorflow jupyter-notebook conv-neural-network

I am a complete beginner with TensorFlow and machine learning in general, so there are many concepts I still don't fully understand; my apologies if my mistake is obvious. I am trying to train my own convolutional network on my own images (optical microscope photographs), resized to 60x60, with only 2 labels to classify them (whether a sample is positive or not). Here is my code:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes

# Load dataset in two lists (images and labels).
def load_data(data_dir):
    directories = [d for d in os.listdir(data_dir) 
               if os.path.isdir(os.path.join(data_dir, d))]

    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) 
                  for f in os.listdir(label_dir) if f.endswith(".JPG")]

        for f in file_names:
            images.append(f) 
            labels.append(int(d))
    return images, labels


# Load training and testing datasets.
ROOT_PATH = "Proyecto"
train_data_dir = os.path.join(ROOT_PATH, "Imagenes_entrenamiento")
test_data_dir = os.path.join(ROOT_PATH, "Imagenes_test")

images_train, labels_train = load_data(train_data_dir)
images_test, labels_test = load_data(test_data_dir)

# Converting training data to tensors.
timages_train = ops.convert_to_tensor(images_train, dtype=dtypes.string)
tlabels_train = ops.convert_to_tensor(labels_train, dtype=dtypes.int32)

# Converting testing data to tensors.
timages_test = ops.convert_to_tensor(images_test, dtype=dtypes.string)
tlabels_test = ops.convert_to_tensor(labels_test, dtype=dtypes.int32)

# Creation of a training queue.
num_files_train = len(images_train)
filename_train_queue = tf.train.slice_input_producer(
    [timages_train, tlabels_train], num_epochs=None, shuffle=True,
    capacity=num_files_train)

# Creation of a testing queue.
num_files_test = len(images_test)
filename_test_queue = tf.train.slice_input_producer(
    [timages_test, tlabels_test], num_epochs=None, shuffle=True,
    capacity=num_files_test)

# Decoding and resizing train images
raw_image_train = tf.read_file(filename_train_queue[0])
decoded_image_train = tf.image.decode_jpeg(raw_image_train, channels=3)
decoded_image_train = tf.cast(decoded_image_train, tf.float32)
resized_train_image = tf.image.resize_images(decoded_image_train, [60, 60])

# Decoding and resizing test images
raw_image_test = tf.read_file(filename_test_queue[0])
decoded_image_test = tf.image.decode_jpeg(raw_image_test, channels=3)
decoded_image_test = tf.cast(decoded_image_test, tf.float32)
resized_test_image = tf.image.resize_images(decoded_image_test, [60, 60])

# Extracting training and testing labels.
label_train_queue = filename_train_queue[1]
label_test_queue = filename_test_queue[1]

# Training batch.
batch_size_train = 5
image_train_batch, label_train_batch = tf.train.batch(
    [resized_train_image, label_train_queue], batch_size_train)

# Testing batch.
batch_size_test = 2
image_test_batch, label_test_batch = tf.train.batch(
    [resized_test_image, label_test_queue], batch_size_test)

# General model
x = tf.placeholder(tf.float32, shape=[None, 60, 60, 3]) 
y_ = tf.placeholder(tf.int32, shape=[None]) 
keep_prob = tf.placeholder(tf.float32)

# Weights and biases
dense_w = {
    "w_conv1": tf.Variable(tf.truncated_normal([5, 5, 3, 32], stddev=0.1),
                           name="w_conv1"),
    "b_conv1": tf.Variable(tf.constant(0.1, shape=[32]), name="b_conv1"),
    "w_conv2": tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1),
                           name="w_conv2"),
    "b_conv2": tf.Variable(tf.constant(0.1, shape=[64]), name="b_conv2"),
    # Two 2x2 max-poolings reduce 60x60 to 15x15, with 64 feature maps.
    "w_fc1": tf.Variable(tf.truncated_normal([15*15*64, 1024], stddev=0.1),
                         name="w_fc1"),
    "b_fc1": tf.Variable(tf.constant(0.1, shape=[1024]), name="b_fc1"),
    "w_fc2": tf.Variable(tf.truncated_normal([1024, 2], stddev=0.1),
                         name="w_fc2"),
    "b_fc2": tf.Variable(tf.constant(0.1, shape=[2]), name="b_fc2")
}

# CNN model
def dense_cnn_model(weights):
    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')

    x_image = tf.reshape(x, [-1,60,60,3])
    h_conv1 = tf.nn.relu(conv2d(x_image, weights["w_conv1"]) + 
        weights["b_conv1"])
    h_pool1 = max_pool_2x2(h_conv1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, weights["w_conv2"]) + 
        weights["b_conv2"])
    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 15*15*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights["w_fc1"]) + 
        weights["b_fc1"])
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Return raw logits; sparse_softmax_cross_entropy_with_logits applies
    # the softmax itself, so tf.nn.softmax here would apply it twice.
    y_conv = tf.matmul(h_fc1_drop, weights["w_fc2"]) + weights["b_fc2"]
    return y_conv

y_conv = dense_cnn_model(dense_w)

cross_entropy = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.cast(y_, tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init_op = tf.group(tf.local_variables_initializer(),
                   tf.global_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    ## Training:
    for i in range(50):
        # Fetch images and labels in a single sess.run so they stay paired;
        # evaluating the two tensors separately dequeues two different batches.
        image_train_batch_eval, label_train_batch_eval = sess.run(
            [image_train_batch, label_train_batch])
        if i % 2 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: image_train_batch_eval, y_: label_train_batch_eval,
                keep_prob: 1.0})  # disable dropout when measuring accuracy
            print('Step %d, training accuracy: %g' % (i, train_accuracy))
        train_step.run(feed_dict={x: image_train_batch_eval,
                                  y_: label_train_batch_eval, keep_prob: 0.5})

    ## Testing
    image_test_batch_eval, label_test_batch_eval = sess.run(
        [image_test_batch, label_test_batch])
    print('Test accuracy: %g' % accuracy.eval(feed_dict={
        x: image_test_batch_eval, y_: label_test_batch_eval, keep_prob: 1.0}))

    coord.request_stop()
    coord.join(threads)

Edit: the code above has been corrected.

1 Answer:

Answer 0 (score: 0)

You need to pass enqueue_many=True to tf.train.batch to signal that you are enqueuing several examples at a time; otherwise it treats each input tensor as a single example with many features.
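
For illustration, a minimal sketch of that distinction; the tensors below are hypothetical stand-ins, not taken from the question's code:

import tensorflow as tf

# Hypothetical stand-in: 10 examples already stacked along the first axis.
many_images = tf.random_uniform([10, 60, 60, 3])
many_labels = tf.zeros([10], dtype=tf.int32)

# Default (enqueue_many=False): each tensor in the list counts as ONE
# example, so the whole [10, 60, 60, 3] block would be enqueued as a
# single example with many features.
# whole_block_batch = tf.train.batch([many_images, many_labels], batch_size=5)

# With enqueue_many=True, the first dimension is read as the example
# dimension: the queue receives 10 separate (image, label) pairs and
# emits batches shaped [5, 60, 60, 3] and [5].
image_batch, label_batch = tf.train.batch(
    [many_images, many_labels], batch_size=5, enqueue_many=True)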