TensorFlow: terminate called after throwing an instance of 'std::system_error', what(): Resource temporarily unavailable

Time: 2017-04-10 05:58:32

Tags: python image numpy tensorflow deep-learning

The code below runs a CNN model on the Dogs and Cats dataset from Kaggle. I am training with only 1000 images. After I was unable to read the jpeg images with TensorFlow directly, I tried the approach below: loading them with PIL and feeding them into the model through placeholders. I get the following error:

terminate called after throwing an instance of 'std::system_error'
  what():  Resource temporarily unavailable
Aborted (core dumped)

Could someone help me debug the code below?

import os
import tensorflow as tf

import random
import numpy as np
from PIL import Image
from numpy import array

import sys
print("%x" % sys.maxsize, sys.maxsize > 2**32)  # sanity check: is this a 64-bit Python?

num_epochs = 1
max_steps = 10
batch_size = 100
min_queue_examples = 1000
num_preprocess_threads = None

test_folder = os.getcwd() + "/test/"   # note: unused below
train_folder = os.getcwd() + "/train/"

# Get File Names and Shuffle them
filenames = os.listdir(train_folder)
random.shuffle(filenames)

filenames = [''.join([train_folder,x]) for x in filenames]

# Get Label Names (take the base file name, since filenames now include the folder path)
label_list = [os.path.basename(x).split(".")[0] for x in filenames]
label_list = [1 if x == 'cat' else 0 for x in label_list]

# Number of Pictures in List
N = len(filenames)
image_list = [None]*N

# Create List of Images
for i in range(N):

    image = Image.open(filenames[i]).convert('1')
    image = image.resize((28,28),Image.ANTIALIAS)
    image_array = array(image).flatten()

    image_list[i] = []
    image_list[i].append(image_array)
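
# NOTE: because each flattened image is wrapped in its own one-element list,
# np.asarray(image_list) below yields shape (N, 1, 784) rather than (N, 784);
# assigning image_list[i] = image_array directly would drop the extra axis.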

# Convert List to Array
label = np.asarray(label_list)
image = np.asarray(image_list)

print(np.shape(label_list))
print(np.shape(image_list))

# Create a Batch
image_batch, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        #num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
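
# NOTE: shuffle_batch builds a queue serviced by queue-runner threads. Two
# things look suspect here (both guesses): without enqueue_many=True, the
# whole (N, 1, 784) array is treated as a single example, so every queue slot
# holds the entire dataset and a capacity of 1300 such slots can exhaust
# memory; and the runner threads are never started (see the sketch at the
# end), which may be where the std::system_error / "Resource temporarily
# unavailable" comes from.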

# Input Placeholder
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, shape=[None, 784], name = 'x-input')
    y = tf.placeholder(tf.float32, shape=[None, 2], name = 'y-output')
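
# NOTE: y expects one-hot rows of length 2, but label_batch carries integer
# 0/1 labels; a conversion such as tf.one_hot(label_batch, 2), or switching to
# sparse_softmax_cross_entropy_with_logits, would likely be needed.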

print("\nArchitecture Design:\n")

print("x: ", x.get_shape())
print("y: ", y.get_shape())

# Convolution Layer 1
with tf.name_scope('Convolution_Layer_1'):

    with tf.name_scope('weights_1'):

        W_conv1 = tf.truncated_normal([5, 5, 1, 32], stddev=0.1)
        W_conv1 = tf.Variable(W_conv1)

        b_conv1 = tf.constant(0.1, shape=[32])
        b_conv1 = tf.Variable(b_conv1)

    with tf.name_scope('input_reshape'):
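        # each flat 784-vector becomes a 28x28 single-channel image for conv2d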
        x_image = tf.reshape(x, [-1,28,28,1])

    # Convolution 1
    with tf.name_scope('Convolution_1'):
        h_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') 
        h_conv1 = h_conv1 + b_conv1

    # Activation
    with tf.name_scope('ReLu__1'):
        activation_1 = tf.nn.relu(h_conv1)

    print("Conv_1 Shape: " , activation_1.get_shape())

# Max Pooling Layer 1    
with tf.name_scope('Max_Pooling_1'):
    h_pool1 = tf.nn.max_pool(activation_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # pool the ReLU output
    print("h_pool1 Shape: " , h_pool1.get_shape())

# Convolution Layer 2
with tf.name_scope('Convolution_Layer_2'):

    with tf.name_scope('weights_2'):
        W_conv2 = tf.truncated_normal([5, 5, 32, 64], stddev=0.1)
        W_conv2 = tf.Variable(W_conv2)

        b_conv2 = tf.constant(0.1, shape=[64])
        b_conv2 = tf.Variable(b_conv2)

    with tf.name_scope('Convolution_2'):
        h_conv2 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME')
        h_conv2 = h_conv2 + b_conv2       

    # Activation
    with tf.name_scope('ReLu__2'):
        activation_2 = tf.nn.relu(h_conv2)

    print("Conv_2 Shape: " , activation_2.get_shape())

# Sub Sampling    
with tf.name_scope('Max_Pooling_2'):    
    h_pool2 = tf.nn.max_pool(activation_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # pool the ReLU output
    print("h_pool2 Shape: " , h_pool2.get_shape())

# Fully Connected Layer
with tf.name_scope('FC_1'):

    with tf.name_scope('weights_fc1'):
        W_fc1 = tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1)
        W_fc1 = tf.Variable(W_fc1)

        b_fc1 = tf.constant(0.1, shape=[1024])
        b_fc1 = tf.Variable(b_fc1)

    with tf.name_scope('Un_Rolling'):    
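        # two rounds of 2x2 max-pooling shrink 28x28 to 7x7, with 64 channels, hence 7*7*64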
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])

    print("h_pool2_flat Shape: " , h_pool2_flat.get_shape())

    with tf.name_scope('weighted_sum'):
        h_fc1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1

    with tf.name_scope('ReLu_3'):
        activation_3 = tf.nn.relu(h_fc1)

    print("FC_1 Shape: " , activation_3.get_shape())

# Drop Out
with tf.name_scope('Drop_Out'):
    keep_prob = tf.placeholder(tf.float32, name='dropout-probability')
    h_fc1_drop = tf.nn.dropout(activation_3, keep_prob)  # apply dropout to the ReLU output

# Output Layer
with tf.name_scope('FC_2'):

    with tf.name_scope('weights_fc2'):
        W_fc2 = tf.truncated_normal([1024, 2], stddev=0.1)
        W_fc2 = tf.Variable(W_fc2)

        b_fc2 = tf.constant(0.1, shape=[2])
        b_fc2 = tf.Variable(b_fc2)

    with tf.name_scope('weighted_sum'):
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    print("FC_2 Shape: " , y_conv.get_shape())

# Find Cross Entropy
with tf.name_scope('cross_entropy'):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y))

# Train Model
with tf.name_scope('train'):    
    train_op = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Find Accuracy
with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Summary Log
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('accuracy', accuracy)
summary_op = tf.summary.merge_all()

# Launch Graph in Session

with tf.Session() as sess:

    #test_writer = tf.summary.FileWriter(log_dir + '/test')
    #train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)

    tf.global_variables_initializer().run()       

    print("\Training Samples: ", np.shape(label_list))
    #print("Training Samples: ", len(mnist.train.images),"\n")

    for i in range(max_steps):

        ib, il = sess.run([image_batch, label_batch])

        print("Iteration: " + str(i))
        #[summary,train_accuracy] = sess.run([summary_op,accuracy],feed_dict={x:image_batch.eval(), y: label_batch.eval(), keep_prob: 1.0})
        #print("step %d, training accuracy %g"%(i, train_accuracy))
        #train_writer.add_summary(summary, i)
        #print(456)

        _, loss_val = sess.run([train_op, cross_entropy], feed_dict={x: ib, y: il, keep_prob: 0.9})
        print(loss_val)  # print the evaluated loss value, not the Tensor object
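
For what it's worth, my understanding of the tf.train.shuffle_batch documentation is that the batching queue is only filled once queue-runner threads are started, so I suspect the session block also needs something like the following before the training loop. This is just a sketch of the standard TF 1.x pattern, untested against my code:

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    for i in range(max_steps):
        ib, il = sess.run([image_batch, label_batch])
        # ... run the training step as above ...
finally:
    # ask the runner threads to stop and wait for them to finish
    coord.request_stop()
    coord.join(threads)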

0 Answers:

No answers yet