How to use TensorFlow to test a trained model for image classification

Time: 2018-04-12 09:33:26

Tags: tensorflow

I created a simple image-classification network with TensorFlow and trained it successfully. However, when I test the model with the same input image, I get different prediction results each time. Details are below.

The dataset has only two classes (dog and cat), and it is converted to tfrecord files before use. The network architecture is shown below.

import tensorflow as tf

def conv_layer(input, channels_in, filter_size, channels_out, name="conv"):
    # Convolution + bias, followed by 2x2 max-pooling
    with tf.name_scope(name):
        w = tf.Variable(tf.truncated_normal([filter_size, filter_size, channels_in, channels_out], stddev=0.1), name=name + "/W")
        b = tf.Variable(tf.constant(0.1, shape=[channels_out]), name=name + "/B")
        conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding="SAME")
        ret = tf.nn.max_pool(conv + b, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    return ret

def fc_layer(input, channels_in, channels_out, name="fc"):
    # Fully connected layer: input * W + b
    with tf.name_scope(name):
        w = tf.Variable(tf.truncated_normal([channels_in, channels_out], stddev=0.1), name=name + "/W")
        b = tf.Variable(tf.constant(0.1, shape=[channels_out]), name=name + "/B")
        mul = tf.matmul(input, w)
        ret = tf.add(mul, b, name="logits")
    return ret

def inference(image_batch):
    # Three conv/pool blocks followed by two fully connected layers
    fc1_size = 128
    num_classes = 2
    conv1 = conv_layer(image_batch, 3, 3, 32, "conv1")
    conv2 = conv_layer(conv1, 32, 3, 32, "conv2")
    conv3 = conv_layer(conv2, 32, 3, 64, "conv3")
    shape = conv3.get_shape()
    flat = tf.reshape(conv3, shape=[-1, shape[1:4].num_elements()])
    fc1 = fc_layer(flat, shape[1:4].num_elements(), fc1_size, "fc1")
    logits = fc_layer(fc1, fc1_size, num_classes, "fc2")
    return logits

The model is trained with the following code:

import ReadData

label, image = ReadData.read_and_decode_single_example(["trian.tfrecords"], 2, 128*128*3)
label = tf.argmax(label)
image = tf.cast(image, tf.float32)
# groups examples into batches randomly
image = tf.reshape(image,shape = [128,128,3])
image = tf.multiply(image,1.0/255,name = "in_image")#np.multiply(image, 1.0 / 255.0)
image_batch_ph = tf.placeholder(tf.float32,shape = [None,128,128,3],name = "image_batch_ph")
label_batch_ph = tf.placeholder(tf.int64,shape = [None],name = "label_batch_ph")
images_batch, labels_batch = tf.train.shuffle_batch(
    [image, label], batch_size=32,
    capacity=2000,
    min_after_dequeue=1000,name = "shuffle_step")
predict = inference(image_batch_ph)
y_pred = tf.nn.softmax(predict,name='y_pred')
loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels = label_batch_ph,logits = predict)
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

loss_mean = tf.reduce_mean(loss)
correct_prediction = tf.equal(tf.argmax(predict,1),label_batch_ph)
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) 

sess = tf.Session()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init)

tf.train.start_queue_runners(sess=sess)

i = 0

while i < 10000:
    #labels = sess.run(labels_batch)
    #print labels
    imgs, lbs = sess.run([images_batch, labels_batch])
    _, lossMean = sess.run([train_op, loss_mean], feed_dict={image_batch_ph: imgs, label_batch_ph: lbs})
    if i % 1000 == 0:
        print "iteration  ", i, "Loss   :", lossMean
    if i % 2000 == 0:
        acc = sess.run([accuracy], feed_dict={image_batch_ph: imgs, label_batch_ph: lbs})
        print "iteration  ", i, "accuracy   :", acc
    i += 1
save_path = saver.save(sess, "./model/model.ckpt")
print "model is saved at %s" % save_path

The ReadData.read_and_decode_single_example() function returns a single image tensor and the corresponding label tensor. The current session (the trained model) is saved in the folder ./model.
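For reference, a minimal sketch of what such a tfrecord reader might look like in TF 1.x (the actual ReadData implementation is not shown in the question; the feature keys "label" and "image_raw" below are assumptions):

import tensorflow as tf

def read_and_decode_single_example(filenames, num_classes, image_bytes):
    # Assumed sketch of a queue-based tfrecord reader; the feature keys
    # "label" (one-hot, length num_classes) and "image_raw" are guesses,
    # not the question's actual ReadData implementation.
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={
            "label": tf.FixedLenFeature([num_classes], tf.int64),
            "image_raw": tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features["image_raw"], tf.uint8)
    image.set_shape([image_bytes])
    return features["label"], image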

For testing, I wrote another script, shown below. The intent is to load the session saved by the script above and use that model to classify an image.

import cv2
import tensorflow as tf
image_size = 128 
image = cv2.imread("./dog.11.jpg")
image = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)
image = tf.cast(image, tf.float32)
image = tf.reshape(image,shape = [128,128,3])
image = tf.multiply(image,1.0/255,name = "in_image")#np.multiply(image, 1.0 / 255.0)
image = tf.expand_dims(image, 0)
sess = tf.Session()
new_saver = tf.train.import_meta_graph('./model/model.ckpt.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./model/'))
init = tf.global_variables_initializer()
sess.run(init) 
image_batch_ph = tf.get_default_graph().get_tensor_by_name("image_batch_ph:0")
test_image = sess.run(image) 
y_pred = tf.get_default_graph().get_tensor_by_name("y_pred:0")

predicted_labels = sess.run(y_pred,feed_dict={image_batch_ph:test_image})

print predicted_labels

When testing the model with the same image multiple times, the predictions differ from run to run. I cannot figure out what is going wrong.

1 Answer:

Answer 0 (score: 1)

The mistake was mine. When using a pre-trained model restored from a checkpoint, you should not run

init = tf.global_variables_initializer()
sess.run(init)

This reinitializes the weights with new random values, which is why the restored model gives different predictions on every run.
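A minimal corrected sketch of the test script, applying the fix above (paths and tensor names are taken from the question's code; the initializer calls are simply removed so the restored weights are kept):

import cv2
import tensorflow as tf

image_size = 128
image = cv2.imread("./dog.11.jpg")
image = cv2.resize(image, (image_size, image_size), interpolation=cv2.INTER_LINEAR)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, shape=[128, 128, 3])
image = tf.multiply(image, 1.0 / 255)
image = tf.expand_dims(image, 0)

sess = tf.Session()
# Restore the graph and the trained weights; do NOT run
# tf.global_variables_initializer() afterwards, or the weights
# are overwritten with random values.
new_saver = tf.train.import_meta_graph('./model/model.ckpt.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./model/'))

image_batch_ph = tf.get_default_graph().get_tensor_by_name("image_batch_ph:0")
y_pred = tf.get_default_graph().get_tensor_by_name("y_pred:0")

test_image = sess.run(image)
predicted_labels = sess.run(y_pred, feed_dict={image_batch_ph: test_image})
print predicted_labels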