Loss value constant, accuracy fluctuates between 0, 0.5 and 1

Date: 2018-11-22 18:30:36

Tags: python tensorflow deep-learning kaggle

I am a beginner with TensorFlow and wrote the following code to classify cats and dogs using the Kaggle dataset. I modified code from an IBM edX course and tried to adapt it into an image classifier for the Cats and Dogs dataset.

import csv
import re
import matplotlib.pyplot as plt
import datetime
import tensorflow as tf
import numpy as np
import random as rd
import os
from PIL import Image
import random


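# Yields successive (inputs, targets) mini-batches of size `batchsize`, optionally
# shuffling the indices first. (Defined here but never called in the training loop below.)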
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert inputs.shape[0] == targets.shape[0]
    if shuffle:
        indices = np.arange(inputs.shape[0])
        np.random.shuffle(indices)
    for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]



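# Read the images from the Cat and Dog sub-folders of PetImages: convert each image to
# grayscale, shrink it so its longer side is 100 px, zero-pad it to 100x100, scale the
# pixel values to [0, 1] and flatten it. At most 500 images are read per folder.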
os.chdir("C:\\Users\\dell\\Documents\\PetImages")
lst = os.listdir()
train_cat = []
train_dog = []
train_x = []
train_y = []
count = 0
for anim in lst:
    os.chdir(anim)
    for img in os.listdir():
        image = Image.open(img).convert('L')
        width, height = image.size
        scale_factor = max(width, height)/100
        image = image.resize((int(width/scale_factor), int(height/scale_factor)))
        imgarr = np.asarray(image)
        zero_array = np.zeros((100,100))
        zero_array[:imgarr.shape[0], :imgarr.shape[1]] = imgarr
        imgarr = zero_array

        #imgarr = tf.keras.utils.normalize(imgarr, axis = 1, order=2)
        imgarr = imgarr / 255.0
        if anim == "Cat":
            imgarr = imgarr.flatten()
            train_cat.append(imgarr)
        else:
            imgarr = imgarr.flatten()
            train_dog.append(imgarr)
        #imgplot = plt.imshow(imgarr)
        #print(imgarr)
        #plt.show()
        count+=1
        if count == 500:
            count = 0
            print()
            break
        print(str(count)+" Files Read.", sep=' ', end='\r', flush=True)
    os.chdir('../')
print(len(train_cat))

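# One-hot labels: [1, 0] for cats, [0, 1] for dogs.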
label_cat = [[1,0] for i in train_cat]
label_dog = [[0,1] for i in train_dog]

train_x.extend(train_cat)
train_x.extend(train_dog)
train_y.extend(label_cat)
train_y.extend(label_dog)
print(len(train_x))
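# Placeholders for the flattened 100x100 grayscale inputs and the one-hot labels;
# the input is reshaped back to a 100x100x1 image for the convolutional layers.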
width = 100
height = 100
flat = width * height
class_output = 2
x = tf.placeholder(tf.float32, shape=[None, flat])
y_ = tf.placeholder(tf.float32, shape=[None, class_output])

x_image = tf.reshape(x, [-1,100,100,1])
print(x_image)

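# First convolutional layer: 5x5 kernels, 1 input channel, 32 feature maps,
# followed by ReLU and 2x2 max-pooling.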
W_conv1 = tf.Variable(tf.truncated_normal([5,5,1,32],stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))

convolve1 = tf.nn.conv2d(x_image, W_conv1, strides=[1,1,1,1], padding="SAME") + b_conv1
h_conv1 = tf.nn.relu(convolve1)
conv1 = tf.nn.max_pool(h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")
print(conv1)

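# Second convolutional layer: 5x5 kernels, 32 -> 64 feature maps, again ReLU + 2x2 max-pooling.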
W_conv2 = tf.Variable(tf.truncated_normal([5,5,32,64],stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))

convolve2 = tf.nn.conv2d(conv1, W_conv2, strides=[1,1,1,1], padding="SAME") + b_conv2
h_conv2 = tf.nn.relu(convolve2)
conv2 = tf.nn.max_pool(h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")
print(conv2)

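# Flatten the pooled feature maps (25x25x64) and apply a 1024-unit fully connected layer with ReLU.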
layer2_matrix = tf.reshape(conv2, [-1, conv2.shape[1] * conv2.shape[2] * 64])
W_fc1 = tf.Variable(tf.truncated_normal([int(layer2_matrix.shape[1]), 1024], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))

fc1 = tf.matmul(layer2_matrix, W_fc1) + b_fc1
h_fc1 = tf.nn.relu(fc1)
print(h_fc1)


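# Dropout on the fully connected layer; keep_prob is supplied via the feed_dict.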
keep_prob = tf.placeholder(tf.float32)
layer_drop = tf.nn.dropout(h_fc1, keep_prob)
print(layer_drop)


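# Output layer mapping the 1024 hidden units to the 2 classes; a ReLU is applied to its output as well.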
W_fc2 = tf.Variable(tf.truncated_normal([1024, class_output], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[class_output]))

fc2 = tf.matmul(layer_drop, W_fc2) + b_fc2
h_fc2 = tf.nn.relu(fc2)
print(h_fc2)


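# Softmax over the two class scores.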
y_CNN = tf.nn.softmax(h_fc2)
print(y_CNN)


print(train_y)

print(y_*tf.log(y_CNN))

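# Loss: mean of -sum(y_ * log(softmax output)), minimized with Adam (learning rate 1e-5).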
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_CNN)))

train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy)

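# Accuracy: fraction of examples whose arg-max prediction matches the label.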
correct_prediction = tf.equal(tf.argmax(y_CNN, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

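# Shuffle the (image, label) pairs together so cats and dogs are interleaved.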
d = list(zip(train_x,train_y))
random.shuffle(d)
train_x, train_y = zip(*d)

print(train_y)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

n_epochs = 20


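# Training loop: each step feeds a sliding window of two consecutive examples; accuracy
# and loss are evaluated with dropout disabled (keep_prob=1.0) before the weight update.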
for n in range(2,1000):
    batch = (train_x[n-2:n], train_y[n-2:n])
    train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
    loss = cross_entropy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
    print("step %d, training accuracy %g, loss %g"%(n, float(train_accuracy), float(loss)))
    train_step.run(feed_dict={x:batch[0], y_:batch[1], keep_prob: 0.5})

The problem is that when I run the program, the loss first increases and then decreases until it settles at a constant value, while the accuracy only fluctuates between the three values 0, 0.5 and 1. An image of this is shown below.

[Image: the fluctuating accuracy and constant loss]
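As a side note on the reported accuracy values: each training step above feeds a slice of exactly two examples, so the per-batch accuracy is the mean of two 0/1 indicators and can only come out as 0, 0.5 or 1. A minimal standalone check (plain NumPy, hypothetical values, not part of the original program) illustrating this:

import numpy as np

# Hypothetical per-example correctness for a two-example batch; the mean
# (what tf.reduce_mean over correct_prediction computes) can only be
# 0.0, 0.5 or 1.0.
for correct in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print(correct, "->", np.mean(correct))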

Can anyone help me?

0 Answers:

No answers yet.