Keras classification improvement

Asked: 2017-03-26 18:30:40

Tags: classification deep-learning keras conv-neural-network

I want to find a dog in a photo. I trained my model with 2 classes: 0 = dog, 1 = non-dog, using 9500 images per class for training and 3000 per class for validation. My model:

#model dog/nondog categorical more features to extract
model = Sequential()
model.add(Convolution2D(128, 3, 3, input_shape=(3, image_w, image_h)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(16))
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
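
(For reference, with 150x150 inputs the feature maps shrink through the Conv/MaxPooling stack as 148x148 -> 74x74 -> 72x72 -> 36x36 -> 34x34 -> 17x17, so Flatten feeds 32*17*17 = 9248 values into the Dense(16) layer; model.summary() confirms the shapes.)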

I have trained it to 90%+ accuracy. Accuracy vs Val_Accuracy: http://imgur.com/a/AjJq7 Loss vs Val_Loss: http://imgur.com/a/M6c1M
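
Those curves can be reproduced from the history object returned by fit_generator, along these lines (a minimal sketch; 'acc' and 'val_acc' are the metric keys this Keras 1.x version records):

import matplotlib.pyplot as plt

#accuracy vs val_accuracy
plt.plot(history.history['acc'], label='acc')
plt.plot(history.history['val_acc'], label='val_acc')
plt.legend()
plt.show()

#loss vs val_loss
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend()
plt.show()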

My approach to this is to classify dog vs. non-dog, then slide a window over the image in steps of 50 pixels (this can be changed); if a "box" is classified as a dog, its position is recorded, and once the whole image has been scanned the boxes are drawn. Below is an example of an output image (the red boxes are the ones classified as dog). Example output: http://imgur.com/a/901Gd
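
(With a 150 px window and a 50 px step, the 1050x600 test image used below yields (600-150)/50 + 1 = 10 rows times (1050-150)/50 + 1 = 19 columns, i.e. 190 crops to classify per scan.)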

The problem I am having is that when I scan a "real" image with various other objects in it, it never finds the dog itself; it misses the dog completely. Example output 2: http://imgur.com/a/3wNGu

Can anyone explain why this happens? Am I approaching the problem the wrong way? What changes could I make? Any help would be appreciated! Thanks!

Full code:

# -*- coding: utf-8 -*-
"""
Created on Thu Jan 26 16:21:36 2017

@author: PoLL
"""

from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
import numpy as np
import matplotlib.pyplot as plt
#draw rect
import matplotlib.patches as patches

#########################################################################################################
#VALUES
# dimensions of images.
image_w, image_h = 150,150
training_direct = 'data1/train'
validation_direct = 'data1/validation'
training_samples = 19000 #9500 dogs + 9500 nondogs
validation_samples = 6000 #3000 dogs + 3000 nondogs
epochs = 30
#########################################################################################################
#MODELS


#model dog/nondog categorical more features to extract
model = Sequential()
model.add(Convolution2D(128, 3, 3, input_shape=(3, image_w, image_h)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(16))
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

############################################################################################

#Configuration of training data augmentation
training_data_generator = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
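
#(shear_range/zoom_range apply random shears/zooms of up to 20% and
#horizontal_flip randomly mirrors images, so each epoch sees slightly
#different variants of the training pictures; the validation data below
#is only rescaled, not augmented)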

# rescaling as preprocessing for better results
test_datagen = ImageDataGenerator(rescale=1./255)
############################################################################################
#PREPARE TRAINING DATA
T_generator = training_data_generator.flow_from_directory(
        training_direct, #data1/train
        target_size=(image_w, image_h),  #RESIZE to 150/150
        batch_size=64,
        class_mode='categorical')  #label categorical/binary

#PREPARE VALIDATION DATA
V_generator = test_datagen.flow_from_directory(
        validation_direct,  #data1/validation
        target_size=(image_w, image_h), #RESIZE 150/150
        batch_size=64,
        class_mode='categorical')  #label categorical/binary
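
#class_mode='categorical' makes both generators yield one-hot label vectors
#of length 2, matching the Dense(2) + softmax output of the model above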


#START model.fit
history = model.fit_generator(
        T_generator, #train data
        samples_per_epoch=training_samples, #parameters
        nb_epoch=epochs,
        validation_data=V_generator,  #validation data
        nb_val_samples=validation_samples)
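
#NOTE: samples_per_epoch, nb_epoch and nb_val_samples are the Keras 1.x
#argument names; in Keras 2 the equivalents are steps_per_epoch (counted
#in batches), epochs and validation_steps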


#check labels 0=dog 1=nondog
labels = (T_generator.class_indices)
print(labels)
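
#expected output, assuming the class subfolders are named 'dog' and 'nondog'
#(flow_from_directory assigns indices alphabetically): {'dog': 0, 'nondog': 1}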




############################################################################################
##### WINDOW BOX  TO GO THROUGH THIS IMAGE
image = load_img('finddog/findadog2.jpg')
image = np.array(image).reshape((600,1050,3))
plt.imshow(image)
print(image.shape)
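
#NOTE: at this point the image is raw uint8 in channels-last (600, 1050, 3)
#order, while the network was trained on 1/255-rescaled, channels-first
#(3, 150, 150) tensors; each crop is converted to that layout inside
#find_a_dog() below before being classified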


############################################################################################
############################################################################################
#OBJECT IS HERE

#object x,y,w,h,
object0 =  (140, 140, 150,150)
object1 =  (340, 340, 150,150)
#object2 = (130,130,150,150)
objloc  = []
objloc.append(object0)
objloc.append(object1)
#objloc.append(object2)


#SLIDING WINDOW
def find_a_dog(image, step=50, window_sizes=[150]):
    locations = []
    for win_size in window_sizes:
        #top = Y, left = X
        for Y in range(0, image.shape[0] - win_size + 1, step):
            for X in range(0, image.shape[1] - win_size + 1, step):
                # compute the (top, left, bottom, right) of the bounding box
                box = (Y, X, Y + win_size, X + win_size)
                # crop
                cropped_img = image[box[0]:box[2], box[1]:box[3]]
                #convert the crop to the training layout: channels-first
                #(3, win, win) with a leading batch axis, rescaled by 1/255
                #like the training generator
                cropped_img = cropped_img.transpose((2, 0, 1)).reshape((1, 3, win_size, win_size)) / 255.0

                #classify it
                boxCATDOG = predict_function(cropped_img)
                if boxCATDOG == 0:
                    #save the location of the box classified as dog
                    locations.append(box)
                    print("found dog")
    #return only after every window size has been scanned
    return locations



############################################################################################                   
#FUNCTIONS   #
def predict_function(x):
    #predict_classes returns an array of class indices; 0 = dog, 1 = nondog
    return int(model.predict_classes(x)[0])
#SHOW CROPPED IMAGE
def show_image(im):
   plt.imshow(im.reshape((150,150,3)))
#SHOW INPUT IMAGE
def show_ori_image(im):
   plt.imshow(im.reshape((600,1050,3)))


#DRAW WHERE OBJECT IS   
def draw_obj_loc(image, objectloc):
    fig, ax = plt.subplots(1)
    ax.imshow(image)
    for l in objectloc:
        rectG = patches.Rectangle((l[0],l[1]),l[2],l[3],linewidth=1,edgecolor='G',facecolor='none')
        ax.add_patch(rectG)
    print(len(objectloc))

#DRAW BOXES FROM LOCATIONS
def draw_boxes(image, locations):
    fig, ax = plt.subplots(1)
    ax.imshow(image)
    for l in locations:
        print(l)
        #locations store (top, left, ...); Rectangle wants (x, y) = (left, top)
        rectR = patches.Rectangle((l[1],l[0]),150,150,linewidth=1,edgecolor='R',facecolor='none')
        ax.add_patch(rectR)
    print(len(locations))

#DRAW GREEN AND RED
def draw_both(image, locations, objectloc):
    fig, ax = plt.subplots(1)
    ax.imshow(image)
    for l in objectloc:
        rectG = patches.Rectangle((l[0],l[1]),l[2],l[3],linewidth=1,edgecolor='G',facecolor='none')
        ax.add_patch(rectG)
    for l in locations:
        print(l)
        rectR = patches.Rectangle((l[1],l[0]),150,150,linewidth=1,edgecolor='R',facecolor='none')
        ax.add_patch(rectR)

#CHECK OVERLAPPING GREEN RED
def check_overlapping(image, locations, objloc):
    for ol in objloc:
        objX = ol[0]
        objY = ol[1]
        objW = ol[2]
        objH = ol[3]

        for ok in locations:
            #boxes in locations are stored as (top, left, bottom, right)
            Y = ok[0]
            X = ok[1]
            if (objX+objW < X or X+150 < objX or objY+objH < Y or Y+150 < objY):
                #Intersection = Empty: no overlap, false positive
                print('THERE IS NO OVERLAPPING :', objloc.index(ol))
            else:
                #Intersection = Not Empty
                print('THERE IS OVERLAPPING WITH OBJECT: ', objloc.index(ol), 'WITH BOX NUMBER: ', locations.index(ok))

############################################################################################       

#get locations from image
locations = find_a_dog(image)
#show where windowslide classifed as positive
draw_boxes(image,locations)
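
#check_overlapping() is defined above but never called; to compare the red
#detections against the green ground-truth boxes, something like
#check_overlapping(image, locations, objloc) could be run here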

0 Answers:

There are no answers yet.