IndexError: index 2992 is out of bounds for axis 0 with size 2992

Posted: 2018-12-31 04:34:18

Tags: python tensorflow keras deep-learning

Here is my code in Keras:

# Imports required by the code below (Keras 2.x)
import os

import h5py
import numpy as np
from keras.layers import (LSTM, Activation, Conv2D, Dense, Dropout, Flatten,
                          GlobalAveragePooling1D, MaxPooling2D, TimeDistributed, average)
from keras.models import Model, Sequential
from keras.utils import np_utils, plot_model

batch_size = 16
num_classes = 4
epochs = 30
frames = 5  # The number of frames for each sequence
input_shape = [100, 100, 3]

def build_rgb_model2():
    model = Sequential()

    model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same'), input_shape=(frames, 120, 180, 3)))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))

    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(512)))

    model.add(TimeDistributed(Dense(32, name="first_dense_rgb")))

    model.add(LSTM(20, return_sequences=True, name="lstm_layer_rgb"))

    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_rgb"))
    model.add(GlobalAveragePooling1D(name="global_avg_rgb"))

    return model


def build_rgb_model():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu' ), input_shape=(frames, 120, 180, 3)))


    # model.add(TimeDistributed(Activation('relu')))
    # model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(frames, 120, 180, 3))))
    model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(1024)))

    model.add(TimeDistributed(Dense(32, name="first_dense_rgb")))

    model.add(LSTM(20, return_sequences=True, name="lstm_layer_rgb"))

    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_rgb"))
    model.add(GlobalAveragePooling1D(name="global_avg_rgb"))

    return model


def build_flow_model():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same',activation='relu'), input_shape=(frames, 120, 180, 2)))
    # model.add(TimeDistributed(Activation('relu')))

    # model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(frames, 120, 180, 2))))
    model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(1024)))

    model.add(TimeDistributed(Dense(32, name="first_dense_flow")))

    model.add(LSTM(20, return_sequences=True, name="lstm_layer_flow"))

    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_flow"))
    model.add(GlobalAveragePooling1D(name="global_avg_flow"))

    return model


def build_model():
    rgb_model = build_rgb_model()
    flow_model = build_flow_model()



    out=average([rgb_model.output, flow_model.output])
    model=Model([rgb_model.input,flow_model.input], out)

    #model.add(add([rgb_model, flow_model]))


    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    plot_model(model, to_file='model/cnn_lstm.png')

    return model


def batch_iter(split_file):
    split_data = np.genfromtxt(split_file, dtype=None, delimiter=",")
    total_seq_num = len(split_data)

    ADRi = "/UCF3"
    split_data2 = np.genfromtxt("C.txt", dtype=None, delimiter=",")
    num_batches_per_epoch = int(((int(split_data2[4])-1)/frames - 1) / batch_size)-300
    total_frames=int(split_data2[4])-1
    listing = sorted(os.listdir(ADRi))
    indices2=[]

    def data_generator():
        p = 0
        while 1:
            indices = np.random.permutation(np.arange(total_seq_num))
            t=0
            for j in range(total_seq_num):
                for k in range(int(split_data[j][1]/frames)):
                    indices2.append (j*1000+k*frames)
                    t=t+1
            indices3 = np.random.permutation(np.arange(indices2.__len__()))
            for batch_num in range(num_batches_per_epoch): # for each batch
                start_index = batch_num * batch_size
                end_index = ((batch_num + 1) * batch_size) -1

                RGB = []
                FLOW = []
                Y = []
                for i in range(start_index, end_index): # for each sequence
                    ii=int(indices3[i]/1000) # seqnumber
                    image_dir = split_data[indices[ii]][0].decode("UTF-8")
                    seq_len = int(split_data[indices[ii]][1])
                    y = int(split_data[indices[ii]][2])

                    # To reduce the computational time, data augmentation is performed for each frame
                    jj= min( int(indices3[i]/1000), seq_len-frames-1)
                    augs_rgb = []
                    augs_flow = []
                    for j in range(jj,jj+frames): # for each frame
                        # Get frames at regular interval. start from frame index 1
                        frame = j
                        hf = h5py.File(image_dir+".h5", 'r')
                        # rgb image
                        im = hf.get(str(frame))
                        #rgb_i = load_img("%s/img_%05d.jpg" % (image_dir, frame), target_size=(224, 224))
                        rgb =  im[:, :, :, (3, 5, 7)].transpose(0,3,1,2)
                        #img_gen = ImageDataGenerator(horizontal_flip=True)
                         #= img_gen.apply_transform(rgb)
                        #img = Image.fromarray(rgb)
                        #rgb_flip_i=img.transpose(Image.FLIP_LEFT_RIGHT) # augmentation
                        rgb_flip = np.flip(rgb,2)
                        #t=np.append(rgb, rgb_flip,axis=0)
                        t=np.concatenate([rgb],axis=0)
                        augs_rgb.append(t)

                        # flow image
                        flow_x=im[:, :, :, 1]
                        flow_y = im[:, :, :, 2]

                        flow_x_flip = - np.flip(flow_x,2) # augmentation
                        flow_y_flip =   np.flip(flow_y,2) # augmentation

                        flow = np.concatenate([flow_x, flow_y], axis=0)
                        flow_flip = np.concatenate([flow_x_flip, flow_y_flip], axis=0)
                        #tt=np.concatenate([flow[None,:,:,:], flow_flip[None,:,:,:]], axis=0)
                        tt = np.concatenate([flow[None, :, :, :]], axis=0)
                        augs_flow.append(tt)

                    augs_rgb = np.array(augs_rgb).transpose((1, 0, 3, 4, 2))
                    augs_flow = np.array(augs_flow).transpose((1, 0, 3, 4, 2))
                    RGB.extend(augs_rgb)
                    FLOW.extend(augs_flow)
                    Y.extend([y])

                RGB1 = np.array(RGB)
                FLOW1 = np.array(FLOW)
                Y1 = np_utils.to_categorical(Y, num_classes)
                p=p+1

                yield ([RGB1, FLOW1], Y1)

    return num_batches_per_epoch, data_generator()




if __name__ == "__main__":

    # Build model
    model = build_model()
    model.summary()
    print("Built model")

    # Make batches
    train_steps, train_batches = batch_iter(train_split_file)
    valid_steps, valid_batches = batch_iter(test_split_file)

    # Train model
    history = model.fit_generator(train_batches, steps_per_epoch=train_steps,
                epochs=10, verbose=1, validation_data=valid_batches,
                validation_steps=valid_steps)
    plot_history(history)
    print("Trained model")

When I set train_steps to 100, it works. When I set it to the real number of samples (462) on the last run, I get this error:


461/462 [===========================>.] - ETA: 2s - loss: 6.6250 - acc: 0.4111
Traceback (most recent call last):
  File "Two.py", line 332, in <module>
    validation_steps=valid_steps)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training.py", line 1418, in fit_generator
    initial_epoch=initial_epoch)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training_generator.py", line 234, in fit_generator
    workers=0)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training.py", line 1472, in evaluate_generator
    verbose=verbose)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training_generator.py", line 330, in evaluate_generator
    generator_output = next(output_generator)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/utils/data_utils.py", line 709, in get
    six.reraise(*sys.exc_info())
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/utils/data_utils.py", line 685, in get
    inputs = self.queue.get(block=True).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
    raise self._value
IndexError: index 2992 is out of bounds for axis 0 with size 2992

I don't have 2992 of anything in my data!
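
To illustrate, here is a stripped-down sketch of the indexing loop inside batch_iter; the array length and step count below are illustrative guesses, not my real data, but it fails with the same message:

import numpy as np

# Illustrative sizes only: suppose the generator built 2992 index entries,
# while fit_generator is asked to draw 462 batches of 16 per epoch.
indices3 = np.random.permutation(2992)
batch_size = 16
steps_per_epoch = 462

for batch_num in range(steps_per_epoch):
    start_index = batch_num * batch_size
    end_index = ((batch_num + 1) * batch_size) - 1
    for i in range(start_index, end_index):
        _ = indices3[i]  # IndexError: index 2992 is out of bounds for axis 0 with size 2992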

1 Answer:

Answer 0 (score: 0)

Arrays in Python start at 0, so if there are 2992 elements, the last one is at index 2991.
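
A minimal sketch of that point (the array and the guards below are illustrative, not taken from the question's code): an array with 2992 entries only accepts indices 0 through 2991, so any index derived from batch_num * batch_size has to stay below len(indices3), for example by wrapping or clamping it before use.

import numpy as np

indices3 = np.arange(2992)                     # valid indices are 0 .. 2991
i = 2992                                       # one past the end

# indices3[i]                                  # would raise the IndexError from the question

wrapped = indices3[i % len(indices3)]          # wraps around to index 0
clamped = indices3[min(i, len(indices3) - 1)]  # clamps to the last valid index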