predict_generator for an LSTM with sequences of different lengths

Date: 2019-10-02 13:02:56

Tags: python-3.x keras deep-learning lstm

I have sequences of different lengths, and I want to train an LSTM-based neural network that predicts each Nth token from the preceding N-1 tokens.
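To make the setup concrete, here is a toy sketch of the shifted-by-one slicing (the shapes are made up; the same slicing appears in the generators below):

import numpy as np

seq = np.random.rand(1, 10, 4)  # (batch, timesteps, features) - made-up shape
x = seq[:, :-1, :]              # tokens 1..N-1 are the inputs
y = seq[:, 1:, :]               # tokens 2..N are the targets
print(x.shape, y.shape)         # (1, 9, 4) (1, 9, 4)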

However, the training results are strange; after a few epochs, the accuracy (on both training and validation) drops to near 0, and so does the loss.

Because of this inexplicable behavior (described here), I decided to dig deeper and inspect what each iteration of the model actually predicts.

To do this, I want to load each saved model, use it to predict on the validation data, and compare the results with the metrics produced by the original fit of the model.

The problem is that I get the following error message:

  File "/home/user/experiments/LSTM/1/use_model.py", line 65, in main
    prediction = model.predict_generator(test_generator(val_list),steps = len(val_list))
  File "/home/user/.local/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/user/.local/lib/python3.6/site-packages/keras/engine/training.py", line 1522, in predict_generator
    verbose=verbose)
  File "/home/user/.local/lib/python3.6/site-packages/keras/engine/training_generator.py", line 474, in predict_generator
    return np.concatenate(all_outs[0])
ValueError: all the input array dimensions except for the concatenation axis must match exactly

This seems to happen because the LSTM inputs have different sizes. Yet while predict_generator fails, fit_generator appears to handle them without any problem.
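My guess, based on the traceback, is that predict_generator collects the per-batch outputs and calls np.concatenate on them, which cannot work when the time dimension differs between batches. A minimal reproduction with made-up shapes:

import numpy as np

a = np.zeros((16, 9, 4))   # predictions for a batch of length-9 sequences
b = np.zeros((16, 12, 4))  # predictions for a batch of length-12 sequences
np.concatenate([a, b])     # ValueError: all the input array dimensions except
                           # for the concatenation axis must match exactly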

Why does predict_generator fail where fit_generator succeeds? And how can I successfully predict with sequences of different lengths?
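For reference, the fallback I am considering is to bypass predict_generator and predict batch by batch, collecting the ragged outputs in a plain Python list (a sketch, not something I have verified):

predictions = []
for batch in val_list:
    x = batch[:, :-1, :]                           # same slicing as in the generators
    predictions.append(model.predict_on_batch(x))  # list entries may differ in length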

My code is as follows:

import numpy as np
import glob

import keras
from keras.models import Sequential, load_model 
from keras.layers import LSTM, Dense, TimeDistributed, Lambda, Dropout, Activation
from keras.metrics import top_k_categorical_accuracy
from keras.callbacks import ModelCheckpoint

###
import matplotlib
matplotlib.use('Agg') # prevents it from failing when there is no display
import matplotlib.pyplot as plt
import keras.backend as K
from keras.utils import plot_model

###

name='smash7'
model_designation=str(name)+'_'

train_val_split=0.2 # portion to be placed in validation


# rotating indices into the batch lists, advanced by cycle_throught()
train_control_number=0
val_control_number=0
test_control_number=0
batch_size = 16




def my_3D_top_5(true, pred):
    # flatten (batch, time, features) to (batch*time, features) so that
    # top-k accuracy is computed per timestep
    features_num=int(list(pred.shape)[-1])

    true = K.reshape(true, (-1, features_num))
    pred = K.reshape(pred, (-1, features_num))
    return top_k_categorical_accuracy(true, pred, k=5)

def my_3D_top_10(true, pred):
    # same flattening as my_3D_top_5, with k=10
    features_num=int(list(pred.shape)[-1])

    true = K.reshape(true, (-1, features_num))
    pred = K.reshape(pred, (-1, features_num))
    return top_k_categorical_accuracy(true, pred, k=10)



def main ():
    input_files=glob.glob('*npy')
    data_list,dim=loader(input_files)
    train_list,val_list=data_spliter(data_list)

    train_list=group_data(train_list,batch_size)
    val_list=group_data(val_list,batch_size)


    dependencies={'my_3D_top_5' : my_3D_top_5,'my_3D_top_10' : my_3D_top_10}
    model=load_model('saved-model-try7_-24.hdf5',custom_objects=dependencies)

    #For debugging - to check that train_generator and val_generator work####
    model.fit_generator(train_generator(train_list), steps_per_epoch=len(train_list), epochs=1, verbose=1,validation_data=val_generator(val_list),validation_steps=len(val_list))
    #########################################################################

    prediction = model.predict_generator(test_generator(val_list),steps = len(val_list))


def group_data(data_list,size):  # group sequences into batches and pad each batch to a common length
    output=[]
    list_of_sizes=[]
    for data in data_list:
        list_of_sizes.append(data.shape[1])  # sequence length of each sample

    # sort by sequence length so similarly-sized sequences land in the same batch
    data_list = [x for _, x in sorted(zip(list_of_sizes,data_list), key=lambda pair: pair[0])]

    while len(data_list)>size:
        this=data_list[:size]
        data_list=data_list[size:]
        combined=(elongate_and_combine(this))
        output.append(combined)


    combined=(elongate_and_combine(data_list))
    output.append(combined)


    return (output)

def elongate_and_combine(data_list):

    # the list is sorted by length, so the last element is the longest
    max_length = data_list[-1].shape[1]
    last_element = data_list.pop()
    output = last_element

    # reuse the final token of the longest sequence as padding ("stop codon")
    stop_codon = last_element[0,(max_length-1),:]
    stop_codon = stop_codon.reshape(1,1,stop_codon.size)

    for data in data_list:
        size_of_data = data.shape[1]
        # pad each shorter sequence with the stop codon until it reaches max_length
        while size_of_data < max_length:
            data = np.append(data, stop_codon, axis=1)
            size_of_data = data.shape[1]
        output = np.append(output, data, axis=0)


    return (output)


def train_generator(data_list):  # endlessly yield pre-padded batches as shifted (input, target) pairs
    while True:
        global train_control_number
        train_control_number=cycle_throught(len(data_list),train_control_number)
        #print (train_control_number)       
        this=data_list[train_control_number]


        x_train = this [:,:-1,:] # all but the last 1
        y_train = this [:,1:,:] # all but the first 1

        yield (x_train, y_train)



def val_generator(data_list):
    while True:
        global val_control_number
        val_control_number=cycle_throught(len(data_list),val_control_number)
        #print (val_control_number)     
        this=data_list[val_control_number]
        x_train = this [:,:-1,:] # all but the last 1
        y_train = this [:,1:,:] # all but the first 1

        yield (x_train, y_train)

def test_generator(data_list):
    while True:
        global test_control_number
        test_control_number=cycle_throught(len(data_list),test_control_number)
        #print (test_control_number)        
        this=data_list[test_control_number]
        x_train = this [:,:-1,:] # all but the last 1
        y_train = this [:,1:,:] # all but the first 1

        yield (x_train, y_train)


def cycle_throught(total, current):  # advance the index, wrapping back to 0 at the end
    current += 1
    if current == total:
        current = 0
    return current


def loader(input_files):

    data_list=[]

    for input_file in input_files:
        a=np.load(input_file)
        # add a leading batch dimension: (timesteps, features) -> (1, timesteps, features)
        incoming_shape=list(a.shape)
        requested_shape=[1]+incoming_shape
        a=a.reshape(requested_shape)
        #print (a.shape)
        data_list.append(a)

    return (data_list,incoming_shape[-1])


def data_spliter(input_list):
    # the first train_val_split fraction goes to validation, the rest to training
    val_num=int(len(input_list)*train_val_split)
    validation=input_list[:val_num]
    train=input_list[val_num:]

    return (train,validation)

main()

0 Answers