One-hot encoding for LSTM sequence classification in TensorFlow

Time: 2017-11-01 14:36:23

Tags: python arrays tensorflow keras one-hot-encoding

From a set of numerically labelled categories, I am predicting the next category in a sequence. I have modelled this on a text generator (hence the random title!).

I created a number for each category (assigned with an enumerate function) so that keras and tensorflow can interpret them as numeric information. This raises an error indicating that I should use one-hot encoding for the output, and I don't know how to do that.

I have a sample of the one-hot encoding of the data, but I don't know how to use it in the body of the code, or conversely how to change my code so that the input works without one-hot encoding.

I don't think I understand machine learning very well yet; I am teaching myself.
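For reference, a minimal sketch of that encoding step, assuming the same four categories as in the code below (variable names here are illustrative, not from my code): to_categorical expects small integer class ids, so the string categories are mapped to indices first, and argmax inverts the encoding.

import numpy as np
from numpy import argmax
from keras.utils import to_categorical

cats = ['10001426', '5121550', '5431000', '10001466']
cat_to_ix = {c: ix for ix, c in enumerate(cats)}  # string -> small integer id

seq = ['10001426', '5121550', '10001426']
encoded = to_categorical([cat_to_ix[c] for c in seq], num_classes=len(cats))
print(encoded)                   # one row of length 4 per element of seq

# Invert: argmax recovers the index, the index recovers the category
print(cats[argmax(encoded[0])])  # -> '10001426'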

import numpy as np
from numpy import array
from numpy import argmax

import tensorflow as tf

import keras
from keras.utils import to_categorical
from keras.utils import np_utils
from keras.layers import LSTM, Dense, Activation, Input, TimeDistributed
from keras.models import Sequential, Model

data= ['10001426', '10001426','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550', '10001426', '10001426', '10001426','10001426','5431000', '10001426', '10001426', '10001466','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550']
data= array(data)

chars=['10001426','5121550','5431000','10001466']
chars= array(chars)
"""
#OneHotEncode - turns the category into an encoded array
encoded = to_categorical(data)
print(encoded)
encoded2 = to_categorical(chars)
print(encoded2)

#Invert OneHotEncode

inverted = argmax(encoded[0])
print(inverted)
inverted2 = argmax(encoded2[0])
print(inverted2)
"""
#Parameters
SEQ_LENGTH = 2 # Learn in steps of 2
VOCAB_SIZE = len(chars) #number of features - how many categories of fault

#Prepare training data

ix_to_char={ix:char for ix, char in enumerate(chars)}
char_to_ix={char:ix for ix, char in enumerate(chars)}

X = np.zeros((len(data)//SEQ_LENGTH, SEQ_LENGTH, VOCAB_SIZE))
y = np.zeros((len(data)//SEQ_LENGTH, SEQ_LENGTH, VOCAB_SIZE))


for i in range(len(data)//SEQ_LENGTH):
    X_sequence = data[i*SEQ_LENGTH:(i+1)*SEQ_LENGTH]
    X_sequence_ix = [char_to_ix[value] for value in X_sequence]
    input_sequence = np.zeros((SEQ_LENGTH, VOCAB_SIZE))
    for j in range(SEQ_LENGTH):
        input_sequence[j][X_sequence_ix[j]] = 1.
    X[i] = input_sequence

    # Targets are the inputs shifted one step ahead; the final chunk may
    # be one element short, so only iterate over what is actually there
    y_sequence = data[i*SEQ_LENGTH+1:(i+1)*SEQ_LENGTH+1]
    y_sequence_ix = [char_to_ix[value] for value in y_sequence]
    target_sequence = np.zeros((SEQ_LENGTH, VOCAB_SIZE))
    for j in range(len(y_sequence_ix)):
        target_sequence[j][y_sequence_ix[j]] = 1.
    y[i] = target_sequence


#Create the network
HIDDEN_DIM=1 
LAYER_NUM= 1

model = Sequential()
model.add(LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE),
               return_sequences=True))

for i in range(LAYER_NUM-1):
    model.add(LSTM(HIDDEN_DIM, return_sequences=True))         
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy",optimizer="rmsprop")


#Train the network

nb_epoch = 0
BATCH_SIZE = 5
GENERATE_LENGTH = 7

while True:
    print ('\n\n')
    model.fit(X,y,batch_size=BATCH_SIZE,verbose=1, epochs=1)
    nb_epoch +=1
    generate_text(model, GENERATE_LENGTH)  # not defined in the post; a sketch follows below

    if nb_epoch %5==0:
        model.save_weights('checkpoint_{}_epoch_{}.hdf5'.format(HIDDEN_DIM, nb_epoch))

model.summary()
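Note that generate_text is not defined in the posted code. A hypothetical sketch of such a helper, following the text-generation pattern this code is modelled on (the name and logic are an assumption, not from the original):

def generate_text(model, length):
    # Hypothetical helper: start from a random category and repeatedly feed
    # the growing one-hot sequence back in, taking the argmax of the last
    # timestep's prediction as the next category.
    ix = np.random.randint(VOCAB_SIZE)
    generated = [ix_to_char[ix]]
    X_gen = np.zeros((1, length, VOCAB_SIZE))
    for t in range(length):
        X_gen[0, t, ix] = 1.
        preds = model.predict(X_gen[:, :t + 1, :])[0]  # shape (t+1, output_dim)
        ix = int(np.argmax(preds[-1]))
        generated.append(ix_to_char[ix])
    print(' '.join(generated))
    return generated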

1 Answer:

Answer 0 (score: 0)

You forgot that the output size of the last layer needs to be VOCAB_SIZE. You can do this by adding an extra Dense layer:

for i in range(LAYER_NUM-1):
    model.add(LSTM(HIDDEN_DIM, return_sequences=True)) 
model.add(Dense(VOCAB_SIZE))         
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy",optimizer="rmsprop")

Or by setting the appropriate output size on the last LSTM layer (I'll skip the code for that part, as it is a bit tedious).
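Putting that together, a sketch of the corrected network block, assuming the rest of the question's code: TimeDistributed (already imported in the question) makes the Dense projection explicit per timestep, so every step emits a softmax distribution over the VOCAB_SIZE categories, matching the shape of the one-hot targets in y.

model = Sequential()
model.add(LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE),
               return_sequences=True))
# Project each timestep's hidden state onto the categories, then softmax
model.add(TimeDistributed(Dense(VOCAB_SIZE)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.summary()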