Comparing LSTM and non-LSTM models in Keras

Time: 2019-03-06 16:43:35

Tags: keras nlp lstm

My question comes from this post.

I took the code from the link above and created model2, which adds an LSTM layer. I compared the performance of model2 with model, and even on the toy data from the link I saw an improvement. I compared the output of print(y_prob) with print(y_prob_lstm).

Is the performance gain due to the LSTM layer, or is there some other reason?

from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
# define documents
docs = ['Well done!',
        'Good work',
        'Great effort',
        'nice work',
        'Excellent!',
        'Weak',
        'Poor effort!',
        'not good',
        'poor work',
        'Could have done better.']
# define class labels
labels = array([1,1,1,1,1,0,0,0,0,0])


from keras.preprocessing.text import Tokenizer

tokenizer = Tokenizer()

# Build the word -> index dictionary.
# IMPORTANT: fit on every text the model will see, including test data,
# and call fit_on_texts only once.
tokenizer.fit_on_texts(docs)

# This transforms the texts into sequences of word indices.
encoded_docs2 = tokenizer.texts_to_sequences(docs)

print(encoded_docs2)  # inspect the integer-encoded documents

# Pad (or truncate) every sequence to a fixed length of 4.
max_length = 4
padded_docs2 = pad_sequences(encoded_docs2, maxlen=max_length, padding='post')
# Largest word index, used to size the Embedding layer below.
max_index = array(padded_docs2).reshape((-1,)).max()

# define the model
model = Sequential()
model.add(Embedding(max_index+1, 8, input_length=max_length))  # input_dim must be max_index+1, since indices run from 0 to max_index
#model.add(LSTM(8, return_sequences=True))

model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs2, labels, epochs=50, verbose=0)
# evaluate the model
loss, accuracy = model.evaluate(padded_docs2, labels, verbose=0)
print('Accuracy: %f' % (accuracy*100))

y_classes = model.predict_classes(padded_docs2)
y_prob = model.predict_proba(padded_docs2)


model2 = Sequential()
model2.add(Embedding(max_index+1, 8, input_length=max_length))  # input_dim must be max_index+1, since indices run from 0 to max_index
model2.add(LSTM(8, return_sequences=True))

model2.add(Flatten())
model2.add(Dense(1, activation='sigmoid'))
# compile the model
model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# summarize the model
print(model2.summary())
# fit the model
model2.fit(padded_docs2, labels, epochs=50, verbose=0)
# evaluate the model
loss, accuracy = model2.evaluate(padded_docs2, labels, verbose=0)
print('Accuracy: %f' % (accuracy*100))
y_classes_lstm = model2.predict_classes(padded_docs2)
y_prob_lstm = model2.predict_proba(padded_docs2)
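
# --- Added check (not from the original post): compare model capacity. ---
# The only architectural difference between model and model2 is the LSTM
# layer, which adds 4 * ((input_dim + units + 1) * units)
# = 4 * ((8 + 8 + 1) * 8) = 544 trainable weights. With only 10 training
# sentences, extra capacity alone can help a model fit the data better.
print(model.count_params())   # baseline: Embedding + Dense head
print(model2.count_params())  # baseline + 544 LSTM weights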


# Inspect the learned embedding matrix of the baseline model.
embeddings = model.layers[0].get_weights()[0]

embedding_for_word_14 = embeddings[14]  # embedding row for word index 14
index = tokenizer.texts_to_sequences([['well']])[0][0]  # integer index assigned to 'well'
print(tokenizer.document_count)    # number of documents the tokenizer was fit on
print(type(tokenizer.word_index))  # dict mapping words to indices

# Is this an LSTM effect, or is it overfitting?
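
Because both models are fit and evaluated on the same 10 sentences, the accuracies above cannot separate a genuine LSTM effect from memorization. A minimal sketch of a fairer check, assuming scikit-learn is available for the split (with so few documents any single split is noisy, so repeating the comparison over several random seeds helps):

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    padded_docs2, labels, test_size=0.3, random_state=0, stratify=labels)

def build_model(use_lstm):
    # Same architecture as above, with the LSTM layer toggled on or off.
    m = Sequential()
    m.add(Embedding(max_index + 1, 8, input_length=max_length))
    if use_lstm:
        m.add(LSTM(8, return_sequences=True))
    m.add(Flatten())
    m.add(Dense(1, activation='sigmoid'))
    m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return m

for use_lstm in (False, True):
    m = build_model(use_lstm)
    m.fit(X_train, y_train, epochs=50, verbose=0)
    _, acc = m.evaluate(X_test, y_test, verbose=0)
    print('with LSTM' if use_lstm else 'baseline', '- test accuracy: %f' % acc)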

0 Answers:

No answers