I am training an LSTM model for sentiment analysis with Keras. During the epochs, training reports about 80% accuracy on the training set, but evaluating or predicting with that same training set gives 53.9% accuracy every time. I can't figure out the problem, and I have already spent a long time on it. Also, since I save the data to disk to save time, I have commented out the data-loading part in the code below. The data is text and the labels are 0/1. Please help!
import pandas as pd
import Preprocessing as pre
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils import shuffle
import pickle
import numpy as np
import sys
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers import LSTM
from keras.preprocessing.sequence import pad_sequences
from keras.models import model_from_json
from keras.preprocessing.text import Tokenizer
import os
# fileDir = os.path.dirname(os.path.realpath('__file__'))
# df = pd.read_csv(os.path.join(fileDir, '../Dataset/tweets.csv'),header=None,encoding = "ISO-8859-1")
# df=shuffle(df)
# length=df.size
#
# train=[]
# test=[]
# Y=[]
# Y2=[]
#
# count=450000
# for a in range(450000):  # loading training data
#     b = pre.preprocess_tweet(df[1][a])
#     label = int(df[0][a])
#     train.append(b)
#     Y.append(label)
#     count -= 1
#     print("Loading training data...", count)
#
# with open('training_data(latest).obj', 'wb') as fp:
#     pickle.dump(train, fp)
# with open('training_labels(latest).obj', 'wb') as fp:
#     pickle.dump(Y, fp)
with open('training_data(latest).obj', 'rb') as fp:
    train = pickle.load(fp)
with open('training_labels(latest).obj', 'rb') as fp:
    Y = pickle.load(fp)
# count=156884
# for a in range(450000, 606884):  # loading testing data
#     b = pre.preprocess_tweet(df[1][a])
#     label = int(df[0][a])
#     test.append(b)
#     Y2.append(label)
#     count -= 1
#     print("Loading testing data...", count)
#
# with open('testing_data(latest).obj', 'wb') as fp:
#     pickle.dump(test, fp)
# with open('testing_labels(latest).obj', 'wb') as fp:
#     pickle.dump(Y2, fp)
with open('testing_data(latest).obj', 'rb') as fp:
    test = pickle.load(fp)
with open('testing_labels(latest).obj', 'rb') as fp:
    Y2 = pickle.load(fp)
# vectorizer = CountVectorizer(analyzer = "word",tokenizer = None, preprocessor = None, stop_words = None, max_features = 2000)
# # # fit_transform() does two functions: First, it fits the model
# # # and learns the vocabulary; second, it transforms our training data
# # # into feature vectors. The input to fit_transform should be a list of
# # # strings.
#
# train = vectorizer.fit_transform(train)
# test = vectorizer.transform(test)
tokenizer = Tokenizer(split=' ')
tokenizer.fit_on_texts(train)
train = tokenizer.texts_to_sequences(train)
max_words = 134
train = pad_sequences(train, maxlen=max_words)
tokenizer.fit_on_texts(test)
test = tokenizer.texts_to_sequences(test)
test = pad_sequences(test, maxlen=max_words)
print('Extracting features & training batches')
print("Training...")
embedding_size=32
model = Sequential()
model.add(Embedding(606884, 70, input_length=134))
model.add(Dropout(0.4))
model.add(LSTM(128))
model.add(Dense(64))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 200
num_epochs = 2
model.fit(train, np.array(Y), batch_size=batch_size, epochs=num_epochs ,validation_data=(test,np.array(Y2)),shuffle=True)
# Save the weights
model.save_weights('LSTM_model_weights_updated.h5')
# Save the model architecture
with open('LSTM_model_updated.json', 'w') as f:
    f.write(model.to_json())
# #
# Model reconstruction from JSON file
# with open(os.path.join(fileDir, '../Dataset/LSTM_model.json'), 'r') as f:
#     model = model_from_json(f.read())
#
# # Load weights into the new model
# model.load_weights(os.path.join(fileDir, '../Dataset/LSTM_model_weights.h5'))
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
scores = model.evaluate(test, np.array(Y2))
print('Evaluation Test accuracy:', scores[1])
count=0
sum=0
#
#
b=model.predict(test)
for a in b:
    print(count)
    if a < 0.5:
        sum = sum + abs(Y2[count] - 0)  # error finding
    else:
        sum = sum + abs(Y2[count] - 1)  # error finding
    count += 1
acc=100-((sum/156884)*100)
print ("Accuracy=",acc,"count",count)
Answer 0 (score: 1)
Your model has overfitted the training data, so it does not generalize well to the test data. Possible reasons?
Solutions?
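As an illustration of that diagnosis, here is a minimal sketch of common overfitting mitigations for a model like the one above, assuming the same data pipeline already in place (the vocabulary-based embedding size, dropout rates, EarlyStopping patience, and epoch count are illustrative choices, not values taken from the answer):

from keras.callbacks import EarlyStopping

# Size the embedding by the tokenizer's vocabulary rather than the corpus size;
# an input_dim of 606884 (the number of tweets) mostly adds unused parameters.
vocab_size = len(tokenizer.word_index) + 1  # +1 for the padding index 0

model = Sequential()
model.add(Embedding(vocab_size, 32, input_length=max_words))
model.add(Dropout(0.4))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))  # dropout inside the recurrent layer
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Stop training when validation loss stops improving instead of running a fixed number of epochs.
early_stop = EarlyStopping(monitor='val_loss', patience=2)
model.fit(train, np.array(Y), batch_size=200, epochs=20,
          validation_data=(test, np.array(Y2)), callbacks=[early_stop])

Plotting the training and validation accuracy per epoch is usually the quickest way to confirm whether overfitting is really what is causing the drop.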