I am trying to preprocess a large corpus (roughly 2 MB) so that every word of the text is grouped with the 2 words that follow it (i.e. in groups of 3). So for the following input:
'The man ate the apple'
I would get (The, man, ate), (man, ate, the), (ate, the, apple).
I then want to vectorize each word, build a dataset (the first two words as input, the third word as the output) and feed it into an LSTM.
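To illustrate the grouping I mean, here is a toy sketch (not the code I actually run; group_into_triples is just an illustrative name):

    def group_into_triples(sentence):
        # Slide a window of size 3 over the tokenized sentence
        words = sentence.split()
        return list(zip(words, words[1:], words[2:]))

    print(group_into_triples('The man ate the apple'))
    # [('The', 'man', 'ate'), ('man', 'ate', 'the'), ('ate', 'the', 'apple')]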
When I run the code below on a Google Compute Engine instance, the process keeps getting killed as soon as I increase the maximum number of words the (Keras) Tokenizer accepts. Any ideas on how to make my code more efficient?
import numpy as np
from keras.preprocessing.text import Tokenizer

size_of_vocabulary = 1000

def preprocess_corpus():
    text = load_corpus(filename)
    print("Preprocessing...")
    tokenizer = Tokenizer(num_words=size_of_vocabulary)
    tokenizer.fit_on_texts([text])
    word_index = tokenizer.word_index
    reverse_word_index = dict(zip(word_index.values(), word_index.keys()))
    return text, word_index, reverse_word_index

def trie_data():
    def clean_text(text):
        # Replace punctuation and whitespace control characters with spaces
        filters = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
        translate_map = str.maketrans(filters, " " * len(filters))
        return text.translate(translate_map)

    def vectorize_word(word):
        # One-hot encode the target word over the truncated vocabulary
        word_vector = np.zeros(size_of_vocabulary - 1).astype('float32')
        word_vector[word_index[word]] = 1.0
        return word_vector

    text, word_index, reverse_word_index = preprocess_corpus()
    clean_text = clean_text(text).split()

    X_data = list()
    Y_data = list()

    # Use generator (useful for large texts)
    def enumerate_data():
        for index, word in enumerate(clean_text):
            if index + 2 < len(clean_text):
                # Only keep triples whose target word is inside the vocabulary
                if word_index[clean_text[index + 2]] < size_of_vocabulary - 1:
                    yield (np.asarray([word_index[clean_text[index]],
                                       word_index[clean_text[index + 1]]]),
                           vectorize_word(clean_text[index + 2]))

    data = enumerate_data()
    for i in data:
        X_data.append(i[0])
        Y_data.append(i[1])

    return np.asarray(X_data), np.asarray(Y_data), word_index
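For context, this is roughly how I plan to consume the returned arrays (a minimal sketch; the layer sizes and optimizer are placeholders, not the model I am tuning):

    from keras.models import Sequential
    from keras.layers import Embedding, LSTM, Dense

    X_data, Y_data, word_index = trie_data()

    model = Sequential()
    model.add(Embedding(input_dim=size_of_vocabulary, output_dim=64))  # X_data holds pairs of word indices
    model.add(LSTM(128))
    model.add(Dense(size_of_vocabulary - 1, activation='softmax'))     # Y_data is one-hot encoded
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(X_data, Y_data, batch_size=128, epochs=5)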