I'm trying to run LSTM multi-label text classification with Keras / Theano.
I have a text/label CSV. The text is plain text and the labels are integers, 9 classes in total, numbered 1 to 9.
I don't think I've configured the model correctly for this problem. My code so far:
import keras.preprocessing.text
import numpy as np
# "Using Theano backend." is printed here when Keras is imported
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
import pandas
data = pandas.read_csv("for_keras_text_label.csv", sep = ',', quotechar = '"', header = 0)
x = data['text']
y = data['label']
x = x.iloc[:].values
y = y.iloc[:].values
tk = keras.preprocessing.text.Tokenizer(nb_words=2000, filters=keras.preprocessing.text.base_filter(), lower=True, split=" ")
tk.fit_on_texts(x)
x = tk.texts_to_sequences(x)
max_len = 80
print "max_len ", max_len
print('Pad sequences (samples x time)')
x = sequence.pad_sequences(x, maxlen=max_len)
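# x is now a dense integer matrix of shape (n_samples, 80); shorter texts are
# zero-padded and longer ones truncated (both at the front, by default)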
# the model
max_features = 20000
model = Sequential()
model.add(Embedding(max_features, 128, input_length=max_len, dropout=0.2))
model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))
model.add(Dense(9))
model.add(Activation('softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
# run
model.fit(x, y=y, batch_size=200, nb_epoch=1, verbose=1, validation_split=0.2, shuffle=True)
I get this error:
IndexError: index 9 is out of bounds for axis 1 with size 9
Apply node that caused the error:
AdvancedIncSubtensor{inplace=False, set_instead_of_inc=True}(Alloc.0, TensorConstant{1}, ARange{dtype='int64'}.0, Elemwise{Cast{int32}}.0)
Toposort index: 213
Inputs types: [TensorType(float32, matrix), TensorType(int8, scalar), TensorType(int64, vector), TensorType(int32, vector)]
Inputs shapes: [(200, 9), (), (200,), (200,)]
Inputs strides: [(36, 4), (), (8,), (4,)]
Inputs values: ['not shown', array(1, dtype=int8), 'not shown', 'not shown']
Outputs clients: [[Reshape{2}(AdvancedIncSubtensor{inplace=False, set_instead_of_inc=True}.0, MakeVector{dtype='int64'}.0)]]
Backtrace when the node is created(use Theano flag traceback.limit=N to make it longer):
File "/home/ubuntu/anaconda3/envs/theano/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2827, in run_ast_nodes
if self.run_code(code, result):
File "/home/ubuntu/anaconda3/envs/theano/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-14-5264b8e23f0a>", line 7, in <module>
model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
File "/home/ubuntu/anaconda3/envs/theano/lib/python2.7/site-packages/keras/models.py", line 578, in compile
**kwargs)
File "/home/ubuntu/anaconda3/envs/theano/lib/python2.7/site-packages/keras/engine/training.py", line 604, in compile
sample_weight, mask)
File "/home/ubuntu/anaconda3/envs/theano/lib/python2.7/site-packages/keras/engine/training.py", line 303, in weighted
score_array = fn(y_true, y_pred)
File "/home/ubuntu/anaconda3/envs/theano/lib/python2.7/site-packages/keras/objectives.py", line 45, in sparse_categorical_crossentropy
return K.sparse_categorical_crossentropy(y_pred, y_true)
File "/home/ubuntu/anaconda3/envs/theano/lib/python2.7/site-packages/keras/backend/theano_backend.py", line 1079, in sparse_categorical_crossentropy
target = T.extra_ops.to_one_hot(target, nb_class=output.shape[-1])
Answer (score: 0):
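The immediate cause of the IndexError is the label range: with a 9-unit softmax output, sparse_categorical_crossentropy one-hot encodes the targets to width 9, so the valid class indices are 0..8 and a label value of 9 falls out of bounds. A minimal sketch of the fix, assuming the labels really run from 1 to 9:

# Shift the integer labels from 1..9 down to 0..8 before fitting
y = y - 1
# Or one-hot encode them yourself and use plain 'categorical_crossentropy':
# from keras.utils.np_utils import to_categorical
# y = to_categorical(y - 1, 9)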
Beyond that, please see the following example of multi-label classification with an LSTM.
import numpy as np
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dropout
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
X_train = np.array(["new york is a hell of a town",
"new york was originally dutch",
"the big apple is great",
"new york is also called the big apple",
"nyc is nice",
"people abbreviate new york city as nyc",
"the capital of great britain is london",
"london is in the uk",
"london is in england",
"london is in great britain",
"it rains a lot in london",
"london hosts the british museum",
"new york is great and so is london",
"i like london better than new york"])
y_train_text = [["new york"],["new york"],["new york"],["new york"],
["new york"],["new york"],["london"],["london"],
["london"],["london"],["london"],["london"],
["new york","England"],["new york","london"]]
lb = preprocessing.MultiLabelBinarizer(classes=("new york","london","England"))
Y = lb.fit_transform(y_train_text)
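# For reference: Y is a binary indicator matrix over the classes in the order
# given above, e.g. ["new york"] -> [1, 0, 0], ["new york","london"] -> [1, 1, 0]
print(Y.shape)  # (14, 3)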
#Y_test = lb.fit_transform(y_test_text)
#print(Y_test)
tokenizer = Tokenizer(num_words=1000, lower=True)
tokenizer.fit_on_texts(X_train)
X_train1 = tokenizer.texts_to_sequences(X_train)
maxlen = 100
X_train2 = pad_sequences(X_train1, maxlen=maxlen)
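# X_train2 is an integer matrix of shape (14, 100); every sequence is
# left-padded with zeros to maxlen timesteps (pad_sequences pads at the front by default)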
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X_train2, Y, test_size=0.1, random_state=42)
max_features=1000
emb_dim = 32
model = Sequential()
model.add(Embedding(max_features, emb_dim, input_length=X_train2.shape[1]))
model.add(LSTM(128))
model.add(Dense(3, activation='sigmoid'))
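# For genuinely multi-label targets, binary_crossentropy (commented out below)
# is usually the better-matched loss: categorical_crossentropy assumes exactly
# one true class per sample, while independent sigmoid outputs do not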
# model.compile(optimizer=Adam(0.015), loss='binary_crossentropy', metrics=['acc'])
model.compile(optimizer=Adam(0.015), loss='categorical_crossentropy', metrics=['acc'])
callbacks=[EarlyStopping(monitor='val_loss', patience=2, min_delta=0.0001), ModelCheckpoint(filepath='model-simple.h5', save_best_only=True)]
hist = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.1, callbacks=callbacks)
Output:
Train on 10 samples, validate on 2 samples
Epoch 1/10
10/10 [==============================] - 2s 183ms/step - loss: 1.3170 - acc: 0.6000 - val_loss: 1.0557 - val_acc: 0.5000
Epoch 2/10
10/10 [==============================] - 0s 27ms/step - loss: 1.2505 - acc: 0.7000 - val_loss: 1.0371 - val_acc: 0.5000
Epoch 3/10
10/10 [==============================] - 0s 27ms/step - loss: 1.2833 - acc: 0.7000 - val_loss: 0.7377 - val_acc: 0.5000
Epoch 4/10
10/10 [==============================] - 0s 27ms/step - loss: 1.0988 - acc: 0.7000 - val_loss: 0.8957 - val_acc: 0.5000
Epoch 5/10
10/10 [==============================] - 0s 28ms/step - loss: 1.1339 - acc: 0.7000 - val_loss: 0.8717 - val_acc: 0.5000
Note: the trained model's performance still needs tuning.
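Once trained, here is a short sketch of turning the sigmoid scores back into label names, assuming the variables above (the 0.5 threshold is an arbitrary choice, not part of the original answer):

probs = model.predict(x_test)        # one score per class, shape (n_test, 3)
preds = (probs > 0.5).astype(int)    # threshold scores into a 0/1 indicator matrix
print(lb.inverse_transform(preds))   # e.g. [('new york',), ('london',)]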