以下代码是使用单词嵌入(word embedding)来预测标签的非常简单的示例(请参见下文)。该示例摘自网上的一篇教程(原文中的链接已丢失)。
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
# FIX: the `keras.layers.embeddings` module path was removed in newer Keras
# releases; Embedding is importable directly from `keras.layers`.
from keras.layers import Embedding

# define documents: 10 short texts, aligned one-to-one with `labels` below
docs = ['Well done!',
'Good work',
'Great effort',
'nice work',
'Excellent!',
'Weak',
'Poor effort!',
'not good',
'poor work',
'Could have done better.']

# define class labels: 1 = positive feedback, 0 = negative feedback
labels = array([1,1,1,1,1,0,0,0,0,0])

# integer encode the documents.
# NOTE: `one_hot` hashes words into [1, vocab_size); collisions are possible,
# which is acceptable for this toy example.
vocab_size = 50
encoded_docs = [one_hot(d, vocab_size) for d in docs]
print(encoded_docs)

# pad documents to a max length of 4 words so every sample has equal shape;
# 'post' appends zeros after the word ids
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)

# define the model: embed each word id into an 8-dim vector, flatten the
# (4, 8) sequence into a 32-dim vector, and classify with a sigmoid unit
model = Sequential()
model.add(Embedding(vocab_size, 8, input_length=max_length))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# summarize the model.
# FIX: `summary()` prints the table itself and returns None, so wrapping it
# in print() emitted a stray "None" line.
model.summary()

# fit the model
model.fit(padded_docs, labels, epochs=50, verbose=0)

# evaluate the model on the training data (toy example: no held-out set)
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy*100))
让我们说我们具有如下结构化数据:
hours_of_revision = [10, 5, 7, 3, 100, 0, 1, 0.5, 4, 0.75]
这里每个条目都与上面的每个文档一一对应,很好地表明了一个人应该花更多的时间复习才能获得好成绩 :-)
想知道,有人可以将其合并到模型中以使用文本和结构化数据吗?
答案 0 :(得分:1)
是的,这可以通过Keras的Functional API实现。您所需要做的就是为hours_of_revision
添加一个额外的输入,并在进入最终分类器之前,将其与文本数据的嵌入向量拼接(concatenate)起来。
首先扩展其他数据:
# additional data: one scalar feature per document (hours spent revising)
hours_of_revision = [10, 5, 7, 3, 100, 0, 1, 0.5, 4, 0.75]

import numpy as np

# Standardise to zero mean / unit variance so this feature's raw scale does
# not dominate the embedding features it will be concatenated with.
# FIX: convert to an ndarray explicitly instead of relying on NumPy's
# scalar reflected operators to coerce the plain Python list.
hours_of_revision = np.asarray(hours_of_revision, dtype=float)
mean = np.mean(hours_of_revision)
std = np.std(hours_of_revision)
hours_of_revision = (hours_of_revision - mean) / std
# FIX: a bare expression is a no-op outside a REPL/notebook; print the
# scaled values so the script shows them like the rest of the example.
print(hours_of_revision)
使用Functional API构建模型:
# Build model with the Functional API: two inputs feeding one classifier
from keras.layers import Input, Embedding, Flatten, Dense, Concatenate
from keras.models import Model

# Two input layers: the padded word-id sequences, and the single scalar
# hours-of-revision feature per sample
integer_input = Input((max_length, ))
revision_input = Input((1,))

# Embedding layer for the words; flatten (max_length, 8) into one vector
embedding = Embedding(vocab_size, 8, input_length=max_length)(integer_input)
embedding_flat = Flatten()(embedding)

# Concatenate the flattened embedding with the revision feature
combined_data = Concatenate()([embedding_flat, revision_input])
output = Dense(1, activation='sigmoid')(combined_data)

# compile the model - pass a list of input tensors
model = Model(inputs=[integer_input, revision_input], outputs=output)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# fit the model - pass a list of input arrays, one per Input layer.
# FIX: Input((1,)) expects samples of shape (1,), but hours_of_revision is a
# flat (n_samples,) array; Keras rejects that shape at fit time, so reshape
# it to a column vector (n_samples, 1).
model.fit([padded_docs, hours_of_revision.reshape(-1, 1)], labels,
          epochs=50, verbose=0)
有关如何将Functional API用于多输入/多输出模型的更多示例,请查看Keras docs。