I would like to understand how to train a Hugging Face model (e.g. BERT, DistilBERT, etc.) with TensorFlow as the backend for question answering. Below is the logic I am currently using (but I am not sure whether this approach is correct):
First, following the Hugging Face documentation, I encode each question and context pair to obtain input_ids, attention_mask and token_type_ids, which will be used as the model's inputs.
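(For context, the tokenizer and transformer_model objects referenced below are a pretrained Hugging Face tokenizer and its TensorFlow model, loaded roughly like this; bert-base-uncased is just a placeholder checkpoint name:)

from transformers import BertTokenizer, TFBertModel

# Placeholder checkpoint; any BERT-style model with a TF implementation would work
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
transformer_model = TFBertModel.from_pretrained('bert-base-uncased')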
import numpy as np
from tqdm.notebook import tqdm_notebook

def tokenize(questions, contexts):
    input_ids, input_masks, input_segments = [], [], []
    for question, context in tqdm_notebook(zip(questions, contexts)):
        # Encode question + context as one sequence, padded/truncated to 512 tokens
        inputs = tokenizer.encode_plus(question, context, add_special_tokens=True, max_length=512,
                                       padding='max_length', truncation=True,
                                       return_attention_mask=True, return_token_type_ids=True)
        input_ids.append(inputs['input_ids'])
        input_masks.append(inputs['attention_mask'])
        input_segments.append(inputs['token_type_ids'])
    return [np.asarray(input_ids, dtype='int32'), np.asarray(input_masks, dtype='int32'),
            np.asarray(input_segments, dtype='int32')]
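Called on lists of question/context strings, this returns three (num_examples, 512) int32 arrays, for example (the strings below are just toy placeholders to show the shapes):

# Hypothetical toy data only to illustrate the expected output shapes
questions = ["Where is the Eiffel Tower?"]
contexts = ["The Eiffel Tower is a wrought-iron lattice tower located in Paris, France."]

X_ids, X_masks, X_segments = tokenize(questions, contexts)
print(X_ids.shape, X_masks.shape, X_segments.shape)  # (1, 512) each

Then I build the Keras model on top of the transformer: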
input_ids_in = tf.keras.layers.Input(shape=(512,), name='input_token', dtype='int32')
input_masks_in = tf.keras.layers.Input(shape=(512,), name='masked_token', dtype='int32')
input_segment_in = tf.keras.layers.Input(shape=(512,), name='segment_token', dtype='int32')
# Sequence output (last hidden state) of the transformer: shape (batch, 512, hidden_size)
embedding_layer = transformer_model({'input_ids': input_ids_in,
                                     'attention_mask': input_masks_in,
                                     'token_type_ids': input_segment_in})[0]
X = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(embedding_layer)
X = tf.keras.layers.GlobalMaxPool1D()(X)
start_branch = tf.keras.layers.Dense(1024, activation='relu')(X)
start_branch = tf.keras.layers.Dropout(0.3)(start_branch)
start_branch_output = tf.keras.layers.Dense(512, activation='softmax', name='start_branch')(start_branch)
end_branch = tf.keras.layers.Dense(1024, activation='relu')(X)
end_branch = tf.keras.layers.Dropout(0.3)(end_branch)
end_branch_output = tf.keras.layers.Dense(512, activation='softmax', name='end_branch')(end_branch)
model = tf.keras.Model(inputs=[input_ids_in, input_masks_in, input_segment_in], outputs = [start_branch_output, end_branch_output])
I am using 512 units in the final softmax layers because 512 is my maximum sequence length, and my goal is to predict the start and end token indices of the answer span.
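The training step I have in mind then looks roughly like this (a sketch, not something I am sure is correct; y_start and y_end are assumed to be integer arrays holding each answer's start and end token positions, which I have not shown how to derive):

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5),
              loss={'start_branch': 'sparse_categorical_crossentropy',
                    'end_branch': 'sparse_categorical_crossentropy'},
              metrics=['accuracy'])

# y_start / y_end: hypothetical int arrays of shape (num_examples,) with the
# answer span's start and end token positions within the 512-token input
model.fit([X_ids, X_masks, X_segments],
          {'start_branch': y_start, 'end_branch': y_end},
          batch_size=8, epochs=2, validation_split=0.1)

Is this the right way to set up and train a question-answering model on top of a Hugging Face transformer in TensorFlow, or is there a better approach?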