I'm getting the error in the title and couldn't find any question on Stack Overflow that solves my problem. My code is below:
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder

def build_model(embed_df, ohe_df):
    models = []
    inputs = []
    for col in embed_df:
        vocab_size = embed_df[col].nunique()
        input1 = tf.keras.layers.Input(shape=(1,), name='embed' + '_' + col)
        inputs.append(input1)
        embed1 = tf.keras.layers.Embedding(vocab_size, 200,
                                           trainable=True,
                                           embeddings_initializer='uniform')(input1)
        embed_reshaped = tf.keras.layers.Reshape(target_shape=(200,))(embed1)
        models.append(embed_reshaped)
    # input2 = tf.keras.layers.Input(shape=(len(ohe_df),), name='ohe_input')
    # inputs.append(input2)
    # models.append(input2)
    merge_models = tf.keras.layers.concatenate(models)
    dense1 = tf.keras.layers.Dense(1000)(merge_models)
    bn1 = tf.keras.layers.BatchNormalization()(dense1)
    dense2 = tf.keras.layers.Dense(1000)(bn1)
    bn2 = tf.keras.layers.BatchNormalization()(dense2)
    # target_values (the number of output units) is defined elsewhere
    out = tf.keras.layers.Dense(target_values, activation='sigmoid')(bn2)
    full_model = tf.keras.models.Model(inputs=inputs, outputs=out)
    full_model.compile(loss=tf.keras.losses.categorical_crossentropy,
                       metrics=['accuracy'],
                       optimizer='adam')
    return full_model
def prepar_data_set(data_df):
    for col in data_df.columns:
        encoder = LabelEncoder()
        data_df[col] = encoder.fit_transform(data_df[col])
    return data_df
embed_df = prepar_data_set(embed_df)
model = build_model(embed_df, ohe_df)
cat_features = embed_df.columns
input_dict = {
    "embed_col1": train[cat_features[0]],
    "embed_col2": np.array(train[cat_features[1]]),
    "embed_col3": np.array(train[cat_features[2]]),
    "embed_col4": np.array(train[cat_features[3]]),
    "embed_col5": np.array(train[cat_features[4]]),
    "embed_col6": np.array(train[cat_features[5]]),
    "embed_col7": np.array(train[cat_features[6]]),
}
model.fit(input_dict, np.array(target_encoded), epochs=50, batch_size=64)
I can't share the data, so I've renamed the columns, but assume my dataframe has columns named like "col1, col2, col3 ... col7". The exact error message is: No data provided for "embed_col1". Need data for each key in: ['embed_col1', 'embed_col2', 'embed_col3', 'embed_col4', 'embed_col5', 'embed_col6', 'embed_col7'].
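For context, this error usually means the dict keys passed to fit() do not exactly match the names of the model's Input layers (built here as 'embed_' + the real column name, so any renaming breaks hard-coded keys). A minimal diagnostic sketch, assuming the same train and cat_features as above: print the names the model actually expects, then derive the keys from the columns instead of typing them out by hand:

# print the exact input names fit() will look for
for inp in model.inputs:
    print(inp.name)

# build the feed dict from the same naming scheme used in build_model
input_dict = {'embed_' + col: np.asarray(train[col]) for col in cat_features}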
Answer 0 (score: 0)
This code solved the problem; the dictionary keys were probably incorrect:
# pass the inputs as a positional list instead of a dict keyed by layer name
data = [input_dict[x] for x in input_dict.keys()]
model.fit(data, np.array(target_encoded), epochs=50, batch_size=64)
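This works because fit() matches a list of arrays to the model's inputs by position rather than by name, so mismatched keys no longer matter (and the dict preserves insertion order on Python 3.7+). A sketch of the same idea without the intermediate dict, assuming cat_features is in the same order the Input layers were created inside build_model:

# one array per input, in creation order
data = [np.asarray(train[col]) for col in cat_features]
model.fit(data, np.array(target_encoded), epochs=50, batch_size=64)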