如何提高神经网络验证的准确性?

时间:2020-08-31 13:51:00

标签: python neural-network

我有一个只有一个输出的多输入模型,我已经尝试了所有方法,但是我的验证准确性非常低,不到1%。我认为我的模型可能出了点问题。有什么我可以提高验证准确性的方法吗?下面是我的模型。

from tensorflow.keras.models import Model 
from tensorflow.keras.layers import Dense, Input, Dropout, concatenate
from tensorflow.keras.layers import Embedding, Conv1D,LSTM, GlobalMaxPooling1D, Concatenate
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras import regularizers

import math  # local import: needed for integer step counts below

# Build and train a two-input classifier:
#   * input_text  -> Embedding -> parallel Conv1D branches (kernel sizes 1..5,
#                    i.e. n-gram detectors) -> global max pool -> dense
#   * meta_input  -> dense projection
# The two branches are concatenated and fed to a single softmax over n_classes.
# NOTE(review): relies on names defined elsewhere in the project
# (tokenizer, n_classes, batch_size, df_train, df_valid, onehot,
# label_encoder, generator) — presumably a fitted Keras Tokenizer and
# pandas DataFrames; verify against the calling notebook.

# +1 because Keras Tokenizer word indices start at 1, so the largest valid
# index equals len(word_index); without the +1 that index is out of range
# for the embedding table.
vocab_size = len(tokenizer.word_index) + 1

input_text = Input(shape=(5,), dtype='int32', name='input_text')
meta_input = Input(shape=(33672,), name='meta_input')
embedding = Embedding(input_dim=vocab_size,  # reuse vocab_size instead of re-deriving it
                      output_dim=20,
                      input_length=5)(input_text)

dropout = Dropout(0.1)(embedding)

# One Conv1D + global-max-pool branch per kernel size.
pooled_convolutions = []
for kernel_size in [1, 2, 3, 4, 5]:
    convolution = Conv1D(filters=20,
                         kernel_size=kernel_size,
                         padding='valid',
                         strides=1,
                         activation='relu')(dropout)
    pool = GlobalMaxPooling1D()(convolution)
    pooled_convolutions.append(pool)
concatenated = Concatenate()(pooled_convolutions)
dropout = Dropout(0.2)(concatenated)
dense = Dense(100, activation='relu',
              kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01),
              bias_regularizer=regularizers.l2(0.01),
              activity_regularizer=regularizers.l2(0.01))(dropout)
dropout = Dropout(0.2)(dense)
# BUG FIX: the original fed `dense` here, silently bypassing the Dropout
# created on the previous line; feed the dropped-out tensor instead.
output1 = Dense(n_classes, activation='relu')(dropout)
output2 = Dense(n_classes, activation='relu')(meta_input)
output = concatenate([output1, output2])
main_output = Dense(n_classes, activation='softmax',
                    kernel_regularizer=regularizers.l2(0.01),
                    bias_regularizer=regularizers.l2(0.01),
                    activity_regularizer=regularizers.l2(0.01))(output)
model = Model(inputs=[input_text, meta_input], outputs=[main_output])
optimizer = Adam(lr=.001)
# sparse_categorical_crossentropy expects integer class labels, not one-hot
# vectors — confirm the generator yields integer labels.
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Generators
train_generator = generator(df_train, vocab_size, batch_size, tokenizer, onehot, label_encoder)
validation_generator = generator(df_valid, vocab_size, batch_size, tokenizer, onehot, label_encoder)
model.summary()

# ceil the step counts: plain `/` yields floats, which newer TF versions
# reject, and flooring would drop the final partial batch each epoch.
history = model.fit_generator(generator=train_generator,
                              validation_data=validation_generator,
                              epochs=20,
                              steps_per_epoch=math.ceil(len(df_train) / batch_size),
                              validation_steps=math.ceil(len(df_valid) / batch_size))

0 个答案:

没有答案