I am a beginner and need guidance on writing the code that connects my dataset to a model I found on GitHub.
My dataset is as follows (Stack Overflow does not let me embed the images directly, so please click the image links):
This is how it looks in my directory, which shows the number of samples I have.
Inside each sample there are 2 categories, AM/PM, which my model finally has to classify.
These are the time-series images; each sequence has 8 images taken at a certain time interval.
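Since the screenshots cannot be embedded here, the layout is roughly like this (folder and file names below are placeholders; the generator sketch at the end of this question assumes this structure):

train_folder/
    sample_001/
        AM/
            frame_01.jpg ... frame_08.jpg
        PM/
            frame_01.jpg ... frame_08.jpg
    sample_002/
        ...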
import os
import tensorflow as tf

epochs = 52
time = 8                  # frames per sequence
n_classes = 2             # AM / PM
width, height, color_channels = 210, 140, 3
number_of_hiddenunits = 32
batch_size = 16
# The two methods below are part of my model class (the rest of the class is omitted).
def get_conv_vgg(self, input_batch):
    # VGG-style feature extractor applied to every frame via TimeDistributed:
    # (batch, time, H, W, C) -> (batch, time, features)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))(input_batch)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.BatchNormalization())(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=(2, 2)))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.BatchNormalization())(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=(2, 2)))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.BatchNormalization())(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=(2, 2)))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.BatchNormalization())(conv_model)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=(2, 2)))(conv_model)
    # Flatten each frame's feature map into a vector (the LSTM's per-step input)
    conv_model = tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten())(conv_model)
    return conv_model
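For reference, each stride-2 pooling with 'same' padding halves the spatial size (rounding up), so a 140×210 frame leaves the last pool as 9×14×512 and flattens to a 64512-wide vector per time step. A quick standalone check of that arithmetic:

import math

h, w = 140, 210
for _ in range(4):                      # four MaxPool2D(2, 2), padding='same'
    h, w = math.ceil(h / 2), math.ceil(w / 2)
print(h, w, h * w * 512)                # -> 9 14 64512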
def create_network(self, model_name):
    input_batch = tf.keras.layers.Input(shape=(time, height, width, color_channels))
    if model_name == 'vgg':
        image_features = self.get_conv_vgg(input_batch)
        lstm_network = tf.keras.layers.LSTM(number_of_hiddenunits, return_sequences=True, dropout=0.5, recurrent_dropout=0.5)(image_features)
        lstm_network = tf.keras.layers.LSTM(number_of_hiddenunits, return_sequences=False, dropout=0.5, recurrent_dropout=0.5)(lstm_network)
        lstm_network = tf.keras.layers.Dense(1024, activation='relu')(lstm_network)
        lstm_network = tf.keras.layers.BatchNormalization()(lstm_network)
        lstm_network = tf.keras.layers.Dropout(0.5)(lstm_network)
        lstm_network = tf.keras.layers.Dense(512, activation='relu')(lstm_network)
        lstm_network = tf.keras.layers.Dropout(0.5)(lstm_network)
        lstm_network = tf.keras.layers.Dense(64, activation='relu')(lstm_network)
        lstm_network = tf.keras.layers.Dropout(0.5)(lstm_network)
        lstm_network = tf.keras.layers.Dense(n_classes, activation='softmax')(lstm_network)
    elif model_name == 'inception':
        image_features = self.get_conv_inception(input_batch)  # defined elsewhere in my class
        lstm_network = tf.keras.layers.LSTM(number_of_hiddenunits, return_sequences=True, dropout=0.5, recurrent_dropout=0.5)(image_features)
        lstm_network = tf.keras.layers.LSTM(number_of_hiddenunits, return_sequences=False, dropout=0.5, recurrent_dropout=0.5)(lstm_network)
        lstm_network = tf.keras.layers.Dense(512, activation='relu')(lstm_network)
        lstm_network = tf.keras.layers.Dense(64, activation='relu')(lstm_network)
        lstm_network = tf.keras.layers.Dropout(0.5)(lstm_network)
        lstm_network = tf.keras.layers.Dense(n_classes, activation='softmax')(lstm_network)
    full_network = tf.keras.Model([input_batch], lstm_network)
    full_network.summary()
    return full_network
def _trainer(network, train_generator, val_generator):
    # With a 2-unit softmax output and one-hot labels, categorical_crossentropy
    # is the matching loss (I originally had binary_crossentropy here).
    network.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    network.save_weights(checkpoint_path.format(epoch=0))
    # fit_generator is a deprecated alias of fit in TF 2.x; fit accepts generators directly.
    history = network.fit_generator(
        train_generator,
        epochs=epochs,
        steps_per_epoch=len(os.listdir(train_folder)) // batch_size,
        validation_data=val_generator,
        validation_steps=1,
        callbacks=[cp_callback, tensorboard_callback])
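(checkpoint_path, cp_callback, and tensorboard_callback are defined elsewhere in my script; typical definitions, with placeholder paths, look like this:)

checkpoint_path = 'checkpoints/cp-{epoch:04d}.ckpt'   # placeholder path
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path, save_weights_only=True, verbose=1)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='logs')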
Given all of the above, my question is: with a dataset in this format, how do I create train_generator and val_generator?
This is a CNN-LSTM architecture in which each sequence of 8 images is passed through VGG and then through the LSTM.
My doubt is how to feed these 8 images per category of each sample into the model for training.
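Here is my rough attempt at such a generator (I am not sure it is correct). It subclasses tf.keras.utils.Sequence, loads the 8 frames of one sample/category folder as a single (time, height, width, channels) array, and assumes the directory layout sketched at the top; TimeSeriesGenerator, val_folder, and the file names are placeholders.

import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img, img_to_array

class TimeSeriesGenerator(tf.keras.utils.Sequence):
    """Yields (batch, time, height, width, channels) float arrays and one-hot labels."""

    def __init__(self, root_dir, batch_size=16, time_steps=8,
                 target_size=(140, 210), classes=('AM', 'PM'), shuffle=True):
        self.batch_size = batch_size
        self.time_steps = time_steps
        self.target_size = target_size   # (height, width), matching the model input
        self.n_classes = len(classes)
        self.shuffle = shuffle
        self.sequences = []              # (list of 8 frame paths, class index) pairs
        for sample in sorted(os.listdir(root_dir)):
            for label_idx, label in enumerate(classes):
                frame_dir = os.path.join(root_dir, sample, label)
                if not os.path.isdir(frame_dir):
                    continue
                frames = sorted(os.listdir(frame_dir))[:time_steps]
                if len(frames) == time_steps:
                    self.sequences.append(
                        ([os.path.join(frame_dir, f) for f in frames], label_idx))
        self.on_epoch_end()

    def __len__(self):
        # Number of full batches per epoch
        return len(self.sequences) // self.batch_size

    def on_epoch_end(self):
        if self.shuffle:
            random.shuffle(self.sequences)

    def __getitem__(self, idx):
        batch = self.sequences[idx * self.batch_size:(idx + 1) * self.batch_size]
        x = np.zeros((len(batch), self.time_steps, *self.target_size, 3), dtype=np.float32)
        y = np.zeros((len(batch), self.n_classes), dtype=np.float32)
        for i, (frame_paths, label_idx) in enumerate(batch):
            for t, path in enumerate(frame_paths):
                # Resize each frame to (height, width) and scale pixels to [0, 1]
                img = load_img(path, target_size=self.target_size)
                x[i, t] = img_to_array(img) / 255.0
            y[i, label_idx] = 1.0
        return x, y

Used like this, each batch the model sees would have shape (16, 8, 140, 210, 3):

train_generator = TimeSeriesGenerator(train_folder)
val_generator = TimeSeriesGenerator(val_folder, shuffle=False)   # val_folder: my validation split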