我已经构建了一个自定义层,我希望在6层上训练我的数据。我层的第一个输入工作正常,但第二层的输入却不行。这是因为我的代码中的_features = tf.reshape(_features, [batch_size, -1])
部分。在第一层完成之后,我对第二层的输入将变平。每层的反向传播过程完成后,是否可以将其转换回其常规形状?
class convLayer(layers.Layer):
def __init__(self, output_dim, adjacency, batch_size, **kwargs):
self.output_dim = output_dim
self.adjacency = adjacency # adj shape is [should be batch size , 50, 50]
self.batch_size = batch_size # hyper parameter
super(convLayer, self).__init__(**kwargs)
def build(self, input_shape):
shape = tf.TensorShape((input_shape[1], self.output_dim))
shape = [int(shape[0]),int(shape[1])] # [50 , 32]
self.kernel = self.add_weight(... shape = shape)
self.bias = self.add_weight(... shape = shape)
super(convLayer, self).build(input_shape)
def call(self, inputs):
_features = conv_models.GCN(inputs,self.adjacency,self.kernel,self.bias) #[ batch_size, 50, 32]
_features = tf.reshape(_features, [batch_size, -1]) # [batch_size 1600], fl. 32
_features = tf.cast(_features, tf.float64) # [batch_size 1600], fl. 64
return _features
conv_output = 32
K.set_learning_phase(1)
batch_size = 100
num_layers = 6
model = tf.keras.Sequential()
#where i add the layers
for i in range(num_layers):
model.add(convLayer(32,adj[:100],batch_size))
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='mse',
metrics=['mae'])
model.fit(features[0:100], test[0:100],batch_size = 100)