def profiler(model, starting_layer_name, test_input):
    """Time one forward pass of a sub-model that starts at `starting_layer_name`.

    Builds a fresh symbolic input matching the input shape of the named layer,
    chains the layers of `model` onto it, runs a single prediction on
    `test_input`, and returns the elapsed wall-clock time in seconds.

    Parameters:
        model: a Keras model whose layers are re-applied to the new input.
        starting_layer_name: name of the layer whose input shape seeds the
            new `layers.Input`.
        test_input: data accepted by `Model.predict` for the rebuilt model.

    Returns:
        float: seconds spent inside `predict`.
    """
    # New symbolic input with the same batch shape the original layer expects.
    layer_input = layers.Input(
        batch_shape=model.get_layer(starting_layer_name).get_input_shape_at(0)
    )
    print(layer_input)
    x = layer_input
    # NOTE(review): this chains *every* layer sequentially, so it only works
    # for linear (non-branching) models — confirm against the real topology.
    for layer in model.layers:
        x = layer(x)
    intermediate_model = keras.Model(layer_input, x)
    start = time.time()
    # Bug fix: the original assigned the prediction result back onto
    # `intermediate_model`, clobbering the model object. Keep them separate.
    _prediction = intermediate_model.predict(test_input)
    elapsed = time.time() - start
    # Bug fix: the original computed the elapsed time and then discarded it;
    # return it so callers can actually use the measurement.
    return elapsed
def split(model, input):
    """Split `model` at the layer named `input` and profile the sub-model.

    Rebuilds the tail of `model` starting from the named layer by recursively
    re-wiring each layer onto the outputs of its inbound layers, then wraps
    the result in a new `models.Model` and hands it to `profiler`.

    Parameters:
        model: the pre-trained (possibly non-sequential) Keras model to split.
        input: name of the layer at which the split begins.
    """
    # The split point, i.e. the starting layer of the sub-model.
    starting_layer_name = input
    # Fresh symbolic input with the batch shape the split layer expects.
    new_input = layers.Input(
        batch_shape=model.get_layer(starting_layer_name).get_input_shape_at(0)
    )
    layer_outputs = {}

    def get_output_of_layer(layer):
        # Memoized recursive re-wiring: a layer feeding several branches is
        # resolved exactly once.
        if layer.name in layer_outputs:
            return layer_outputs[layer.name]
        # Base case: the split layer is fed directly from the new input.
        if layer.name == starting_layer_name:
            out = layer(new_input)
            layer_outputs[layer.name] = out
            return out
        # Collect every layer that feeds this one, then resolve their outputs.
        prev_layers = []
        for node in layer._inbound_nodes:
            prev_layers.extend(node.inbound_layers)
        pl_outs = [get_output_of_layer(pl) for pl in prev_layers]
        out = layer(pl_outs[0] if len(pl_outs) == 1 else pl_outs)
        layer_outputs[layer.name] = out
        return out

    # NOTE(review): -139 / -131 are hard-coded offsets into model.layers and
    # assume one specific architecture — confirm against the actual model.
    if starting_layer_name == 'input_1':
        new_output = get_output_of_layer(model.layers[-139])
    else:
        new_output = get_output_of_layer(model.layers[-131])

    if starting_layer_name == 'input_1':
        model = models.Model(new_input, new_output)
        profiler(model, starting_layer_name, processed_image)
    elif starting_layer_name == 'block_1_project_BN':
        # Bug fix: the original passed the layer *name* (a string) as the
        # model's input, which raises "Input tensors to a Model must come
        # from `keras.layers.Input`" — the reported error. The input must be
        # the new symbolic Input tensor built above.
        model = models.Model(new_input, new_output)
        profiler(model, starting_layer_name, processed_image)
# Split and profile the model at each of the two branch points.
# NOTE(review): `model` and `processed_image` must already be defined at
# module level before these calls run — neither is visible in this snippet.
split( model, 'input_1' )
split( model, 'block_1_project_BN' )
我需要遍历一个预先训练的非顺序模型,并在该模型中发现分支的地方进行分割,然后将其划分为子模型。然后,我需要第一个模型的模型预测结果的输出,第二个模型的模型预测结果的输出。
例如模型A(最后一层预测结果的输出)->模型B
当运行上面的代码时，会引发以下错误：
Input tensors to a Model must come from `keras.layers.Input`. Received:
block_1_project_BN (missing previous layer metadata).
答案 0（得分：1）
请参阅 TensorFlow 官方教程"自定义层（Custom layers）"部分中的示例，其中演示了如何把一个块（即某一层的输出）用作其他层的输入。
我们创建了一个类,该类本身不过是一个resnet模型块,最后,您可以看到还添加了identify函数。
Models: Composing layers
Many interesting layer-like things in machine learning models are implemented by composing existing layers. For example, each residual block in a resnet is a composition of convolutions, batch normalizations, and a shortcut. Layers can be nested inside other layers.
Typically you inherit from keras.Model when you need the model methods like: Model.fit,Model.evaluate, and Model.save (see Custom Keras layers and models for details).
One other feature provided by keras.Model (instead of keras.layers.Layer) is that in addition to tracking variables, a keras.Model also tracks its internal layers, making them easier to inspect.
class ResnetIdentityBlock(tf.keras.Model):
    """A ResNet identity block: three conv/batch-norm stages plus a shortcut.

    The input tensor is added back onto the result of the third stage before
    the final ReLU, so input and output shapes must match.
    """

    def __init__(self, kernel_size, filters):
        super(ResnetIdentityBlock, self).__init__(name='')
        filters1, filters2, filters3 = filters
        # Stage a: 1x1 convolution + batch norm.
        self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
        self.bn2a = tf.keras.layers.BatchNormalization()
        # Stage b: kernel_size convolution (padding='same' keeps the spatial
        # shape, which the shortcut addition in call() requires) + batch norm.
        self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
        self.bn2b = tf.keras.layers.BatchNormalization()
        # Stage c: 1x1 convolution + batch norm.
        self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
        self.bn2c = tf.keras.layers.BatchNormalization()

    def call(self, input_tensor, training=False):
        """Run the three stages and add the shortcut; returns relu(out + input)."""
        x = self.conv2a(input_tensor)
        x = self.bn2a(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2b(x)
        x = self.bn2b(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2c(x)
        x = self.bn2c(x, training=training)
        # Shortcut connection: requires x and input_tensor to share a shape.
        x += input_tensor
        return tf.nn.relu(x)
Create an instance of the ResNet identity block class:
# Instantiate one ResNet identity block (kernel_size=1, filters=[1, 2, 3]).
block = ResnetIdentityBlock(1, [1, 2, 3])

def chain_blocks(input):
    """Apply the block repeatedly in sequence and return the final tensor."""
    x1 = block(input)
    x2 = block(x1)
    # NOTE: the '....' lines below are pseudocode placeholders from the
    # original answer (not runnable Python) — the pattern continues up to xn.
    ....
    ....
    return xn
In this way you can bind up the ResNet flow sequentially within a function. Moreover, if you want to add another layer after a block you can do that; just make sure the output shape of the block matches the input shape of the next layer.
让我知道是否需要其他信息。