I load the model with the code below. It appears to load not only the matrices needed for inference, but also some matrices used for the backward (training) pass. Compared with the equivalent Caffe model, this model needs about 4x the memory. Is there any way to reduce the memory consumption of the TF model?
import numpy as np
import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
export_dir = 'model_dir'

with tf.Session(graph=tf.Graph(), config=config) as sess:
    # Load the SavedModel with the serving tag
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    tensor_input = sess.graph.get_tensor_by_name('input:0')
    tensor_input.set_shape(shape=(1, 128, 128, 3))
    tensor_output = sess.graph.get_tensor_by_name('output:0')
    img = np.random.rand(1, 128, 128, 3)
    preds = sess.run(tensor_output, feed_dict={tensor_input: img})
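
For reference, this is the kind of pruning I was hoping for: freeze the graph so that only the ops reachable from the output node are kept, which should drop the backward/training ops. This is just a rough sketch, it assumes the output node is literally named 'output', and I have not confirmed it actually lowers memory usage:

import tensorflow as tf

export_dir = 'model_dir'

# Load the SavedModel once, then freeze it down to the inference subgraph.
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    # Converts variables to constants and keeps only ops reachable from 'output',
    # so gradient/optimizer ops should be pruned away.
    frozen_graph_def = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), ['output'])

# Run inference against the pruned graph in a fresh session.
with tf.Graph().as_default():
    tf.import_graph_def(frozen_graph_def, name='')
    with tf.Session() as sess:
        tensor_input = sess.graph.get_tensor_by_name('input:0')
        tensor_output = sess.graph.get_tensor_by_name('output:0')

Is this the right direction, or is there a better way to keep the SavedModel's memory footprint closer to the Caffe version?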