I use TensorFlow to define a graph, quantize it, and export it as a frozen graph. The frozen graph should then be converted to TensorFlow Lite. I tried to follow the instructions here: https://github.com/tensorflow/tensorflow/tree/r1.13/tensorflow/contrib/quantize
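The flow described there is roughly the following (my condensed summary of the README, not verbatim):

# Training rewrite: inserts FakeQuantization nodes into the training graph;
# quant_delay postpones quantized training. Then train as usual.
g = tf.get_default_graph()
tf.contrib.quantize.create_training_graph(input_graph=g, quant_delay=2000000)

# Eval rewrite: rewrites the eval graph in place; its GraphDef and a
# checkpoint are then saved, frozen, and fed to the TFLite converter.
g = tf.get_default_graph()
tf.contrib.quantize.create_eval_graph(input_graph=g)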
When I try to call tf.lite.TFLiteConverter.from_frozen_graph, I get this error:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Input 0 of node conv2d/weights_quant/AssignMinLast was passed float from conv2d/weights_quant/min:0 incompatible with expected float_ref.
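As far as I know, float_ref is the dtype of a still-unfrozen variable, so it looks as if variable/assign nodes survive the freezing step. The op types remaining in the frozen graph can be listed like this (a quick inspection sketch, using the path from my code below):

import tensorflow as tf

gd = tf.GraphDef()
with open('./frozen/frozen_model.pb', 'rb') as f:
    gd.ParseFromString(f.read())
# After freezing, Variable/Assign ops should have been replaced by Const.
print(sorted({n.op for n in gd.node}))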
What is the problem here? My code:
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
with tf.Session() as sess:
    x = tf.placeholder(tf.float32, [None, 27648], name="x")        # 128*72*3
    y_ = tf.placeholder(tf.float32, [None, 72, 128, 1], name="y_")  # 128*72*1

    # Reshape the flat input into an image tensor.
    input_layer = tf.reshape(x, [-1, 72, 128, 3])

    # Convolutional layer.
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
    )

    # Multiply by 1 so the output tensor gets a fixed name.
    y_out = tf.math.multiply(
        conv1,
        1.,
        name="y_out"
    )

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_out))
    tf.initialize_all_variables().run()

    # Build the training model.
    g = tf.get_default_graph()
    tf.contrib.quantize.create_training_graph(input_graph=g,
                                              quant_delay=2000000)
    tf.initialize_all_variables().run()

    # Build the eval model.
    lossEval = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_out))

    # Call the eval rewrite, which rewrites the graph in place with
    # FakeQuantization nodes and folds batch norm for eval.
    g = tf.get_default_graph()
    tf.contrib.quantize.create_eval_graph(input_graph=g)

    # Call the backward-pass optimizer as usual.
    learning_rate = 5e-5
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer.minimize(loss)

    writer = tf.summary.FileWriter('./graphs', sess.graph)

    # TODO: train

    export_path = './export/'
    print('Exporting trained model to', export_path)
    with open(export_path + "qtrained.pbtxt", 'w') as f:
        f.write(str(g.as_graph_def()))
    saver = tf.train.Saver()
    saver.save(sess, export_path + "qtrained.ckpt")

    for n in tf.get_default_graph().as_graph_def().node:
        print(n.name)

    freeze_graph.freeze_graph(input_graph=export_path + "qtrained.pbtxt",
                              input_saver="",
                              input_binary=False,
                              input_checkpoint=export_path + "qtrained.ckpt",
                              output_node_names="y_out",
                              restore_op_name="save/restore_all",
                              filename_tensor_name="save/Const:0",
                              output_graph='./frozen/frozen_model.pb',
                              clear_devices=False,
                              initializer_nodes="")

    converter = tf.lite.TFLiteConverter.from_frozen_graph('./frozen/frozen_model.pb', ['x'], ['y_out'])
    converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
    input_arrays = converter.get_input_arrays()
    converter.quantized_input_stats = {input_arrays[0]: (0., 1.)}  # (mean, std_dev)
    tflite_model = converter.convert()
    open("new_converted_model.tflite", "wb").write(tflite_model)
    print("converting done")