I want to use TensorFlow on Android with a .pb file that was trained on a GPU.
The official TensorFlow Android demo uses the pre-trained 'tensorflow_inception_graph.pb'. As stated above, I want to use my own '*.pb' on Android instead.
First, I trained a simple TensorFlow model in Python, similar to the mnist-example, as shown below.
Python code
import tempfile

import tensorflow as tf


def deepnn(x):
    # Reshape the 180 input floats into a 30x6 single-channel "image".
    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [-1, 30, 6, 1])
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
    with tf.name_scope('pool2'):
        h_pool2 = max_pool_2x2(h_conv2)
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([15 * 3 * 64, 1024])
        b_fc1 = bias_variable([1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, 15 * 3 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, 28])
        b_fc2 = bias_variable([28])
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


# 'input' and 'output' are the node names referenced later from Android.
x = tf.placeholder(tf.float32, [None, 180], name='input')
y_ = tf.placeholder(tf.int64, [None])
y_conv, keep_prob = deepnn(x)
y_conv = tf.identity(y_conv, 'output')

with tf.name_scope('loss'):
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy, name='loss')

y_argout = tf.argmax(input=y_conv, axis=1, name="y_argout")

with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())

init_op = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init_op)
    ...
    ...
    tf.train.write_graph(sess.graph_def, './', 'mlp.pb', as_text=False)
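Note that tf.train.write_graph serializes only the GraphDef, not the values of the trained variables. For reference, here is a minimal sketch of the usual TF 1.x freezing step, which bakes the variables into constants and keeps only the nodes that 'output' depends on; it assumes it runs inside the with tf.Session() as sess: block above, the file name frozen_mlp.pb is just illustrative, and this snippet is not part of the code I actually ran:

# Sketch only: assumes a live `sess` from the Session block above.
# Replace variables with constants and keep just the subgraph needed
# to compute 'output'. 'frozen_mlp.pb' is an illustrative file name.
from tensorflow.python.framework import graph_util

output_graph_def = graph_util.convert_variables_to_constants(
    sess, sess.graph_def, ['output'])
with tf.gfile.GFile('frozen_mlp.pb', 'wb') as f:
    f.write(output_graph_def.SerializeToString())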
The names assigned in
x = tf.placeholder(tf.float32, [None, 180], name='input')
and
y_conv = tf.identity(y_conv, 'output')
('input' and 'output') are the node names used from Android Studio.
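Here is a small sketch (not part of my original code) for double-checking that these two node names are actually present in the exported file; it assumes mlp.pb sits in the current directory:

# Sketch: load the exported GraphDef and confirm the expected node names exist.
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('mlp.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
print([n.name for n in graph_def.node if n.name in ('input', 'output')])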
Then I use the exported 'mlp.pb' in Android Studio as follows.
Java code in Android Studio
inferenceInterface = new TensorFlowInferenceInterface(getAssets(), "mlp.pb");
...
// floatValues holds the 180 input values for one sample; outputs receives the result.
inferenceInterface.feed("input", floatValues, 180);
inferenceInterface.run(new String[] {"output"}); // an error will occur here.
inferenceInterface.fetch("output", outputs);
The error that occurs
06-08 09:33:59.500 22971-22971/com.example.tkt.tf_test E/TensorFlowInferenceInterface: Failed to run TensorFlow inference with inputs:[input], outputs:[output]
06-08 09:33:59.501 22971-22971/com.example.tkt.tf_test D/AndroidRuntime: Shutting down VM
06-08 09:33:59.584 22971-22971/com.example.tkt.tf_test E/AndroidRuntime: FATAL EXCEPTION: main
Process: com.example.tkt.tf_test, PID: 22971
java.lang.IllegalArgumentException: No OpKernel was registered to support Op 'SparseSoftmaxCrossEntropyWithLogits' with these attrs. Registered devices: [CPU], Registered kernels:
<no registered kernels>
[[Node: loss/sparse_softmax_cross_entropy_loss/xentropy/xentropy = SparseSoftmaxCrossEntropyWithLogits[T=DT_FLOAT, Tlabels=DT_INT64](output, Placeholder)]]
Question
How can I resolve this error and run inference with my own '*.pb' on Android? I would greatly appreciate any reply.