TensorFlow Serving produces wrong answers when I export my Keras model

Date: 2018-10-18 08:31:49

Tags: python tensorflow keras tensorflow-serving

I am trying to export my Keras model to TensorFlow Serving, and the export itself works. What I want is to accept a b64-encoded image string from the client and output a True/False value. My Keras model outputs three values, where the first represents the degree predicted by the model; I compare it against a fixed threshold and want to export the whole pipeline, from receiving the image string to returning the True/False value over the RESTful API, into TensorFlow Serving. However, I do not get the correct output from my client program. Long story short, let me show the code.
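To make the intended post-processing concrete, here is a NumPy sketch of what the extra ops in the exported graph are supposed to compute (the numbers are made up):

import numpy as np

# Made-up model output for one image: [degree, x, y], shape (1, 3)
y = np.array([[123.4, 210.0, 330.0]], dtype=np.float32)

# Compare the degree against the fixed threshold and cast to float32
bool_out = (y[:, 0] > 100.0).astype(np.float32)            # -> [1.0]

# Mirror tf.concat([tf.transpose(y), bool_out], axis=0): shape (4, 1)
final_out = np.concatenate([y.T, bool_out[np.newaxis, :]], axis=0)
print(final_out.ravel())                                   # [123.4 210. 330. 1.]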

Here is my program for exporting the saved model:



import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
from keras.models import load_model
from keras.layers import Input
import os

tf.app.flags.DEFINE_string('model_dir', './keras_models',
                           '''Directory which contains keras models''')
tf.app.flags.DEFINE_string('output_dir', './model_output',
                           '''Directory where to export the model''')
tf.app.flags.DEFINE_string('model_version', '1',
                           '''version number of the model''')
tf.app.flags.DEFINE_string('model_file', 'pointer_model.json',
                           '''json file which contains model architecture''')
tf.app.flags.DEFINE_string('weights_file', 'pointer_model.h5',
                           '''h5 file that contains model weights''')

FLAGS = tf.app.flags.FLAGS


def preprocess_image(image_buffer):
    '''
    Preprocess JPEG-encoded bytes into a float image tensor.

    :param image_buffer: scalar string tensor containing the raw JPEG bytes
    :return: 3D float32 image tensor (height, width, channels), values in [0, 1]
    '''

    image = tf.image.decode_jpeg(image_buffer, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)

    return image


def main(_):
    with tf.Graph().as_default():
        serialized_tf_example = tf.placeholder(tf.string, name='input_image')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(
                shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

        images = tf.squeeze(images, [0])
        images = tf.expand_dims(images, axis=0)
        # now the image shape is [1, ?, ?, 3]
        images = tf.image.resize_images(images, tf.constant([224, 224]))

        model = load_model('./keras_models/my_model.h5')

        x = Input(tensor=images)
        y = model(x)

        model.summary()
        compare_value = tf.Variable(100.0)
        bool_out = tf.math.greater(y, compare_value)

        bool_out = bool_out[:,0]

        bool_out = tf.cast(bool_out, tf.float32)
        bool_out = tf.expand_dims(bool_out, axis=0)
        final_out = tf.concat([tf.transpose(y), bool_out], axis=0)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)

            # predict_tensor_input_info = tf.saved_model.utils.build_tensor_info(jpegs)
            # predict_tensor_score_info = tf.saved_model.utils.build_tensor_info(bool_out)
            prediction_signature = \
                (tf.saved_model.signature_def_utils.predict_signature_def(
                    inputs={'images': jpegs},
                    outputs={'scores': final_out}
                )
            )

            export_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(FLAGS.model_version)
            )

            builder = saved_model_builder.SavedModelBuilder(export_path)

            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')

            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                signature_def_map={
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature,
                },
                legacy_init_op=legacy_init_op
            )

            builder.save()

if __name__ == "__main__":
    tf.app.run()
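For context, I serve the exported directory with the standard model server, using something like this (the base path is a placeholder and must be absolute):

tensorflow_model_server --rest_api_port=8501 \
    --model_name=pointer_model \
    --model_base_path=/absolute/path/to/model_output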

And here is my client program:

import base64
import requests
import json
import argparse
import time
from glob import glob

image_path = glob('./segmented_image/*.jpg')


for i in range(len(image_path)):

    input_image = open(image_path[i], 'rb').read()

    encoded_input_string = base64.b64encode(input_image)
    input_string = encoded_input_string.decode('utf-8')
    # input_image_recover = base64.b64decode(input_string)
    # with open('recovered_image.jpg', 'wb') as output_file:
    #     output_file.write(input_image_recover)
    #
    # print('Base64 encoded string: ' + input_string[:10] + '...' + input_string[-10:])

    instance = [{"b64": input_string}]
    data = json.dumps({"instances": instance})
    print(data[:30]+ '...' + data[-10:])

    json_response = requests.post('http://localhost:8501/v1/models/pointer_model:predict',
                                  data=data)

    print(json_response.text)
    end_time = time.time()

The output of json_response.text is as follows:

......

The first three values under the predictions key should be the degree and the x, y coordinates in the image, which should be values in the hundreds... and the last value is the True/False result of the comparison against 100.0, cast to float32.
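For reference, a well-formed response for one image under this signature would look something like this (the numbers are made up):

{"predictions": [[123.4], [210.0], [330.0], [1.0]]}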

OK... finally, I also tested my model with model.predict, and it gives the correct answers...
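Here is a minimal sketch of that check (the file name is a placeholder; the preprocessing mirrors the serving graph, i.e. scale to [0, 1] and resize to 224x224):

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model('./keras_models/my_model.h5')

# Placeholder sample image; load_img resizes it to the expected 224x224
img = image.load_img('./segmented_image/example.jpg', target_size=(224, 224))
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)

print(model.predict(x))  # the three raw values come out correct here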

Now I am completely confused. Can someone tell me where my code goes wrong?

1 Answer:

Answer 0 (score: 0)

Use my script to export in TensorFlow Serving format:

import sys
from keras.models import load_model
import tensorflow as tf
from keras import backend as K
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants


K.set_learning_phase(0)
K.set_image_data_format('channels_last')

INPUT_MODEL = sys.argv[1]             # path to the trained Keras .h5 model
NUMBER_OF_OUTPUTS = 1
OUTPUT_NODE_PREFIX = 'output_node'    # prefix used to rename the output tensors
OUTPUT_FOLDER = 'frozen'
OUTPUT_GRAPH = 'frozen_model.pb'
OUTPUT_SERVABLE_FOLDER = sys.argv[2]  # directory where the SavedModel is written
INPUT_TENSOR = sys.argv[3]            # name of the model's input tensor, e.g. 'input_1:0'


try:
    model = load_model(INPUT_MODEL)
except ValueError as err:
    print('Please check the input saved model file')
    raise err

output = [None]*NUMBER_OF_OUTPUTS
output_node_names = [None]*NUMBER_OF_OUTPUTS
for i in range(NUMBER_OF_OUTPUTS):
    output_node_names[i] = OUTPUT_NODE_PREFIX+str(i)
    output[i] = tf.identity(model.outputs[i], name=output_node_names[i])
print('Output Tensor names: ', output_node_names)


sess = K.get_session()
try:
    frozen_graph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), output_node_names)
    graph_io.write_graph(frozen_graph, OUTPUT_FOLDER, OUTPUT_GRAPH, as_text=False)
    print(f'Frozen graph ready for inference/serving at {OUTPUT_FOLDER}/{OUTPUT_GRAPH}')
except:
    print('Error Occurred')



builder = tf.saved_model.builder.SavedModelBuilder(OUTPUT_SERVABLE_FOLDER)

with tf.gfile.GFile(f'{OUTPUT_FOLDER}/{OUTPUT_GRAPH}', "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}
OUTPUT_TENSOR = output_node_names
with tf.Session(graph=tf.Graph()) as sess:
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    inp = g.get_tensor_by_name(INPUT_TENSOR)
    out = g.get_tensor_by_name(OUTPUT_TENSOR[0] + ':0')

    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"input": inp}, {"outout": out})

    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)
    try:
        builder.save()
        print(f'Model ready for deployment at {OUTPUT_SERVABLE_FOLDER}/saved_model.pb')
        print('Prediction signature :')
        print(sigs['serving_default'])
    except:
        print('Error Occurred, please check the frozen graph')
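Usage is positional: the .h5 model path, the output servable directory, and the input tensor name. As a sketch (the script name and the tensor name 'input_1:0' are assumptions; check your model's actual input name):

python export_servable.py pointer_model.h5 ./servable input_1:0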