I have deployed an object detection model to Google Kubernetes Engine. My model was trained using the faster_rcnn_resnet101_pets configuration. The inference time of my model is very high (~10 seconds total for a prediction), even though I am using an Nvidia Tesla K80 GPU in my cluster nodes. I am using gRPC to get predictions from the model. The script that makes the prediction requests is:
import argparse
import os
import time
import sys
import tensorflow as tf
from PIL import Image
import numpy as np
from grpc.beta import implementations
sys.path.append("..")
from object_detection.core.standard_fields import \
    DetectionResultFields as dt_fields
from object_detection.utils import label_map_util
from argparse import RawTextHelpFormatter
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
tf.logging.set_verbosity(tf.logging.INFO)
WIDTH = 1024
HEIGHT = 768
def load_image_into_numpy_array(input_image):
    image = Image.open(input_image)
    image = image.resize((WIDTH, HEIGHT), Image.ANTIALIAS)
    (im_width, im_height) = image.size
    image_arr = np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
    image.close()
    return image_arr

def load_input_tensor(input_image):
    image_np = load_image_into_numpy_array(input_image)
    image_np_expanded = np.expand_dims(image_np, axis=0).astype(np.uint8)
    tensor = tf.contrib.util.make_tensor_proto(image_np_expanded)
    return tensor
def main(args):
    start_main = time.time()
    host, port = args.server.split(':')
    channel = implementations.insecure_channel(host, int(port))._channel
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = args.model_name
    input_tensor = load_input_tensor(args.input_image)
    request.inputs['inputs'].CopyFrom(input_tensor)
    start = time.time()
    result = stub.Predict(request, 60.0)
    end = time.time()
    output_dict = {}
    output_dict[dt_fields.detection_classes] = np.squeeze(
        result.outputs[dt_fields.detection_classes].float_val).astype(np.uint8)
    output_dict[dt_fields.detection_boxes] = np.reshape(
        result.outputs[dt_fields.detection_boxes].float_val, (-1, 4))
    output_dict[dt_fields.detection_scores] = np.squeeze(
        result.outputs[dt_fields.detection_scores].float_val)
    category_index = label_map_util.create_category_index_from_labelmap(
        args.label_map, use_display_name=True)
    classes = output_dict[dt_fields.detection_classes]
    scores = output_dict[dt_fields.detection_scores]
    classes.shape = (1, 300)
    scores.shape = (1, 300)
    print("prediction time : " + str(end - start))
    objects = []
    threshold = 0.5  # in order to get higher percentages you need to lower this number; usually at 0.01 you get 100% predicted objects
    for index, value in enumerate(classes[0]):
        object_dict = {}
        if scores[0, index] > threshold:
            object_dict[(category_index.get(value)).get('name').encode('utf8')] = \
                scores[0, index]
            objects.append(object_dict)
    print(objects)
    end_main = time.time()
    print("Overall Time : " + str(end_main - start_main))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Object detection grpc client.",
                                     formatter_class=RawTextHelpFormatter)
    parser.add_argument('--server',
                        type=str,
                        default='localhost:9000',
                        help='PredictionService host:port')
    parser.add_argument('--model_name',
                        type=str,
                        default="my-model",
                        help='Name of the model')
    parser.add_argument('--input_image',
                        type=str,
                        default='./test_images/123.jpg',
                        help='Path to input image')
    parser.add_argument('--output_directory',
                        type=str,
                        default='./',
                        help='Path to output directory')
    parser.add_argument('--label_map',
                        type=str,
                        default="./data/object_detection.pbtxt",
                        help='Path to label map file')
    args = parser.parse_args()
    main(args)
I have used kubectl port forwarding for testing purposes, so the request port is set to localhost:9000.
The output is:
prediction time : 6.690936326980591
[{b'goi_logo': 0.9999970197677612}]
Overall Time : 10.25893259048462
What can I do to make my inference faster? I have seen inference times in the order of milliseconds, so by comparison 10 seconds is a very long duration and unfit for production environments. I understand that port forwarding is slow. What is another method that I can use? I need to make this client available to the world as an API endpoint.
Answer 0 (score: 2)
As mentioned, you should indeed try to perform multiple requests, because tf-serving needs some overhead the first time. You can prevent this by using a warm-up script.
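For example, a minimal warm-up sketch along these lines (it reuses the load_input_tensor helper from your script; the server address, model name, dummy image path, and call count are placeholders for your own values):

import time
import grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc

def warm_up(server='localhost:9000', model_name='my-model', n=5):
    # Build the channel and request once; channel setup and the first
    # Predict call carry most of the one-time overhead.
    channel = grpc.insecure_channel(server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.inputs['inputs'].CopyFrom(load_input_tensor('./test_images/123.jpg'))
    for i in range(n):
        start = time.time()
        stub.Predict(request, 60.0)
        print("warm-up call %d: %.3fs" % (i, time.time() - start))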
To add some other options:
You could also serve the model over the HTTP REST API. Then you can call the service you created on GKE from a Google Compute Engine instance to reduce the connection lag. In my case it gave a big speed-up, because my local connection was mediocre at best. Besides the HTTP REST API being easier to debug, you can also send much bigger requests: the gRPC limit seems to be 1.5 MB, while the HTTP one is much higher.
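For illustration, a sketch of the same request over the REST API (this assumes TensorFlow Serving was started with a REST port, e.g. --rest_api_port=8501, and that your serving signature takes a uint8 image batch; both are assumptions about your deployment):

import json
import requests

def predict_rest(image_np_expanded, server='localhost:8501', model_name='my-model'):
    # TensorFlow Serving exposes REST predictions at /v1/models/<name>:predict
    url = "http://%s/v1/models/%s:predict" % (server, model_name)
    payload = {"instances": image_np_expanded.tolist()}
    response = requests.post(url, data=json.dumps(payload))
    response.raise_for_status()
    return response.json()["predictions"]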
Are you sending b64-encoded images? Sending the image itself is a lot slower than sending a b64-encoded string. The way I dealt with this is to send b64-encoded strings of the images and create some extra layers in front of the network that transform the string back into a JPEG image, which is then processed by the model. Some code to help you:
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model
import numpy as np
import cv2
import tensorflow as tf
from keras.layers import Input, Lambda
from keras import backend as K

base_model = InceptionV3(
    weights='imagenet',
    include_top=True)
model = Model(
    inputs=base_model.input,
    outputs=base_model.get_layer('avg_pool').output)

def prepare_image(image_str_tensor):
    # image = tf.squeeze(tf.cast(image_str_tensor, tf.string), axis=[0])
    image_str_tensor = tf.cast(image_str_tensor, tf.string)
    image = tf.image.decode_jpeg(image_str_tensor, channels=3)
    # image = tf.divide(image, 255)
    # image = tf.expand_dims(image, 0)
    image = tf.image.convert_image_dtype(image, tf.float32)
    return image

def prepare_image_batch(image_str_tensor):
    return tf.map_fn(prepare_image, image_str_tensor, dtype=tf.float32)

# IF BYTE STR
model.layers.pop(0)
print(model.layers[0])

input_img = Input(dtype=tf.string, name='string_input', shape=())
outputs = Lambda(prepare_image_batch)(input_img)
outputs = model(outputs)
inception_model = Model(input_img, outputs)
inception_model.compile(optimizer="sgd", loss='categorical_crossentropy')
weights = inception_model.get_weights()
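For completeness, the matching client side would then send the compressed image bytes instead of the decoded pixel array. A sketch, assuming an exported signature whose string input key matches the Input layer above (hypothetical; it must match your own export): over gRPC you put the raw JPEG bytes in a string tensor, while over the REST API the same bytes go base64-encoded inside a {"b64": ...} wrapper, which TensorFlow Serving decodes for string tensors.

import base64
import tensorflow as tf

def load_jpeg_bytes_tensor(input_image):
    # The compressed JPEG is a fraction of the size of the decoded
    # uint8 array that make_tensor_proto serializes in the question's script.
    with open(input_image, 'rb') as f:
        jpeg_bytes = f.read()
    return tf.contrib.util.make_tensor_proto([jpeg_bytes], dtype=tf.string)

def to_rest_payload(input_image):
    # For the HTTP REST API, binary string inputs are sent base64-encoded
    # and wrapped as {"b64": ...}.
    with open(input_image, 'rb') as f:
        return {"instances": [{"b64": base64.b64encode(f.read()).decode('utf-8')}]}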