gunicorn: responding correctly with multiple threads and TensorFlow

Date: 2017-06-07 08:48:04

Tags: tensorflow gunicorn

I added a RESTful interface with Flask to align images. When I run the code with gunicorn, it stops at

out = pnet(img_y)

while align.detect_face.detect_face is computing the bounding_box of an image; it never gets a value back from pnet. But if I start the app with

python faceapi.py

it gets the correct result. But I want to use multiple threads to respond to each request. How can I respond with a thread pool?

pnet comes from align.detect_face.create_mtcnn, which uses the following code:

def create_mtcnn(sess, model_path):
    if not model_path:
        model_path,_ = os.path.split(os.path.realpath(__file__))

    with tf.variable_scope('pnet'):
        data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
        pnet = PNet({'data':data})
        pnet.load(os.path.join(model_path, 'det1.npy'), sess)
    with tf.variable_scope('rnet'):
        data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
        rnet = RNet({'data':data})
        rnet.load(os.path.join(model_path, 'det2.npy'), sess)
    with tf.variable_scope('onet'):
        data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
        onet = ONet({'data':data})
        onet.load(os.path.join(model_path, 'det3.npy'), sess)

    pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
    rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
    onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
    return pnet_fun, rnet_fun, onet_fun
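From that code, pnet, rnet and onet are just closures over the single tf.Session created at import time, so under a threaded server every request thread ends up calling sess.run on the same session. A minimal sketch of the kind of wrapper I am considering to serialise those calls (the lock and the helper name are my own, not part of facenet):

import threading

# One lock shared by all request threads. tf.Session.run is documented to be
# thread-safe, but this keeps the three MTCNN stages from interleaving.
_mtcnn_lock = threading.Lock()

def make_thread_safe(net_fun):
    # net_fun is one of the pnet/rnet/onet closures returned by create_mtcnn.
    def wrapped(img):
        with _mtcnn_lock:
            return net_fun(img)
    return wrapped

# Hypothetical usage, right after create_mtcnn:
# pnet, rnet, onet = [make_thread_safe(f) for f in align.detect_face.create_mtcnn(sess, None)]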

My faceapi.py is shown below:

#faceapi.py
from flask import Flask, jsonify, request
import tensorflow as tf
import align.detect_face
import facenet
from scipy import misc
import numpy as np
import cv2
import base64

app = Flask(__name__)

model_dir = "./20170512-110547"
# The graph, the session and the three MTCNN nets are all created once, at module import time.
with tf.Graph().as_default():
    sess = tf.Session()
    with sess.as_default():
        # Load mtcnn model
        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
        factor = 0.709  # scale factor
        pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

def alignimage(image_files, margin, image_size, minsize, pnet, rnet, onet, threshold, factor):
    nrof_samples = len(image_files)
    img_list = [None] * nrof_samples
    for i in range(nrof_samples):
        # Decode the base64 payload and convert the image from BGR to RGB.
        tmp_image = base64.b64decode(image_files[i])
        imgarray = np.fromstring(tmp_image, dtype=np.uint8)
        imgbgra = cv2.imdecode(imgarray, 1)
        img = cv2.cvtColor(imgbgra, cv2.COLOR_BGR2RGB)

        img_size = np.asarray(img.shape)[0:2]
        # MTCNN face detection; under gunicorn the pnet call inside detect_face is where it hangs.
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0, 0:4])
        # Expand the detected box by the margin and clip it to the image borders.
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened

    # Hand the aligned, prewhitened images back to the caller.
    return img_list

@app.route('/api/facecompare', methods=['POST'])
def facecompare():
    img_list = [None] * 2
    img_list[0] = request.json['imageA']
    img_list[1] = request.json['imageB']
    aligned = alignimage(img_list, 44, 160, minsize, pnet, rnet, onet, threshold, factor)
    # A Flask view must return a response; report how many images were aligned.
    return jsonify({'aligned': len(aligned)})

if __name__ == '__main__':
    app.run()
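To serve requests with a pool of threads rather than a single synchronous worker, the gunicorn invocation I have in mind is a threaded worker along these lines (the worker and thread counts are only placeholders):

gunicorn --workers 1 --threads 4 --worker-class gthread --bind 0.0.0.0:5000 faceapi:app

With a single worker process the model above is loaded only once, and the gthread worker dispatches each incoming request to one of the four threads, which all share the same tf.Session.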

The client calls the RESTful interface like this:

JSON.parse
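For reference, a request that the facecompare handler accepts looks roughly like this (a sketch using the Python requests library; the file names and port are placeholders):

# Hypothetical client call; the endpoint and the imageA/imageB fields
# match the Flask handler in faceapi.py above.
import base64
import requests

def encode_image(path):
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode('ascii')

payload = {'imageA': encode_image('a.jpg'), 'imageB': encode_image('b.jpg')}
resp = requests.post('http://127.0.0.1:5000/api/facecompare', json=payload)
print(resp.status_code, resp.text)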

0 Answers:

There are no answers yet.