我写了一个基于 Flask 的深度学习服务。为了提高效率,会话(session)和模型都加载到了全局变量中。由于某些预测较慢,我使用 gevent 的 WSGIServer 来实现非阻塞访问。但我担心多个并发请求同时使用同一个全局 TensorFlow 会话时会相互干扰,从而得到错误的结果。有什么方法可以确保线程安全?
# gevent's monkey.patch_all() must run BEFORE any module that uses the
# standard library's socket/threading/ssl machinery is imported; modules
# imported earlier keep references to the unpatched, blocking versions
# and defeat gevent's cooperative scheduling.
from gevent import monkey
monkey.patch_all()

# stdlib
import os
import json

# third-party (imported after patching so they pick up gevent-aware primitives)
import numpy as np
import tensorflow as tf
from flask import Flask, request
from flask_cors import CORS  # explicit import instead of `from flask_cors import *`
from gevent.pywsgi import WSGIServer

app = Flask(__name__)
CORS(app, supports_credentials=True)

# load model and create global prediction session
# some codes
@app.route('/service1', methods=['POST'])
def service():  # fixed: original `def service:` is a SyntaxError (missing parentheses)
    """Handle a POST prediction request using the global TF session/model.

    Thread-safety note for the question in this post: TensorFlow's
    documentation states that a tf.Session object is thread-safe for
    concurrent Session.run() calls, so concurrent gevent greenlets
    calling run() on the same global session will not corrupt each
    other's results. What is NOT safe is mutating the graph or shared
    Python-side state between request handlers; if such mutation exists,
    guard it with a lock (gevent's monkey patching makes
    threading.Lock greenlet-aware). -- NOTE(review): confirm against
    the TF version actually deployed.

    Returns:
        The prediction response (placeholder -- original body was elided
        as "# some codes using the global session").
    """
    # some codes using the global session
    pass
if __name__ == '__main__':
    # Emit JSON responses as raw UTF-8 rather than \u-escaped ASCII.
    app.config['JSON_AS_ASCII'] = False
    # Serve over TLS on all interfaces, port 5000, via gevent's
    # non-blocking WSGI server (key/cert paths redacted in the post).
    server = WSGIServer(
        ('0.0.0.0', 5000),
        app,
        keyfile='*****.key',
        certfile='******.pem',
    )
    server.serve_forever()