我需要在不同端口上部署ml模型。 这是请求预测值的代码的一部分。
第一个模型:
def main():
p = argparse.ArgumentParser(description="Searcher - For sudrf.ru")
p.add_argument('-c', '--config', metavar='<config.yaml>', dest='config_file', required=True,
help='Config file.')
p.add_argument('--loglevel', metavar='<LOG_LEVEL>', dest='log_level', choices=['INFO', 'info', 'DEBUG', 'debug'], default='info',
help='Logger level: info|debug')
args = p.parse_args()
print('Config file: "%s"' % args.config_file)
config = Config(args.config_file)
if not config.is_loaded():
print(config.get_errmsg())
return 1
# create pid file
pidlock = PidLock('courts_search_httpservice.pid')
pid = pidlock.getpid()
if pid:
print('Service is already running with pid %s' % pid)
return 1
pidlock.lock()
print('PID file: %s' % pidlock.get_pidfile())
logging.basicConfig(level=args.log_level.upper(), format='%(asctime)s:courts_search:%(levelname)s\t%(message)s',
filemode='w', filename=config.fields.get('log_file', 'court_search.log'))
# bind on host-port
host = config.fields.get('host', '0.0.0.0')
port = config.fields.get('port', 8080)
logging.info('Binding on address %s:%s' % (host, port))
http_handler_type = HttpHandler
http_handler_type.error_content_type = CONTENT_TYPE
server = ThreadedHTTPServer((host, port), http_handler_type)
# init indexer and query parser
# TODO: fixup path if it's not absolute
logging.info('Loading model' + config.fields['graph_file'])
server.model = Model(config.fields['checkpoint_path'], config.fields['graph_file'], config.fields['data_file'], config.fields['threshold_l'], config.fields['threshold_h'])
logging.info('Done.')
# start serving
print('Starting server, use <Ctrl-C> to stop')
logging.info('Processing incoming requests...')
try:
server.serve_forever()
except KeyboardInterrupt:
logging.info('[STOP] KeyboardInterrupt received')
server.socket.close()
return 0
我尝试将port
更改为8000
,但它返回
Service is already running with pid 19931
我应该更改那里的以并行运行2个模型吗? 我不能只更改端口以并行使用它吗?