我已经编写了下面提到的代码,但是我希望该过程在开始下一批之前不要等待命令完成。这段代码正在休眠18秒。
import urllib.request
import subprocess
import time
import os
URLS = [ 'sleep 10','sleep 18','echo hi'] * 10
#stdout=subprocess.DEVNULL,
def load_url(url, timeout):
FNULL = open(os.devnull, 'w')
result = subprocess.call(url, shell=True, stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
count = 0
x= True
starttime = time.time()
while True:
count = count + 5
print ("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
#time.sleep(2)
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
future_to_url = {executor.submit(load_url, url, 0): url for url in URLS}
for future in concurrent.futures.as_completed(future_to_url):
url = future_to_url[future]
try:
data = future.result()
pass
except Exception as exc:
pass
else:
pass