I am trying to use multiprocessing.Pool to download and extract zip files. However, every time the script runs, only 3 zips get downloaded and the remaining files are nowhere to be seen in the directory (CPU usage also hits 100%). Can someone help me fix this or suggest a better approach? My code is below. I am completely new to multiprocessing; my goal is to download several files in parallel without maxing out the CPU.
import StringIO
import os
import sys
import zipfile
from multiprocessing import Pool, cpu_count
import requests

filePath = os.path.dirname(os.path.abspath(__file__))
print("filePath is %s " % filePath)
sys.path.append(filePath)

url = ["http://mlg.ucd.ie/files/datasets/multiview_data_20130124.zip",
       "http://mlg.ucd.ie/files/datasets/movielists_20130821.zip",
       "http://mlg.ucd.ie/files/datasets/bbcsport.zip",
       "http://mlg.ucd.ie/files/datasets/movielists_20130821.zip",
       "http://mlg.ucd.ie/files/datasets/3sources.zip"]

def download_zips(url):
    file_name = url.split("/")[-1]
    response = requests.get(url)
    sourceZip = zipfile.ZipFile(StringIO.StringIO(response.content))
    print("\n Downloaded {} ".format(file_name))
    sourceZip.extractall(filePath)
    print("extracted {} \n".format(file_name))
    sourceZip.close()

if __name__ == "__main__":
    print("There are {} CPUs on this machine ".format(cpu_count()))
    pool = Pool(cpu_count())
    results = pool.map(download_zips, url)
    pool.close()
    pool.join()
Output below:
filePath is C:\Users\Documents\GitHub\Python-Examples-Internet\multi_processing
There are 4 CPUs on this machine
filePath is C:\Users\Documents\GitHub\Python-Examples-Internet\multi_processing
filePath is C:\Users\Documents\GitHub\Python-Examples-Internet\multi_processing
filePath is C:\Users\Documents\GitHub\Python-Examples-Internet\multi_processing
filePath is C:\Users\Documents\GitHub\Python-Examples-Internet\multi_processing
Downloaded bbcsport.zip
extracted bbcsport.zip
Downloaded 3sources.zip
extracted 3sources.zip
Downloaded multiview_data_20130124.zip
Downloaded movielists_20130821.zip
Downloaded movielists_20130821.zip
extracted multiview_data_20130124.zip
extracted movielists_20130821.zip
extracted movielists_20130821.zip
Answer 0 (score: 1)
I made a few small tweaks to your function and it works fine. Please note that:

- ".../movielists_20130821.zip" appears in your list twice, so you are downloading the same thing twice (maybe a typo?).
- ".../multiview_data_20130124.zip", ".../movielists_20130821.zip" and ".../3sources.zip" each create a new directory when extracted. ".../bbcsport.zip", however, drops its files straight into the root of the current working directory. Maybe you missed this? (A sketch for checking the layout follows the code below.)

import sys, os
import zipfile
import requests
from multiprocessing import Pool, cpu_count
from functools import partial
from io import BytesIO

def download_zip(url, filePath):
    try:
        file_name = url.split("/")[-1]
        response = requests.get(url)
        sourceZip = zipfile.ZipFile(BytesIO(response.content))
        print(" Downloaded {} ".format(file_name))
        sourceZip.extractall(filePath)
        print(" extracted {}".format(file_name))
        sourceZip.close()
    except Exception as e:
        print(e)

if __name__ == "__main__":
    filePath = os.path.dirname(os.path.abspath(__file__))
    print("filePath is %s " % filePath)
    # sys.path.append(filePath)  # why do you need this?

    urls = ["http://mlg.ucd.ie/files/datasets/multiview_data_20130124.zip",
            "http://mlg.ucd.ie/files/datasets/movielists_20130821.zip",
            "http://mlg.ucd.ie/files/datasets/bbcsport.zip",
            "http://mlg.ucd.ie/files/datasets/movielists_20130821.zip",
            "http://mlg.ucd.ie/files/datasets/3sources.zip"]

    print("There are {} CPUs on this machine ".format(cpu_count()))
    pool = Pool(cpu_count())
    download_func = partial(download_zip, filePath=filePath)
    results = pool.map(download_func, urls)
    pool.close()
    pool.join()
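On the two points above, here is a minimal sketch of how one might verify the extraction layout and drop the duplicate URL up front, using zipfile.ZipFile.namelist(). The helper name top_level_entries is illustrative and not part of the original answer; it assumes the urls list from the snippet above:

# Illustrative sketch, not part of the original answer.
import zipfile
from io import BytesIO
import requests

def top_level_entries(url):
    # Download the archive into memory and return the first path
    # component of every member, i.e. what will land in the target folder.
    response = requests.get(url)
    with zipfile.ZipFile(BytesIO(response.content)) as z:
        return {name.split("/")[0] for name in z.namelist()}

unique_urls = list(dict.fromkeys(urls))  # drop the duplicate URL, keep order
for u in unique_urls:
    print(u.split("/")[-1], "->", top_level_entries(u))

An archive that extracts into its own directory will show a single top-level name here; bbcsport.zip should instead show many, confirming it spills into the working directory.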
Answer 1 (score: 0)
I suggest you use multithreading for this instead, since it is I/O-bound, like so:
import requests, zipfile, io
import os
import concurrent.futures

# Extract next to this script, as in the original code.
filePath = os.path.dirname(os.path.abspath(__file__))

urls = ["http://mlg.ucd.ie/files/datasets/multiview_data_20130124.zip",
        "http://mlg.ucd.ie/files/datasets/movielists_20130821.zip",
        "http://mlg.ucd.ie/files/datasets/bbcsport.zip",
        "http://mlg.ucd.ie/files/datasets/movielists_20130821.zip",
        "http://mlg.ucd.ie/files/datasets/3sources.zip"]

def download_zips(url):
    file_name = url.split("/")[-1]
    response = requests.get(url)
    sourceZip = zipfile.ZipFile(io.BytesIO(response.content))
    print("\n Downloaded {} ".format(file_name))
    sourceZip.extractall(filePath)
    print("extracted {} \n".format(file_name))
    sourceZip.close()

with concurrent.futures.ThreadPoolExecutor() as executor:
    executor.map(download_zips, urls)
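Since the original concern was CPU usage: these threads spend most of their time waiting on the network, so CPU load should stay low. If you still want to cap how many downloads run at once, ThreadPoolExecutor accepts a max_workers argument; the value 4 below is illustrative:

# Cap simultaneous downloads at 4 (value is illustrative).
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    executor.map(download_zips, urls)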