I have a list of image URLs stored in a Pandas DataFrame. I want to download all of these images and store them locally.
My code for doing this is:
import os
import requests


def load(df, output_folder):
    print("Ready to load " + str(len(df.index)) + " images.")
    for i, row in df.iterrows():
        print("Image " + str(i))
        save_image_from_url(row["image_url"], os.path.join(output_folder, row["image_name"]))


def save_image_from_url(url, output_path):
    '''From a given URL, download the image and store it at the given path'''
    image = requests.get(url)
    with open(output_path, 'wb') as f:
        f.write(image.content)
The problem is that the process is very slow (0.5 to 4 seconds per image). Is there a way to do this faster?
Answer 0 (score: 2)
The obvious approach is to download in parallel; there is a clear example of this in the docs.
For your case, try the following:
import concurrent.futures
import os
import requests


def save_image_from_url(row, output_folder):
    # Each "row" is a DataFrame row with the image_url and image_name columns.
    image = requests.get(row.image_url)
    output_path = os.path.join(output_folder, row.image_name)
    with open(output_path, "wb") as f:
        f.write(image.content)


def load(df, output_folder):
    # Download up to 5 images at a time; the work is I/O-bound, so threads help.
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        future_to_row = {
            executor.submit(save_image_from_url, row, output_folder): row
            for _, row in df.iterrows()
        }
        for future in concurrent.futures.as_completed(future_to_row):
            row = future_to_row[future]
            try:
                future.result()
            except Exception as exc:
                print("%r generated an exception: %s" % (row, exc))
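
For reference, here is a minimal sketch of how the parallel load function might be called. The column names image_url and image_name and the 5-worker pool come from the snippets above; the sample URLs and the "images" output folder are placeholders, not part of the original question:

import os
import pandas as pd

# Hypothetical sample data; substitute your real DataFrame.
df = pd.DataFrame(
    {
        "image_url": [
            "https://example.com/cat.jpg",
            "https://example.com/dog.jpg",
        ],
        "image_name": ["cat.jpg", "dog.jpg"],
    }
)

output_folder = "images"  # placeholder output directory
os.makedirs(output_folder, exist_ok=True)
load(df, output_folder)   # downloads all images concurrently

Since the work is network-bound rather than CPU-bound, a thread pool is a good fit here; raising max_workers can speed things up further, within the limits of your bandwidth and what the remote server tolerates.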