I tried to download all the images from 'https://www.nytimes.com/section/todayspaper' with the following code:
import requests
from io import open as iopen
from urlparse import urlsplit

file_url = 'https://www.nytimes.com/section/todayspaper'

def requests_image(file_url):
    suffix_list = ['jpg', 'gif', 'png', 'tif', 'svg']
    file_name = urlsplit(file_url)[2].split('/')[-1]
    file_suffix = file_name.split('.')[1]
    i = requests.get(file_url)
    if file_suffix in suffix_list and i.status_code == requests.codes.ok:
        with iopen(file_name, 'wb') as file:
            file.write(i.content)
    else:
        return False
It ran without any errors:
>>>
>>>
But I don't know where the images were downloaded on my PC. I checked the Downloads folder, but they are not there.
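As a side note, a bare filename passed to iopen(file_name, 'wb') is written to the Python process's current working directory, not the Downloads folder. A minimal sketch for checking where that is (the filename below is only a hypothetical placeholder):

import os

# Files opened with a bare name are created in the current working directory
print(os.getcwd())
print(os.path.abspath('example.jpg'))  # hypothetical file name for illustration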
Answer 0 (score: 1)
If you want to download all the images on the page, you should: download the page HTML, find all the image tags (<img>), read the content of each tag's src attribute, and download those files.

import os
import hashlib
import requests
from bs4 import BeautifulSoup
page_url = 'https://www.nytimes.com/section/todayspaper'
# Download page html
page_data = requests.get(page_url).text
# Find all links in page
images_urls = [
    image.attrs.get('src')
    for image in BeautifulSoup(page_data, 'lxml').find_all('img')
]
# Clean empty links (<img src="" /> <img> etc)
images_urls = [
    image_url
    for image_url in images_urls
    if image_url and len(image_url) > 0
]
# Download files
def download_image(source_url, dest_dir):
    # TODO: add filename extension
    # Use an md5 hash of the source URL as a unique file name
    image_name = hashlib.md5(source_url.encode()).hexdigest()
    with open(os.path.join(dest_dir, image_name), 'wb') as f:
        image_data = requests.get(source_url).content
        f.write(image_data)

# Make sure the destination directory exists before writing into it
os.makedirs('./tmp', exist_ok=True)

for image_url in images_urls:
    download_image(image_url, './tmp')
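One caveat not handled above: src attributes are often relative or protocol-relative (e.g. starting with / or //), and requests.get() only accepts absolute URLs. A minimal sketch, assuming the page_url, images_urls, and download_image names from the code above, that resolves such links with urllib.parse.urljoin before downloading:

from urllib.parse import urljoin

# Resolve relative and protocol-relative src values against the page URL
# so that requests.get() always receives an absolute URL.
absolute_urls = [urljoin(page_url, image_url) for image_url in images_urls]

for image_url in absolute_urls:
    download_image(image_url, './tmp')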