BeautifulSoup find_all("img") doesn't work on all websites

Asked: 2017-05-15 17:32:27

Tags: python beautifulsoup python-requests

I'm trying to write a Python script that downloads images from any website. It works, but inconsistently; specifically, find_all("img") turns up no images for the second URL. The script is:

# works for http://proof.nationalgeographic.com/2016/02/02/photo-of-the-day-best-of-january-3/
# but not http://www.nationalgeographic.com/photography/proof/2017/05/lake-chad-desertification/
import requests
from PIL import Image
from io import BytesIO
from bs4 import BeautifulSoup

def url_to_image(url, filename):
    # get HTTP response, open as bytes, save the image
    # http://docs.python-requests.org/en/master/user/quickstart/#binary-response-content
    req = requests.get(url)
    i = Image.open(BytesIO(req.content))
    i.save(filename)

# open page, get HTML request and parse with BeautifulSoup
html = requests.get("http://proof.nationalgeographic.com/2016/02/02/photo-of-the-day-best-of-january-3/")
soup = BeautifulSoup(html.text, "html.parser")

# find all JPEGS in our soup and write their "src" attribute to array
urls = []
for img in soup.find_all("img"):
    if img["src"].endswith("jpg"):
        print("endswith jpg")
        urls.append(str(img["src"]))
    print(str(img))

jpeg_no = 00
for url in urls:
    url_to_image(url, filename="NatGeoPix/" + str(jpeg_no) + ".jpg")
    jpeg_no += 1

1 Answer:

Answer 0 (score: 1)

On the page that fails, the images are rendered with JavaScript, so the HTML that requests fetches never contains them. Render the page first, using dryscrape.

(If you don't want to use dryscrape, see Web-scraping JavaScript page with Python; a Selenium-based sketch is also shown after the example below.)

For example:

import requests
from PIL import Image
from io import BytesIO
from bs4 import BeautifulSoup
import dryscrape

def url_to_image(url, filename):
    # get HTTP response, open as bytes, save the image
    # http://docs.python-requests.org/en/master/user/quickstart/#binary-response-content
    req = requests.get(url)
    i = Image.open(BytesIO(req.content))
    i.save(filename)

# open page, get HTML request and parse with BeautifulSoup

session = dryscrape.Session()
session.visit("http://www.nationalgeographic.com/photography/proof/2017/05/lake-chad-desertification/")
response = session.body()
soup = BeautifulSoup(response, "html.parser")

# find all JPEGS in our soup and write their "src" attribute to array
urls = []
for img in soup.find_all("img"):
    if img.get("src", "").endswith("jpg"):  # get() avoids a KeyError when an <img> has no src
        print("endswith jpg")
        urls.append(str(img["src"]))
        print(str(img))

jpeg_no = 0
for url in urls:
    url_to_image(url, filename="NatGeoPix/" + str(jpeg_no) + ".jpg")
    jpeg_no += 1
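
If dryscrape is hard to install in your environment (it depends on webkit-server), any browser-automation tool yields the same rendered HTML. Here is a minimal, untested sketch using Selenium instead; it assumes the selenium package is installed and a browser driver such as geckodriver is on your PATH:

from bs4 import BeautifulSoup
from selenium import webdriver

# launch a real browser and let it execute the page's JavaScript
driver = webdriver.Firefox()
driver.get("http://www.nationalgeographic.com/photography/proof/2017/05/lake-chad-desertification/")
html = driver.page_source  # the DOM after scripts have run
driver.quit()

soup = BeautifulSoup(html, "html.parser")
urls = [img["src"] for img in soup.find_all("img")
        if img.get("src", "").endswith("jpg")]
print(urls)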

But I would also check that you have an absolute URL and not a relative one:

import requests
from PIL import Image
from io import BytesIO
from bs4 import BeautifulSoup
import dryscrape
from urllib.parse import urljoin


def url_to_image(url, filename):
    # get HTTP response, open as bytes, save the image
    # http://docs.python-requests.org/en/master/user/quickstart/#binary-response-content
    req = requests.get(url)
    i = Image.open(BytesIO(req.content))
    i.save(filename)

# open page, get HTML request and parse with BeautifulSoup
base = "http://www.nationalgeographic.com/photography/proof/2017/05/lake-chad-desertification/"
session = dryscrape.Session()
session.visit(base)
response = session.body()
soup = BeautifulSoup(response, "html.parser")

# find all JPEGS in our soup and write their "src" attribute to array
urls = []
for img in soup.find_all("img"):
    if img.get("src", "").endswith("jpg"):  # get() avoids a KeyError when an <img> has no src
        print("endswith jpg")
        urls.append(str(img["src"]))
        print(str(img))

jpeg_no = 0
for url in urls:
    if url.startswith('http'):
        absolute = url
    else:
        absolute = urljoin(base, url)
    print(absolute)
    url_to_image(absolute, filename="NatGeoPix/" + str(jpeg_no) + ".jpg")
    jpeg_no += 1
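
Note that urljoin already returns its second argument unchanged when that argument is an absolute URL, so the startswith('http') branch above is defensive rather than strictly necessary. A quick demonstration (the CDN hostname is just an illustrative placeholder):

from urllib.parse import urljoin

base = "http://www.nationalgeographic.com/photography/proof/2017/05/lake-chad-desertification/"

# a relative src is resolved against the page URL
print(urljoin(base, "images/photo.jpg"))
# http://www.nationalgeographic.com/photography/proof/2017/05/lake-chad-desertification/images/photo.jpg

# an absolute src passes through untouched
print(urljoin(base, "http://cdn.example.com/photo.jpg"))
# http://cdn.example.com/photo.jpg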