网络抓取Python

时间:2019-10-29 01:21:08

标签: python

大家好,我是 Python 初学者,想做一个能帮我省下不少工夫的小工具。我写了下面这段小代码,用来筛选出页面中的 img 标签。

import requests 
from bs4 import BeautifulSoup 

# Ask the user for the movie title and year.
# A year of 1 is the sentinel meaning "search without a year filter".
t = input('Digite o Nome do Filme:')
ano = int(input('Digite o Ano do Filme:'))

# Build the search URL once; the two original branches differed only in the URL.
if ano == 1:
    url = 'https://www.themoviedb.org/search?query=' + t + '&language=pt-BR'
else:
    # "%20y%3A" is the URL-encoded " y:<year>" filter understood by TMDB search.
    url = 'https://www.themoviedb.org/search?query=' + t + '%20y%3A' + str(ano) + '&language=pt-BR'

req = requests.get(url)
bs = BeautifulSoup(req.text, 'lxml')
print(bs.find_all('img'))

然后,我完成了获取图像链接并将其显示在控制台上的另一部分。

import io
import os
import requests
import tempfile
from PIL import Image
from matplotlib import pyplot as plt

img_url = 'https://image.tmdb.org/t/p/w500_and_h282_face/dKxkwAJfGuznW8Hu0mhaDJtna0n.jpg'

# Spool up to ~1 GB in memory before overflowing to disk; max_size must be an int.
# The context manager guarantees the buffer is closed even if an error occurs.
with tempfile.SpooledTemporaryFile(max_size=10**9) as buffer:
    r = requests.get(img_url, stream=True)
    if r.status_code == 200:
        downloaded = 0
        # The Content-Length header may be absent; fall back to 0 to avoid a KeyError.
        filesize = int(r.headers.get('content-length', 0))
        # Read in 8 KiB chunks: iter_content()'s default chunk_size is 1 byte.
        for chunk in r.iter_content(chunk_size=8192):
            downloaded += len(chunk)
            buffer.write(chunk)
            if filesize:  # only report progress when the total size is known
                print(downloaded / filesize)
        buffer.seek(0)
        i = Image.open(io.BytesIO(buffer.read()))
        i.save(os.path.join('.', 'image.jpg'), quality=85)

        # Display only when the download succeeded; previously `i` was referenced
        # unconditionally and raised NameError on any non-200 response.
        plt.imshow(i)
        plt.show()

所以我想知道如何让 img_url 变量自动获取 bs.find_all('img') 打印出来的那些 URL,或者是否已经有现成的库可以做到这一点。

2 个答案:

答案 0 :(得分:0)

我对您的第一段代码做了一处修改,现在您可以获取 class 为 "poster fade lazyautosizes lazyloaded" 的每个 img 标签的链接。我建议您把第二段代码封装成一个函数,然后在此脚本中调用它。

import requests
from bs4 import BeautifulSoup

t = input('Digite o Nome do Filme:')
ano = int(input('Digite o Ano do Filme:'))

# Build the search URL once; a year of 1 means "no year filter".
# The original duplicated the whole fetch/parse/print loop in both branches.
if ano == 1:
    url = 'https://www.themoviedb.org/search?query=' + t + '&language=pt-BR'
else:
    url = 'https://www.themoviedb.org/search?query=' + t + '%20y%3A' + str(ano) + '&language=pt-BR'

req = requests.get(url)
bs = BeautifulSoup(req.text, 'lxml')
# Poster <img> tags live under div.image_content > a; the lazily-loaded
# image URL is carried in the data-src attribute, not src.
elements = bs.select('div.image_content > a > img')
for element in elements:
    print("LINK")
    print(element['data-src'])
    # Placeholder: pass the poster URL on to the matplotlib download script,
    # wrapped as a function (defined elsewhere by the reader).
    myfunctionmatplotlib(element['data-src'])

答案 1 :(得分:0)

我编写了此新代码。您可以尝试下载新的网址图片。我还编写了一个代码来从关键字中获取图片的网址,如果可以解决您的查询,我可以与您分享:

""" Download image according to given urls and automatically rename them in order. """
# -*- coding: utf-8 -*-

from __future__ import print_function

import shutil
import imghdr
import os
import concurrent.futures
import requests

# Browser-like HTTP request headers sent with every download so that servers
# which reject unknown clients still serve the image.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Proxy-Connection": "keep-alive",
    # Impersonate Chrome 54 on Windows 10.
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36",
    "Accept-Encoding": "gzip, deflate, sdch",
    # 'Connection': 'close',
}


def download_image(image_url, dst_dir, file_name, timeout=20, proxy_type=None, proxy=None):
    """Download one image, detect its real type, and save it with the right extension.

    Retries up to 3 times on any failure (best-effort; errors are printed, not raised).

    :param image_url: URL of the image to fetch
    :param dst_dir: directory the file is written to
    :param file_name: base name (without extension) for the saved file
    :param timeout: per-request timeout in seconds
    :param proxy_type: proxy scheme ("http", "socks5", ...), or None for no proxy
    :param proxy: proxy "host:port"; used only when proxy_type is given
    :return: None
    """
    proxies = None
    if proxy_type is not None:
        proxies = {
            "http": proxy_type + "://" + proxy,
            "https": proxy_type + "://" + proxy
        }

    file_path = os.path.join(dst_dir, file_name)
    try_times = 0
    while True:
        response = None
        try:
            try_times += 1
            response = requests.get(
                image_url, headers=headers, timeout=timeout, proxies=proxies)
            # Context manager guarantees the file handle is closed even if the
            # write fails (the original leaked the handle on error).
            with open(file_path, 'wb') as f:
                f.write(response.content)
            # Sniff the actual image type from the file's magic bytes; servers lie.
            file_type = imghdr.what(file_path)
            if file_type in ["jpg", "jpeg", "png", "bmp"]:
                new_file_name = "{}.{}".format(file_name, file_type)
                new_file_path = os.path.join(dst_dir, new_file_name)
                shutil.move(file_path, new_file_path)
                print("## OK:  {}  {}".format(new_file_name, image_url))
            else:
                # Not a supported image (e.g. an HTML error page) - discard it.
                os.remove(file_path)
                print("## Err:  {}".format(image_url))
            break
        except Exception as e:
            # Deliberately broad: this is a best-effort bulk downloader.
            if try_times < 3:
                continue
            print("## Fail:  {}  {}".format(image_url, e.args))
            break
        finally:
            # Always release the connection; the original leaked it on the
            # retry path (continue without close).
            if response is not None:
                response.close()


def download_images(image_urls, dst_dir, file_prefix="img", concurrency=50, timeout=20, proxy_type=None, proxy=None):
    """
    Download images concurrently and automatically name them in order.

    :param image_urls: list of image urls
    :param dst_dir: output directory for the downloaded images; created if missing
    :param file_prefix: if set to "img", files will be in format "img_0000.jpg"
    :param concurrency: number of requests processed simultaneously
    :param timeout: per-request timeout in seconds (forwarded to download_image)
    :param proxy_type: proxy scheme or None (forwarded to download_image)
    :param proxy: proxy "host:port" (forwarded to download_image)
    :return: None
    """
    # Create the destination directory before starting any workers.
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
        # enumerate replaces the original hand-rolled counter.
        future_list = [
            executor.submit(download_image, image_url, dst_dir,
                            "{}_{:04d}".format(file_prefix, index),
                            timeout, proxy_type, proxy)
            for index, image_url in enumerate(image_urls)
        ]
        # Give the whole batch at most 3 minutes to finish.
        concurrent.futures.wait(future_list, timeout=180)

希望这对您有帮助=)