脚本仅抓取第一页,而不抓取多个页面

时间:2020-01-12 23:43:09

标签: python web-scraping web-crawler

我正在尝试抓取网站的多个页面。但是该程序只能抓取第一页。

import requests
from bs4 import BeautifulSoup
import re
import json
import time

def make_soup(url):
    """Fetch *url* and extract the cached-filters JSON blob that the page
    embeds in its ``window.__WEB_CONTEXT__`` script tag.

    Returns the parsed dict of the form ``{"data": {"cachedFilters": ...}}``.
    """
    html = requests.get(url).text
    page = BeautifulSoup(html, 'lxml')

    # Locate the <script> element that assigns the page manifest and
    # capture the whole manifest object as raw JSON text.
    context_re = re.compile(r'window.__WEB_CONTEXT__={pageManifest:(\{.*\})};')
    script_tag = page.find("script", text=context_re)
    manifest = context_re.search(script_tag.text).group(1)

    # Narrow the manifest down to the numbered entry that carries the
    # cachedFilters payload (keys are 9-12 digit numeric ids).
    entry_re = re.compile(r'\"[0-9]{9,12}\":(\{\"data\":\{\"cachedFilters\":(.*?)\}\}),\"[0-9]{9,11}\"')
    entry_json = entry_re.search(manifest).group(1)

    return json.loads(entry_json)

def get_reviews(dictData):
    """ Return a list of five dicts with reviews.

    Flattens every review found under
    ``dictData['data']['locations'][*]['reviewListPage']['reviews']`` into a
    flat list of dicts carrying the review id, URL, language, creation date
    and the author's display name.
    """
    return [
        {
            "reviewid": review['id'],
            "reviewurl": review['absoluteUrl'],
            "reviewlang": review['language'],
            "reviewdate": review['createdDate'],
            "author": review['userProfile']['displayName'],
        }
        for location in dictData['data']['locations']
        for review in location['reviewListPage']['reviews']
    ]

def main():
    """Scrape the reviews of every listing page of the hotel.

    Page 1 has no offset segment in the URL; every following page embeds an
    offset that grows in steps of 5 (``-or5-``, ``-or10-``, ...), one step
    per page of five reviews.
    """

    url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-Coronado_Hotel-Zurich.html#REVIEWS'

    # First page (no "-orN-" offset in the URL).
    dictData = make_soup(url)
    review_list = get_reviews(dictData) # list with five dicts
    print(review_list)

    page_number = 5

    while page_number <= 260: # offset embedded in the URL, grows by 5 per page
        next_url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or' + str(page_number) + '-Coronado_Hotel-Zurich.html#REVIEWS'
        # BUG FIX: fetch next_url, not url — the original called
        # make_soup(url) here and therefore re-read page 1 on every pass.
        dictData = make_soup(next_url)
        review_list2 = get_reviews(dictData)
        print(review_list2)

        page_number += 5
        time.sleep(0.5)  # throttle requests to be polite to the server

if __name__ == "__main__":
    main()

而且我不确定是否可以使用此URL抓取多个页面。在网站上有54页,但是在URL中我总是必须添加数字5,如下所示:

Page 1
https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-Coronado_Hotel-Zurich.html#REVIEWS

Page 2
https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or5-Coronado_Hotel-Zurich.html#REVIEWS

Page 3
https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or10-Coronado_Hotel-Zurich.html#REVIEWS

我不知道这是个好主意。 你有什么建议吗?先感谢您!

1 个答案:

答案 0 :(得分:1)

您将新的网址添加到next_url,但是您使用url来读取页面。

next_url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or' + str(page_number) + '-Coronado_Hotel-Zurich.html#REVIEWS'
dictData = make_soup(url)

您必须重命名变量

url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or' + str(page_number) + '-Coronado_Hotel-Zurich.html#REVIEWS'
dictData = make_soup(url)