Paginated web scraping in Python 3 with BS4 and a while loop

Posted: 2018-06-30 10:12:56

Tags: python-3.x web-scraping beautifulsoup

I have finished scraping one page and extracted the href for the next page.

I can't get the scraper to loop over each subsequent page. I tried a while True loop, but that kills the results I got from the first page.

This code works fine for the first page:

import bs4
from urllib.request import urlopen as ireq
from bs4 import BeautifulSoup as soup

myurl = ('https://www.podiuminfo.nl/concertagenda/')
uClient = ireq(myurl)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")

filename = "db.csv"
f = open(filename, "w")
headers = "Artist, Venue, City, Date\n"
f.write(headers)

DayContainer = page_soup.findAll("section",{"class":"overflow"})
print("Days on page: " + str(len(DayContainer)) + "\n")

def NextPage():
    np = page_soup.findAll("section", {"class":"next_news"})
    np = np[0].find('a').attrs['href']
    print(np)

for days in DayContainer: 
    shows = days.findAll("span", {"class":"concert_uitverkocht"})

    for soldout in shows:
        if shows:
            soldoutPlu = shows[0].parent.parent.parent

            artist = soldoutPlu.findAll("div", {"class":"td_2"})
            artist = artist[0].text.strip()

            venue = soldoutPlu.findAll("div", {"class":"td_3"})
            venue = venue[0].text

            city = soldoutPlu.findAll("div", {"class":"td_4"})
            city = city[0].text

            date = shows[0].parent.parent.parent.parent.parent
            date = date.findAll("section", {"class":"concert_agenda_date"})
            date = date[0].text
            date = date.strip().replace("\n", " ")
            print("Datum gevonden!")

            print("Artiest: " + artist)
            print("Locatie: " + venue)
            print("Stad: " + city) 
            print("Datum: " + date+ "\n")

            f.write(artist + "," + date + "," + city + "," + venue + "\n")

        else: 
            pass

NextPage()

I don't think I need the baseurl + page number approach, because I can extract the correct URL from each page with findAll. I'm fairly new to this, so the mistake is probably a silly one.

Thanks for your help!

2 answers:

Answer 0 (score: 0)

Your mistake

You have to actually fetch the URL you find at the end of the file. You are only calling NextPage(), but all it does is print out the URL.

That's your mistake :)
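
In isolation, the key change is that NextPage should hand the extracted href to a fetcher instead of printing it; the url_fetcher name here matches the full script that follows:

def NextPage(page_soup):
    np = page_soup.findAll("section", {"class": "next_news"})
    np = np[0].find('a').attrs['href']
    url_fetcher(np)   # fetch and parse the next page instead of just printing the URL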

import bs4
from urllib.request import urlopen as ireq
from bs4 import BeautifulSoup as soup

filename = "db.csv"
#at the beginning of the document you create the file in 'w' (write) mode
#but later you should open it in 'a' (append) mode, because 'w' (write) mode would overwrite the file
f = open(filename, "w")
headers = "Artist, Venue, City, Date\n"
f.write(headers)
f.close()

#create a function url_fetcher that fetches and parses the HTML every time
def url_fetcher(url):
    myurl = (url)
    uClient = ireq(myurl)
    page_html = uClient.read()
    uClient.close()
    page_soup = soup(page_html, "html.parser")
    DayContainer = page_soup.findAll("section",{"class":"overflow"})
    print("Days on page: " + str(len(DayContainer)) + "\n")
    get_artist(DayContainer, page_soup)

#here you have to actually fetch the URL, otherwise it won't work
def NextPage(page_soup):
    np = page_soup.findAll("section", {"class":"next_news"})
    np = np[0].find('a').attrs['href']
    url_fetcher(np)

#in get_artist there is still some repetition, but with a little tweaking it will work
def get_artist(DayContainer, page_soup):
    for days in DayContainer:
        shows = days.findAll("span", {"class":"concert_uitverkocht"})

        for soldout in shows:
            print(soldout)
            if shows:
                soldoutPlu = shows[0].parent.parent.parent

                artist = soldoutPlu.findAll("div", {"class":"td_2"})
                artist = artist[0].text.strip()

                venue = soldoutPlu.findAll("div", {"class":"td_3"})
                venue = venue[0].text

                city = soldoutPlu.findAll("div", {"class":"td_4"})
                city = city[0].text

                date = shows[0].parent.parent.parent.parent.parent
                date = date.findAll("section", {"class":"concert_agenda_date"})
                date = date[0].text
                date = date.strip().replace("\n", " ")
                print("Datum gevonden!")

                print("Artiest: " + artist)
                print("Locatie: " + venue)
                print("Stad: " + city)
                print("Datum: " + date+ "\n")
                with open(filename, "a") as f:
                    f.write(artist + "," + date + "," + city + "," + venue + "\n")

            else:
                pass
        NextPage(page_soup)
url_fetcher('https://www.podiuminfo.nl/concertagenda/')

Recap

To keep it easier to follow I ended up with one big loop, but it works :)

You will need to tweak it a bit so that there are no duplicate names and dates in db.csv
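
One possible way to do that (not part of the original answer, just a minimal sketch): keep a set of rows that have already been written and skip any row seen before. The seen set and write_row helper are illustrative names, and the column order follows the "Artist, Venue, City, Date" header written at the top of db.csv.

seen = set()

def write_row(artist, venue, city, date):
    row = (artist, venue, city, date)
    if row in seen:            # this exact row was already written from an earlier page or section
        return
    seen.add(row)
    with open(filename, "a") as f:
        f.write(artist + "," + venue + "," + city + "," + date + "\n")

Calling write_row(artist, venue, city, date) in get_artist instead of the with open(...) block would then drop the duplicates.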

Answer 1 (score: 0)

Try the script below to traverse the different pages, grab the required fields, and write them to a csv file accordingly. I tried to clean up your repetitive code and applied a slightly more concise approach instead. Give it a go:

import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup

link = 'https://www.podiuminfo.nl/concertagenda/?page={}&input_plaats=&input_datum=2018-06-30&input_podium=&input_genre=&input_provincie=&sort=&input_zoek='

with open("output.csv","w",newline="",encoding="utf-8") as infile:
    writer = csv.writer(infile)
    writer.writerow(['Artist','Venue','City'])

    pagenum = -1   #make sure to get the content of the first page as well which is "0" in the link
    while True:
        pagenum+=1
        res = urlopen(link.format(pagenum)).read()
        soup = BeautifulSoup(res, "html.parser")
        container = soup.find_all("section",class_="concert_rows_info")
        if len(container)<=1:break  ##as soon as there is no content the scraper should break out of the loop

        for items in container:
            artist = items.find(class_="td_2")("a")[0].get_text(strip=True)
            venue = items.find(class_="td_3").get_text(strip=True)
            city = items.find(class_="td_4").get_text(strip=True)
            writer.writerow([artist,venue,city])
            print(f'{artist}\n{venue}\n{city}\n')