How to loop through a list of scraped URLs with BeautifulSoup

Date: 2017-06-29 10:59:09

Tags: python beautifulsoup

Does anyone know how to scrape a list of URLs from the same website with BeautifulSoup? list = ['url1','url2','url3'...]


My code for extracting the list of URLs:

import requests
from bs4 import BeautifulSoup

url = 'http://www.hkjc.com/chinese/racing/selecthorsebychar.asp?ordertype=2'
url1 = 'http://www.hkjc.com/chinese/racing/selecthorsebychar.asp?ordertype=3'
url2 = 'http://www.hkjc.com/chinese/racing/selecthorsebychar.asp?ordertype=4'

r = requests.get(url)
r1 = requests.get(url1)
r2 = requests.get(url2)

data = r.text
soup = BeautifulSoup(data, 'lxml')
links = []

for link in soup.find_all('a', {'class': 'title_text'}):
    links.append(link.get('href'))

data1 = r1.text

soup = BeautifulSoup(data1, 'lxml')

for link in soup.find_all('a', {'class': 'title_text'}):
    links.append(link.get('href'))

data2 = r2.text

soup = BeautifulSoup(data2, 'lxml')

for link in soup.find_all('a', {'class': 'title_text'}):
    links.append(link.get('href'))

base_url = 'http://www.hkjc.com/chinese/racing/'

url_list = ['{}{}'.format(base_url, link) for link in links]
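
Since the three index pages differ only in the ordertype query parameter, the three request/parse blocks above can be collapsed into a single loop. A minimal sketch, reusing the 'title_text' class and the ordertype values from the code above (url_list can then be built exactly as in the last line):

import requests
from bs4 import BeautifulSoup

index_url = 'http://www.hkjc.com/chinese/racing/selecthorsebychar.asp?ordertype={}'

links = []
for ordertype in (2, 3, 4):
    r = requests.get(index_url.format(ordertype))
    soup = BeautifulSoup(r.text, 'lxml')
    # same selector as above: anchor tags with class 'title_text'
    for a in soup.find_all('a', {'class': 'title_text'}):
        links.append(a.get('href'))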

My code for extracting data from a single page URL:

from urllib.request import urlopen  
from bs4 import BeautifulSoup  
import requests  
import pandas as pd  

url = 'myurl'

r = requests.get(url)

r.encoding = 'utf-8'

html_content = r.text

soup = BeautifulSoup(html_content, 'lxml')

soup.findAll('tr')[27].findAll('td')

column_headers = [th.getText() for th in
                  soup.findAll('tr')[27].findAll('td')]

data_rows = soup.findAll('tr')[29:67]
data_rows

player_data = [[td.getText() for td in data_rows[i].findAll('td', {'class':['htable_text', 'htable_eng_text']})]
            for i in range(len(data_rows))]

player_data_02 = []

for i in range(len(data_rows)):
    player_row = []

    for td in data_rows[i].findAll('td'):
        player_row.append(td.getText())

    player_data_02.append(player_row)

df = pd.DataFrame(player_data, columns=column_headers[:18])
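
To answer the original question, one option is to wrap the single-page logic above in a function and call it for every entry in url_list, stacking the results with pandas. This is only a sketch: it reuses the row indices (27, 29:67), the cell classes, and the 18-column header slice from the code above, and assumes every page uses the same table layout:

import requests
import pandas as pd
from bs4 import BeautifulSoup

def scrape_page(url):
    r = requests.get(url)
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')

    rows = soup.findAll('tr')
    column_headers = [td.getText() for td in rows[27].findAll('td')]
    data_rows = rows[29:67]

    player_data = [[td.getText() for td in row.findAll('td', {'class': ['htable_text', 'htable_eng_text']})]
                   for row in data_rows]
    return pd.DataFrame(player_data, columns=column_headers[:18])

# one DataFrame per page, stacked into a single frame
frames = [scrape_page(u) for u in url_list]
combined = pd.concat(frames, ignore_index=True)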

1 Answer:

Answer 0 (score: 2):

Collecting the table data from a subset of your links can be done as follows:

from bs4 import BeautifulSoup as BS
import requests  
import pandas as pd  

url_list = ['http://www.hkjc.com/english/racing/horse.asp?HorseNo=S217','http://www.hkjc.com/english/racing/horse.asp?HorseNo=A093','http://www.hkjc.com/english/racing/horse.asp?HorseNo=V344','http://www.hkjc.com/english/racing/horse.asp?HorseNo=V077', 'http://www.hkjc.com/english/racing/horse.asp?HorseNo=P361', 'http://www.hkjc.com/english/racing/horse.asp?HorseNo=T103']


for link in url_list:
    r = requests.get(link)
    r.encoding = 'utf-8'

    html_content = r.text
    soup = BS(html_content, 'lxml')


    table = soup.find('table', class_='bigborder')
    if not table:
        continue

    trs = table.find_all('tr')

    if not trs:
        continue  # if no rows are found, move on to the next link

    headers = trs[0]
    headers_list=[]
    for td in headers.find_all('td'):
        headers_list.append(td.text)
    headers_list+=['Season']
    headers_list.insert(19,'pseudocol1')
    headers_list.insert(20,'pseudocol2')
    headers_list.insert(21,'pseudocol3')

    res=[]
    row = []
    season = ''
    for tr in trs[1:]:
        if 'Season' in tr.text:
            season = tr.text

        else:
            tds = tr.find_all('td')
            for td in tds:
                row.append(td.text.strip('\n').strip('\r').strip('\t').strip('"').strip()) #clean data
            row.append(season.strip())
            res.append(row)
            row=[]

    res = [i for i in res if i[0]!='']

    df=pd.DataFrame(res, columns=headers_list)
    del df['pseudocol1'],df['pseudocol2'],df['pseudocol3']
    del df['VideoReplay']

    df.to_csv('/home/username/'+str(url_list.index(link))+'.csv')

If you want to store the data from all of the tables in a single DataFrame, this small modification will do it:

from bs4 import BeautifulSoup as BS
import requests  
import pandas as pd  

url_list = ['http://www.hkjc.com/english/racing/horse.asp?HorseNo=S217','http://www.hkjc.com/english/racing/horse.asp?HorseNo=A093','http://www.hkjc.com/english/racing/horse.asp?HorseNo=V344','http://www.hkjc.com/english/racing/horse.asp?HorseNo=V077', 'http://www.hkjc.com/english/racing/horse.asp?HorseNo=P361', 'http://www.hkjc.com/english/racing/horse.asp?HorseNo=T103']


res = []  # res now lives outside the loop and accumulates rows from every link
for link in url_list:
    r = requests.get(link)
    r.encoding = 'utf-8'

    html_content = r.text
    soup = BS(html_content, 'lxml')


    table = soup.find('table', class_='bigborder')
    if not table:
        continue

    trs = table.find_all('tr')

    if not trs:
        continue  # if no rows are found, move on to the next link


    headers = trs[0]
    headers_list=[]
    for td in headers.find_all('td'):
        headers_list.append(td.text)
    headers_list+=['Season']
    headers_list.insert(19,'pseudocol1')
    headers_list.insert(20,'pseudocol2')
    headers_list.insert(21,'pseudocol3')

    row = []
    season = ''
    for tr in trs[1:]:
        if 'Season' in tr.text:
            season = tr.text

        else:
            tds = tr.find_all('td')
            for td in tds:
                row.append(td.text.strip('\n').strip('\r').strip('\t').strip('"').strip())
            row.append(season.strip())
            res.append(row)
            row=[]

res = [i for i in res if i[0]!=''] #outside of loop

df=pd.DataFrame(res, columns=headers_list) #outside of loop
del df['pseudocol1'],df['pseudocol2'],df['pseudocol3'] 
del df['VideoReplay']

df.to_csv('/home/Username/'+'tables.csv') #outside of loop