Error while scraping in Python, need a way to bypass it

Asked: 2015-06-30 18:58:16

Tags: python web-scraping

import requests
from bs4 import BeautifulSoup
import csv

outfile = open("./battingall.csv", "wb")
writer = csv.writer(outfile)
base_url = 'http://www.baseball-reference.com'
player_url = 'http://www.baseball-reference.com/players/'
alphabet = list('abcdefghijklmnopqrstuvwxyz')
players = 'shtml'
gamel = '&t=b&year='
game_logs = 'http://www.baseball-reference.com/players/gl.cgi?id='
years = ['2015','2014','2013','2012','2011','2010','2009','2008']

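# Build one player-index URL per letter of the alphabet.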
drounders = []
for dround in alphabet:
    drounders.append(player_url + dround)

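# Fetch each index page and collect every link on it.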
urlz = []
for ab in drounders:
    data = requests.get(ab)
    soup = BeautifulSoup(data.content)
    for link in soup.find_all('a'):
        if link.has_attr('href'):
            urlz.append(base_url + link['href'])

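# Keep only short player-page ('shtml') URLs under a letter index and build a
# game-log URL for each year; ant[44:-6] slices the player id out of the page URL.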
yent = []
for ant in urlz:
    for d in drounders:
        for y in years:
            if players in ant:
                if len(ant) < 60:
                    if d in ant:
                        yent.append(game_logs + ant[44:-6] + gamel + y)

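# Fetch each game-log page, grab the batting table plus the Throws/Height/Weight
# bio fields, and write one CSV row per table row (j[52:59] is the player id,
# the last four characters of j are the year).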
for j in yent:
    try:
        data = requests.get(j)
        soup = BeautifulSoup(data.content)
        table = soup.find('table', attrs={'id': 'batting_gamelogs'})
        tablea = j[52:59]
        tableb = soup.find("b", text='Throws:').next_sibling.strip()
        tablec = soup.find("b", text='Height:').next_sibling.strip()
        tabled = soup.find("b", text='Weight:').next_sibling.strip()
        list_of_rows = []
        for row in table.findAll('tr'):
            list_of_cells = []
            list_of_cells.append(tablea)
            list_of_cells.append(j[len(j)-4:])
            list_of_cells.append(tableb)
            list_of_cells.append(tablec)
            list_of_cells.append(tabled)
            for cell in row.findAll('td'):
                text = cell.text.replace('&nbsp;', '').encode("utf-8")
                list_of_cells.append(text)
            list_of_rows.append(list_of_cells)
        print list_of_rows
        writer.writerows(list_of_rows)
    except (AttributeError, NameError):
        pass

When I run this code to get game-log batting data, I keep getting this error:

Traceback (most recent call last):
  File "battinggamelogs.py", line 44, in <module>
    data = requests.get(j)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-      packages/requests/api.py", line 65, in get
    return request('get', url, **kwargs)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-    packages/requests/api.py", line 49, in request
    response = session.request(method=method, url=url, **kwargs)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/sessions.py", line 461, in request
    resp = self.send(prep, **send_kwargs)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/sessions.py", line 573, in send
    r = adapter.send(request, **kwargs)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/adapters.py", line 415, in send
    raise ConnectionError(err, request=request)
requests.exceptions.ConnectionError: ('Connection aborted.', BadStatusLine("''",))

I need a way to bypass this error and keep going. I think the error occurs because there is no table to pull data from.

1 Answer:

Answer 0 (score: 2)

You can wrap the requests.get() call in a try/except block. You need to catch the requests.exceptions.ConnectionError that is being raised:

for ab in drounders:
    try:
        data = requests.get(ab)
        soup = BeautifulSoup(data.content)
        for link in soup.find_all('a'):
            if link.has_attr('href'):
                urlz.append(base_url + link['href'])
    except requests.exceptions.ConnectionError:
        pass

This happens because there is a problem with the connection itself, not because there is no data in the table. You aren't even getting that far.
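If the failures are intermittent (the server dropping connections when hit with rapid-fire requests), retrying before giving up is another option. A minimal sketch, not from the original answer, using a hypothetical get_with_retries helper and assuming a short pause between attempts is enough:

import time

def get_with_retries(url, attempts=3, delay=5):
    # Hypothetical helper: retry a flaky GET a few times before giving up.
    for attempt in range(attempts):
        try:
            return requests.get(url)
        except requests.exceptions.ConnectionError:
            if attempt == attempts - 1:
                raise  # out of retries; let the caller handle it
            time.sleep(delay)  # brief pause before retrying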

Note: using pass alone silently swallows the exception entirely (as you already do later in your code block). It would probably be better to do something like this:

except requests.exceptions.ConnectionError:
    print("Failed to open {}".format(ab))

This will give you a message on the console telling you which URL failed.
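Note that your traceback actually comes from the second loop (data = requests.get(j)), whose except clause only catches AttributeError and NameError, so a ConnectionError raised there still crashes the script. A sketch of extending that handler the same way:

for j in yent:
    try:
        data = requests.get(j)
        # ... parse the page and write rows, as in the original loop ...
    except (AttributeError, NameError):
        pass
    except requests.exceptions.ConnectionError:
        print("Failed to open {}".format(j))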