IndexError: list index out of range when trying to scrape a specific column from a table?

Date: 2015-08-07 15:19:23

Tags: python parsing beautifulsoup html-parsing

First, here is the code I am running:

#!/usr/bin/env python
"""Scraping information from https://www.tdcj.state.tx.us/death_row/dr_executed_offenders.html"""

import requests, time
from bs4 import BeautifulSoup

url = "https://www.tdcj.texas.gov/death_row/dr_executed_offenders.html"
county = {}


def counties(countyArg):
    """Fills the `county` variable"""
    f = open("counties.txt")
    for line in f.readlines():
        countyArg[line] = 0

def scrape(urlArg, countyArg):
    """Scrape Site Based on HTML"""
    black = 0
    white = 0
    hispanic = 0

    page = requests.get(urlArg)
    html = page.content

    soup = BeautifulSoup(html, 'html.parser')
    table = soup.find('table')
    for row in table.findAll('tr'):
        if row.findAll('td')[8] == 'Black':
            black += 1
        elif row.findAll('td')[8] == 'White':
            white += 1
        elif row.findAll('td')[8] == 'Hispanic':
            hispanic += 1

        if row.findAll('td')[9] in countyArg:
            countyArg[row.findAll('td')[9]] += 1

    return countyArg
    return black, white, hispanic

if __name__ == '__main__':
    counties(county)
    print scrape(url, county)

Running it returns:

Traceback (most recent call last):
  File "raceScraper.py", line 44, in <module>
    print scrape(url, county)
  File "raceScraper.py", line 29, in scrape
    if row.findAll('td')[8] == 'Black':
IndexError: list index out of range

This error surprises me, because the table really does have 8 (in fact 10) columns. Keep in mind that I am working with this table. The answers I have read on the subject all use this same list-style indexing, and since they are marked as accepted, the implication is that they work. So, any ideas? Is there a simpler approach I should take that still works, or is there something fixable in my current code? Bear in mind that I want this to stay an HTML web scraper; if I had to fall back on a local CSV file it would not feel finished. Thanks.

1 answer:

Answer 0 (score: 1)

You need to skip the first row of the table, the header row: it has no (or too few) td cells, so row.findAll('td') comes back too short for it and indexing [8] raises the IndexError you are seeing.
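
A quick way to confirm this, assuming the soup object built in the question and that the table being scraped is the first one on the page:

first_row = soup.find('table').find('tr')
# the header row's cells are typically <th> rather than <td>, so this prints 0
# (or at least a number below 9), which is why indexing [8] on it blows up
print len(first_row.find_all('td'))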

Also, I would use the table.os tr CSS selector and collections.defaultdict:

from collections import defaultdict

import requests
from bs4 import BeautifulSoup

url = "https://www.tdcj.texas.gov/death_row/dr_executed_offenders.html"
soup = BeautifulSoup(requests.get(url).content, 'html.parser')

races = defaultdict(int)
counties = defaultdict(int)

# skip the header row, then tally the Race (index 8) and County (index 9) columns
for row in soup.select('table.os tr')[1:]:
    cells = row.find_all("td")

    race = cells[8].text
    races[race] += 1

    county = cells[9].text
    counties[county] += 1

print races
print counties

Prints:

defaultdict(<type 'int'>, {u'Other': 2, u'Hispanic': 96, u'White': 233, u'White ': 1, u'Black': 195})
defaultdict(<type 'int'>, {u'Lubbock ': 1, u'Bowie': 4, u'ValVerde': 1, u'El Paso ': 2, u'Smith': 9, u'Montgomery': 14, u'Coryell ': 1, u'Bailey': 1, u'Trinity': 1, u'Henderson': 2, u'Brown': 1, u'Sabine': 1, u'Shelby': 1, u'Victoria': 2, u'El Paso': 1, u'Harris': 123, u'Matagorda': 2, u'Parker ': 1, u'Scurry': 1, u'Navarro': 6, u'Tarrant': 38, u'San Jacinto': 1, u'Upshur': 1, u'Parker': 1, u'Jefferson ': 1, u'Anderson ': 1, u'Freestone': 1, u'Newton': 1, u'Potter': 10, u'Aransas': 1, u'Polk': 2, u'Smith ': 2, u'Pecos': 2, u'Caldwell': 1, u'Liberty': 2, u'Anderson': 3, u'Jefferson': 14, u'Nueces ': 3, u'Grayson': 3, u'Bowie ': 1, u'Nacogdoches': 1, u'Lamar': 2, u'Lee': 1, u'Milam': 1, u'Crockett': 1, u'Comal ': 1, u'Hamilton': 1, u'Randall': 2, u'Johnson': 2, u'Hopkins': 1, u'Kerr': 2, u'Clay': 1, u'Comal': 1, u'Kendall': 1, u'Taylor': 5, u'McLennan ': 1, u'Travis': 8, u'Refugio': 2, u'Cameron': 6, u'Nueces': 12, u'Wilbarger': 2, u'Hidalgo': 4, u'Gillespie': 1, u'Bexar': 42, u'Dallas': 52, u'Cherokee': 3, u'Collin': 6, u'Red River': 1, u'McLennan': 6, u'Tom Green': 3, u'Gregg': 5, u'San Patricio': 1, u'Montgomery ': 1, u'Red River ': 1, u'Bee': 1, u'Walker': 3, u'Dallas ': 2, u'Llano': 1, u'Bell': 3, u'Fort Bend ': 1, u'Brazoria': 4, u'Leon': 2, u'Kaufman': 1, u'Harrison': 1, u'Kerr ': 1, u'Denton': 6, u'Ellis': 2, u'Lubbock': 11, u'Randall ': 1, u'Wood': 1, u'Williamson': 3, u'Brazos': 11, u'Hale': 2, u'Jones': 1, u'Hardin': 1, u'Fort Bend': 4, u'Hunt': 3, u'Wichita': 2, u'Galveston ': 1, u'Morris': 1, u'Chambers': 1, u'Kleberg': 1, u'Bastrop': 1, u'Galveston': 5, u'Brazos ': 1, u'Atascosa': 1, u'Liberty ': 1, u'Harris ': 1, u'Dawson': 1})

Or, alternatively, use the Counter class from the same collections module:
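
A minimal sketch of that variant, assuming the same soup object and column layout as above; the .strip() call is an added tweak to collapse keys that differ only by trailing whitespace, such as u'Harris ' and u'Harris' in the output above:

from collections import Counter

# collect the cells of every data row once, then count each column of interest
rows = [row.find_all("td") for row in soup.select('table.os tr')[1:]]

races = Counter(cells[8].text.strip() for cells in rows)
counties = Counter(cells[9].text.strip() for cells in rows)

print races.most_common()       # all races, most frequent first
print counties.most_common(10)  # the ten most frequent counties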