Web crawler problem: IndexError: string index out of range

Time: 2017-01-04 01:19:37

Tags: python index-error

I'm building a web crawler. I'm not using Scrapy or anything like that; I'm trying to have my script do most of the work itself. I've searched for this issue but can't seem to find anything that helps fix the error. I've tried converting some variables to narrow the problem down. I get an error on line 24 saying IndexError: string index out of range. The function runs on the first URL (the original one), then on the second, and fails on the third URL in the original array. I'm lost; any help would be greatly appreciated! Note that I'm only printing all of this for testing; I'll eventually write it to a text file.

import requests
from bs4 import BeautifulSoup

# creating requests from user input
url = raw_input("Please enter a domain to crawl, without the 'http://www' part : ")

def makeRequest(url):
    r = requests.get('http://' + url)
    # Parse the HTML with BeautifulSoup so we can search for <a> tags
    soup = BeautifulSoup(r.content, 'html.parser')
    # Collect every <a> tag; the hrefs are extracted later in makeFilter
    output = soup.find_all('a')
    return output


def makeFilter(link):
    # Creating array for our links
    found_link = []
    for a in link:
        a = a.get('href')
        a_string = str(a)

        # if statement to filter our links
        if a_string[0] == '/': # this is the line with the error
            # Relative links
            found_link.append(a_string)

        if 'http://' + url in a_string:
            # Links from the same site
            found_link.append(a_string)

        if 'https://' + url in a_string:
            # Links from the same site with SSL
            found_link.append(a_string)

        if 'http://www.' + url in a_string:
            # Links from the same site
            found_link.append(a_string)

        if 'https://www.' + url in a_string:
            # Links from the same site with SSL
            found_link.append(a_string)
        #else:  
        #   found_link.write(a_string + '\n') # testing only
    output = found_link

    return output   

# Function for removing duplicates
def remove_duplicates(values):
    output = []
    seen = set()
    for value in values:
        if value not in seen:
            output.append(value)
            seen.add(value)
    return output
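
# A quick sanity check (editor's note, not part of the original script):
#   remove_duplicates(['/about', '/', '/about']) returns ['/about', '/'],
#   keeping first-seen order, which a plain set() would not guarantee.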

# Run the function with our list in this order -> Makes the request -> Filters the links -> Removes duplicates
def createURLList(values):
    requests = makeRequest(values)
    new_list = makeFilter(requests)
    filtered_list = remove_duplicates(new_list)

    return filtered_list

result = createURLList(url)

# print result

# for verifying and crawling resulting pages
for b in result:
    sub_directories = createURLList(url + b)
    crawler = []
    crawler.append(sub_directories)

    print crawler

1 Answer:

Answer 0 (score: 1)

After a_string = str(a), try adding:

if not a_string:
    continue
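
To spell out why this works (my reading of the code, not part of the original answer): if a page contains an anchor like <a href="">, then a.get('href') returns an empty string, a_string becomes '', and a_string[0] indexes past the end of that empty string, which is exactly "IndexError: string index out of range". The continue skips such anchors before anything is indexed. The first two pages crawled evidently had no empty hrefs, which is why the script only failed on the third URL.

A patched makeFilter might look like the sketch below. It assumes the same module-level url variable as in the question; the None check and the startswith call are my additions, not the answerer's:

def makeFilter(link):
    # Creating array for our links
    found_link = []
    for a in link:
        a = a.get('href')      # None when the anchor has no href attribute
        if a is None:          # extra hardening: str(None) is the string
            continue           # 'None', which would otherwise slip through
        a_string = str(a)
        if not a_string:       # the answer's fix: skip empty hrefs
            continue

        # startswith never indexes into the string, so it is safe
        # no matter how short a_string is
        if a_string.startswith('/'):
            # Relative links
            found_link.append(a_string)
        elif ('http://' + url in a_string or
              'https://' + url in a_string or
              'http://www.' + url in a_string or
              'https://www.' + url in a_string):
            # Absolute links on the same site, with or without www/SSL
            found_link.append(a_string)

    return found_link

Folding the four absolute-URL tests into one elif also stops a single link from being appended twice when it matches more than one pattern; the original version relied on remove_duplicates to clean that up afterwards.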