Python multiprocessing pool freezes for no apparent reason

Date: 2016-09-01 08:47:37

Tags: python web-crawler python-multiprocessing

I am new to Python and I hope someone can help me.

I started learning Python a few weeks ago and am trying to build a web crawler.

The idea is the following: the first part collects domain names from a website, one letter at a time. The second part checks whether each domain is valid (reachable but not parked) and stores it in a database.

Everything works fine until the crawler reaches the letter 'r'. After a few minutes the program freezes, without any error message. The letters after 'r' also run without any problems, and the domain at which the program freezes is not the same each time.

Here is my code:

import requests
import re
import logging
import time

from bs4 import BeautifulSoup

from multiprocessing.pool import Pool

import mysqlDB  # assumed: the asker's own MySQL helper module, not shown in the question

""" Extract only the plain text of element
"""
def visible(element):
    if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
        return False
    elif re.match('.*<!--.*-->.*', str(element), re.DOTALL):
        return False
    elif re.fullmatch(r"[\s\r\n]*", str(element)):
        return False
    return True


logging.basicConfig(format='%(asctime)s %(name)s - %(levelname)s: %(message)s', level=logging.ERROR)
logger = logging.getLogger('crawler')
hdlr = logging.FileHandler('crawler.log')
formatter = logging.Formatter('%(asctime)s %(name)s - %(levelname)s: %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)

""" Checks if a domain is parked.
    Returns true if a domain is not parked, otherwise false
    """
def check_if_valid(website):
    try:
        resp = requests.get("http://www." + website, timeout=10, verify=False)

        soup = BeautifulSoup(resp.text, 'html.parser')

        if len(soup.find_all('script')) == 0:
            # check for very small web pages
            if len(resp.text) < 700:
                return None
            # check for 'park' pattern
            text = filter(visible, soup.find_all(text=True))
            for elem in text:
                if 'park' in elem:
                    return None

        return "http://www." + website + "/"

    except requests.exceptions.RequestException:
        # no logging -> too many exceptions
        return None
    except Exception:
        logger.exception("Error during domain validation")
        return None


def persist_domains(nonParkedDomains):
    logger.info("Inserting domains into database")
    dbConn = mysqlDB.connect()

    for d in nonParkedDomains:
        mysqlDB.insert_company_domain(dbConn, d)

    mysqlDB.close_connection(dbConn)


if __name__ =="__main__":
    dryrun = True

    if dryrun:
        logger.warning("Testrun! Data does not get persisted!")

    url = "http://www.safedomain.at/"

#    chars = ['0-9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't','u', 'v', 'w', 'x', 'y', 'z']
    chars = ['r','s', 't','u', 'v', 'w', 'x', 'y', 'z']
    payload = {'sub': 'domains', 'char': '', 'page': '1'}

    domains = list()
    cntValidDomains = 0


    logger.info("Start collecting domains from \"http://www.safedomain.at\"....")
    try:
        for c in chars:
            payload['char'] = c
            payload['page'] = '1'

            response = requests.get(url, params=payload, verify=False)
            soup = BeautifulSoup(response.text, 'html.parser')

            while not soup.find_all('a', {'data-pagenumber': True}):
                time.sleep(5)
                response = requests.get(url, params=payload, verify=False)
                soup = BeautifulSoup(response.text, 'html.parser')

            maxPage = int(soup.find_all('a', {'data-pagenumber': True})[-1].getText())

            domains = list()
            for page in range(1, maxPage + 1):
                payload['page'] = page

                logger.debug("Start crawling with following payload: char=%s page=%s", payload['char'], payload['page'])

                response = requests.get(url, params=payload)
                soup = BeautifulSoup(response.text, 'html.parser')

                for elem in soup.find_all('ul', {'class': 'arrow-list'}):
                    for link in elem.find_all('a'):
                        domains.append(link.getText())

            logger.info("Finished! Collected domains for %s: %s",c, len(domains))
            logger.info("Checking if domains are valid...")

            with Pool(48) as p:
                nonParkedDomains = p.map(check_if_valid, domains)

            nonParkedDomains = [d for d in nonParkedDomains if d is not None]

            cntValidDomains += len(nonParkedDomains)

            # check if domains should get persisted

            if dryrun:
                logger.info("Valid domains for %s in domains", c)
                for elem in nonParkedDomains:
                    logger.info(elem)
            else:
                persist_domains(nonParkedDomains)

            logger.info("Finished domain validation for %s!", c)

        logger.info("Valid domains: %s", cntTemp)
        logger.info("Program finished!")

    except Exception as e:
        logger.exception("Domain collection stopped unexpectedly")

EDIT: After a few hours of debugging and testing I had an idea. Could the requests module, used inside the pool workers, be causing the trouble?

1 Answer:

Answer 0 (score: 0)

After a few hours of debugging and testing I was able to solve the problem.

Instead of a multiprocessing Pool I used a ThreadPoolExecutor, which is better suited for this kind of network-bound work.

I had also found that the requests.get() inside the worker function was causing some trouble, so I changed the timeout to 1. A rough sketch of both changes is below.
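
A minimal sketch of the two changes, assuming the same check_if_valid() and domains from the question (the worker count of 48 simply mirrors the original Pool(48)):

from concurrent.futures import ThreadPoolExecutor

# In check_if_valid() the request now uses the shorter timeout:
#     resp = requests.get("http://www." + website, timeout=1, verify=False)

# Inside the per-letter loop, the multiprocessing Pool block becomes a thread pool:
with ThreadPoolExecutor(max_workers=48) as executor:
    nonParkedDomains = list(executor.map(check_if_valid, domains))

# Drop the domains that failed the check
nonParkedDomains = [d for d in nonParkedDomains if d is not None]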

After these changes the program worked.

I don't know the exact reason, but I am very curious about it. If anyone knows, I would appreciate it if they could post it.