Python requests stops working in the middle of a file

Date: 2016-03-23 16:53:08

Tags: python exception-handling web-scraping python-requests

I have a csv file with URLs, and I need to scrape metadata from those websites. I am using Python requests, like this:

from tempfile import NamedTemporaryFile
import shutil
import csv
from bs4 import BeautifulSoup
import requests
import re
import html5lib
import sys
#import logging

filename = 'TestWTF.csv'
#logging.basicConfig(level=logging.DEBUG)

#Get filename (with extension) from terminal
#filename = sys.argv[1]
tempfile = NamedTemporaryFile(delete=False)
read_timeout = 1.0
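#Note: delete=False keeps the temp file on disk so shutil.move() can replace the
#original CSV at the end; read_timeout is never actually used, getMetadata() hard-codes timeout=2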


#Does the actual scraping, returns the metaTag data
def getMetadata (url, metaTag):
    r = requests.get("http://" + url, timeout=2)
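    #timeout=2 only caps the connect and per-read socket waits, not the total time
    #spent downloading the response or parsing it afterwards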
    data = r.text
    soup = BeautifulSoup(data, 'html5lib')
    metadata = soup.findAll(attrs={"name":metaTag})
    return metadata

#Gets either keyword or description
def addDescription ( row ):
    scrapedKeywordsData = getMetadata(row,  'keywords')
    if not scrapedKeywordsData:
        print row + ' NO KEYWORDS'
        scrapedKeywordsData = getMetadata(row,  'description')
        if not scrapedKeywordsData:
            return ''
    return scrapedKeywordsData[0]

def prepareString ( data ):
    output = data
    #Get rid of opening meta content
    if output.startswith( '<meta content="' ):
        output = data[15:]

    #Get rid of closing meta content (keywords)
    if output.endswith( '" name="keywords"/>' ):
        output = output[:-19]

    #Get rid of closing meta content (description)
    if output.endswith( '" name="description"/>' ):
        output = output[:-22]

    return output

def iterator():
    with open(filename, 'rb') as csvFile, tempfile:
        reader = csv.reader(csvFile, delimiter=',', quotechar='"')
        writer = csv.writer(tempfile, delimiter=',', quotechar='"')

        i = 0
        for row in reader:
            try:
                data = str(addDescription (row[1] ))
                row[3] = prepareString( data )
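            #Note: RequestException is the base class of every requests exception below,
            #so this first clause catches them all and the more specific handlers never run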
            except requests.exceptions.RequestException as e:
                print e
            except requests.exceptions.Timeout as e:
                print e
            except requests.exceptions.ReadTimeout as e:
                print "lol"
            except requests.exceptions.ConnectionError as e:
                print "These aren't the domains we're looking for."
            except requests.exceptions.ConnectTimeout as e:
                print "Too slow Mojo!"



            writer.writerow(row)
            i = i + 1
            print i
    shutil.move(tempfile.name, filename)

def main():
    iterator()


#Run main when executed as a script
if __name__ == '__main__':
    main()

It works fine, but on a few URLs (say 2-3 out of 3000) it suddenly stops working and never moves on to the next one after the timeout, so I have to kill it with Ctrl+C, which means the file never gets saved.

I know this is a matter of catching an exception, but I cannot figure out which one, or what else to do about it. I would be happy to simply skip the URL that gets stuck...
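For reference, the kind of workaround I have in mind is a hard wall-clock limit around each URL, roughly like the sketch below. It is untested and assumes a Unix system (signal.alarm is not available on Windows); addDescriptionWithLimit, HangTimeout and the 15-second limit are just placeholder names and values:

import signal

class HangTimeout(Exception):
    pass

def raiseHangTimeout(signum, frame):
    #SIGALRM handler: interrupt whatever is currently running
    raise HangTimeout()

#Same job as addDescription, but gives up after `limit` seconds of wall-clock time
def addDescriptionWithLimit(row, limit=15):
    signal.signal(signal.SIGALRM, raiseHangTimeout)
    signal.alarm(limit)
    try:
        return str(addDescription(row))
    except HangTimeout:
        print row + ' SKIPPED (stuck for more than ' + str(limit) + ' seconds)'
        return ''
    finally:
        signal.alarm(0)   #always disarm the alarm

iterator() would then use data = addDescriptionWithLimit(row[1]) instead of data = str(addDescription(row[1])), and the requests exceptions it already catches would still propagate as before.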

EDIT:

Added the traceback, captured by pressing Ctrl+C while the script was stuck (the interrupt lands inside BeautifulSoup's findAll rather than inside requests):

^CTraceback (most recent call last):
  File "blacklist.py", line 90, in <module>
    main()
  File "blacklist.py", line 85, in main
    iterator()
  File "blacklist.py", line 62, in iterator
    data = str(addDescription (row[1] ))
  File "blacklist.py", line 30, in addDescription
    scrapedKeywordsData = getMetadata(row,  'keywords')
  File "blacklist.py", line 25, in getMetadata
    metadata = soup.findAll(attrs={"name":metaTag})
  File "/Library/Python/2.7/site-packages/bs4/element.py", line 1259, in find_all
    return self._find_all(name, attrs, text, limit, generator, **kwargs)
  File "/Library/Python/2.7/site-packages/bs4/element.py", line 537, in _find_all
    found = strainer.search(i)
  File "/Library/Python/2.7/site-packages/bs4/element.py", line 1654, in search
    found = self.search_tag(markup)
  File "/Library/Python/2.7/site-packages/bs4/element.py", line 1626, in search_tag
    if not self._matches(attr_value, match_against):
  File "/Library/Python/2.7/site-packages/bs4/element.py", line 1696, in _matches
    if isinstance(markup, Tag):
KeyboardInterrupt 

EDIT 2:

Example site where the script does not work: miniusa.com

0 Answers:

No answers yet.