I'm trying to build a web scraper project, and one of the things I want is a smart retry mechanism, using urllib3, requests, and BeautifulSoup.
To test it, I set the timeout to 1 so that the request fails and I can check whether the exception triggers a retry. Here is the code:
import requests
import re
import logging
from bs4 import BeautifulSoup
import json
import time
import sys
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
# get_items takes a dict that maps keys to URL paths and returns a dict of the scraped items
def get_items(self, url_dict):
    itemdict = {}
    for k, v in url_dict.items():
        pending = True
        # fetch the page with the requests library, retrying until the request succeeds
        while pending:
            try:
                session = requests.Session()
                retries = Retry(total=3, backoff_factor=0.1,
                                status_forcelist=[301, 500, 502, 503, 504])
                session.mount('https://', HTTPAdapter(max_retries=retries))
                page_response = session.get('https://www.XXXXXXX.il' + v, timeout=1)
            except requests.exceptions.Timeout:
                print("Timeout occurred")
                logging.basicConfig(level=logging.DEBUG)
            else:
                pending = False
        # parse the page content with the html parser and store it in a variable
        page_content = BeautifulSoup(page_response.content, "html.parser")
        for i in page_content.find_all('div', attrs={'class': 'prodPrice'}):
            parent = i.parent.parent.contents[0]
            getparentfunc = parent.find("a", attrs={"href": "javascript:void(0)"})
            itemid = re.search(r".*'(\d+)'.*", getparentfunc.attrs['onclick']).groups()[0]
            itemName = re.sub(r'\W+', ' ', i.parent.contents[0].text)
            priceitem = re.sub(r'[\D.]+ ', ' ', i.text)
            itemdict[itemid] = [itemName, priceitem]
    return itemdict
Any insight into an efficient retry mechanism, or any other simple approach, would be appreciated. Thanks.
Answer 0 (score: 1)
I usually do something like this:
import requests

# Simple recursive retry wrapper around requests.get.
def get(url, retries=3):
    try:
        r = requests.get(url)
        return r
    except requests.exceptions.RequestException as err:
        print(err)
        if retries < 1:
            raise ValueError('No more retries!')
        # Retry the same URL with one fewer attempt remaining.
        return get(url, retries - 1)
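If you'd rather keep the urllib3 Retry approach from your question, the adapter only needs to be mounted once, outside the fetch loop, and the session can then be reused for every request. Here is a minimal sketch, assuming the placeholder host from the question; the /some-path path and the retry numbers are illustrative, not tuned values:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry

# Build one session and mount the retry adapter once; reuse it for every URL.
session = requests.Session()
retries = Retry(
    total=3,                                # at most 3 retries per request
    backoff_factor=0.1,                     # exponential backoff between attempts
    status_forcelist=[500, 502, 503, 504],  # retry only on server errors
)
session.mount('https://', HTTPAdapter(max_retries=retries))

# The timeout applies per attempt; urllib3 retries connection errors and the
# listed status codes transparently before any exception reaches this call.
page_response = session.get('https://www.XXXXXXX.il/some-path', timeout=1)

One note on your settings: 301 is a permanent redirect, which requests follows by default, so putting it in status_forcelist usually just makes a moved page exhaust its retries and raise instead of being followed.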