I am looking for a piece of code that will fetch all the internal links [absolute and relative] from a website by iterating over every internal link it discovers.
So far I have managed to write this much, but I cannot work out the correct logic for the program.
import requests, csv, time
from lxml import html
from collections import OrderedDict

links = []
domain = 'bunchball.com'
base_link = 'http://www.bunchball.com/'
unique_list = []

def get_links(base_link):
    # Fetch the page and pull every href out of the <a> tags
    r = requests.get(base_link)
    source = html.fromstring(r.content)
    link = source.xpath('//a/@href')
    for each in link:
        each = str(each)
        if domain in each:              # absolute link on the same domain
            links.append(each)
        elif each.startswith('/'):      # relative link, prefix with the base URL
            links.append(base_link + each)
            unique_list.append(each)
        else:
            pass

get_links(base_link)

# This is where the logic falls apart: I want to visit every newly
# discovered link exactly once, but I keep re-crawling the same pages.
for each1 in list(OrderedDict.fromkeys(links)):
    get_links(each1)
    while each1 not in unique_list:
        unique_list.append(each1)
        get_links(each1)
Answer 0 (score: 1)
Try mechanize for a simpler solution:
from mechanize import Browser

br = Browser()
br.open("http://www.bunchball.com/")
# br.links() yields a Link object for every anchor on the opened page
list_of_links = [link for link in br.links()]
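
Note that this only collects the anchors on the landing page. To walk the whole site you still need to queue up each newly discovered internal link and fetch it exactly once. Below is a minimal sketch of that idea using requests and lxml as in the question; the helper name crawl_internal_links and the use of urllib.parse.urljoin are my own assumptions, not part of the original post.

# Sketch only: assumes requests and lxml are installed.
# crawl_internal_links is a hypothetical helper name, not from the original post.
import requests
from lxml import html
from urllib.parse import urljoin, urlparse

def crawl_internal_links(start_url):
    domain = urlparse(start_url).netloc
    visited = set()
    queue = [start_url]
    while queue:
        url = queue.pop(0)
        if url in visited:
            continue
        visited.add(url)
        try:
            r = requests.get(url, timeout=10)
        except requests.RequestException:
            continue  # skip pages that fail to load
        tree = html.fromstring(r.content)
        for href in tree.xpath('//a/@href'):
            absolute = urljoin(url, href)        # resolves relative links
            absolute = absolute.split('#')[0]    # drop fragments
            if urlparse(absolute).netloc == domain and absolute not in visited:
                queue.append(absolute)
    return visited

all_links = crawl_internal_links('http://www.bunchball.com/')

Keeping visited as a set guarantees each page is fetched at most once, which is the piece the question's while loop was trying to achieve.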