I wrote a spider to scrape a website. I am able to generate all the page URLs (the site is paginated). I need help crawling all of those pages and then printing the response.
import csv

from scrapy.spider import Spider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request

URL_STRING = "http://website.com/ct-50658/page-"

class SpiderName(Spider):
    name = "website"
    allowed_domains = ["website.com"]
    start_urls = ["http://website.com/page-2"]

    def printer(self, response):
        hxs = HtmlXPathSelector(response)
        x = hxs.select("//span/a/@title").extract()
        with open('website.csv', 'wb') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=' ',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            for i in x:
                spamwriter.writerow(i)

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        # sel = Selector(response)
        pages = hxs.select("//div[@id='srchpagination']/a/@href").extract()
        total_pages = int(pages[-2][-2:])
        j = 0
        url_list = []
        while j < total_pages:
            j = j + 1
            urls = URL_STRING + str(j)
            url_list.append(urls)
        for one_url in url_list:
            request = Request(one_url, callback=self.printer)
        return request
Answer (score: 1)
You are re-creating the 'website.csv' file for the response of every one_url request. You should probably create it once (for example in __init__), keep a reference to the CSV writer in an attribute of the spider, and refer to it as something like self.csvwriter inside def printer.

Also, in the for one_url in url_list: loop you should use yield Request(one_url, callback=self.printer). As written, you only return the last request.
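To see why in plain Python (the function names and URL strings below are placeholders, purely for illustration): a return after the loop hands back only the value from the final iteration, while yield produces one value per pass through the loop.

    # Illustration only: why returning from parse() drops requests.
    def with_return(urls):
        for u in urls:
            request = "Request(%s)" % u
        return request                      # only the last iteration's value survives

    def with_yield(urls):
        for u in urls:
            yield "Request(%s)" % u         # one value per iteration

    print(with_return(["page-1", "page-2", "page-3"]))        # Request(page-3)
    print(list(with_yield(["page-1", "page-2", "page-3"])))   # all three requests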
Here is a sample spider with those modifications and some code simplifications:
import csv

from scrapy.spider import Spider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request

URL_STRING = "http://website.com/ct-50658/page-"

class SpiderName(Spider):
    name = "website"
    allowed_domains = ["website.com"]
    start_urls = ["http://website.com/page-2"]

    def __init__(self, category=None, *args, **kwargs):
        super(SpiderName, self).__init__(*args, **kwargs)
        # Create the CSV writer once instead of re-opening the file
        # for every response.
        self.spamwriter = csv.writer(open('website.csv', 'wb'),
                                     delimiter=' ',
                                     quotechar='|',
                                     quoting=csv.QUOTE_MINIMAL)

    def printer(self, response):
        hxs = HtmlXPathSelector(response)
        for i in hxs.select("//span/a/@title").extract():
            # Wrap the title in a list so it is written as one column,
            # not one character per column.
            self.spamwriter.writerow([i])

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        pages = hxs.select("//div[@id='srchpagination']/a/@href").extract()
        total_pages = int(pages[-2][-2:])
        # Yield one request per page (pages 1..total_pages, as in the original loop).
        for j in range(1, total_pages + 1):
            yield Request(URL_STRING + str(j), callback=self.printer)
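One loose end in the example above: the file handle passed to csv.writer() in __init__ is never explicitly closed. A minimal sketch of one way to handle that with this older Scrapy API is to keep the file object around and hook the spider_closed signal; the class name CsvClosingSpider and attribute names here are just illustrative, not part of the original code.

    import csv

    from scrapy import signals
    from scrapy.spider import Spider
    from scrapy.xlib.pydispatch import dispatcher

    class CsvClosingSpider(Spider):
        name = "website-csv-closing"          # hypothetical name for this sketch

        def __init__(self, *args, **kwargs):
            super(CsvClosingSpider, self).__init__(*args, **kwargs)
            self.csvfile = open('website.csv', 'wb')     # keep the handle so it can be closed
            self.spamwriter = csv.writer(self.csvfile,
                                         delimiter=' ',
                                         quotechar='|',
                                         quoting=csv.QUOTE_MINIMAL)
            # Call self.spider_closed when the crawl finishes.
            dispatcher.connect(self.spider_closed, signals.spider_closed)

        def spider_closed(self, spider):
            self.csvfile.close()                         # flush everything to disk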