Hi! Why doesn't the spider follow the pages? I'm using rules... What am I doing wrong? It only works on one page. Here is the code:
# -*- encoding: -*-

class JobSpider(CrawlSpider):
    name = 'superjob'
    allowed_domains = ['superjob.ru']
    start_urls = [
        'http://www.superjob.ru/vacancy/search/?t%5B0%5D=4&sbmit=1&period=7'
    ]

    rules = [
        Rule(SgmlLinkExtractor(allow='/vacancy/search/?',
                               restrict_xpaths=(
                                   u'//a[@class="h_border_none"]/<span>следующая</span>')),
             callback='parse',
             follow=True),
    ]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        titles = hxs.select(
            '//*[@id="ng-app"]/div[2]/div/div[2]/div/div[1]/div[2]/div/div/h2/a')
        items = []
        for title in titles:
            item = JobItem()
            item['title'] = title.select('//h2/a/text()').extract()
            items.append(item)
        # return items
Answer 0 (score: 1)

5 things to fix:

- restrict_xpaths should point to the pagination block
- the callback should not be called parse(), since CrawlSpider uses parse() internally for its rule processing
- use LinkExtractor, SgmlLinkExtractor is deprecated (see the import comparison below)
- use xpath() instead of select(), and the response.xpath() shortcut
- use a relative text() expression inside the loop instead of the absolute //h2/a/text()
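On the deprecation point, the difference is mostly a matter of import paths: in pre-1.0 Scrapy the SGML-based extractor lived under scrapy.contrib, while current releases expose LinkExtractor and the CrawlSpider/Rule classes from top-level modules. A rough side-by-side for context (only the new imports are used in the fixed version below):

    # deprecated pre-1.0 style (what the question's code implies)
    from scrapy.contrib.spiders import CrawlSpider, Rule
    from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

    # current style, as used in the fixed spider
    from scrapy.spiders import CrawlSpider, Rule
    from scrapy.linkextractors import LinkExtractor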
Fixed version:
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class JobSpider(CrawlSpider):
    name = 'superjob'
    allowed_domains = ['superjob.ru']
    start_urls = [
        'http://www.superjob.ru/vacancy/search/?t%5B0%5D=4&sbmit=1&period=7'
    ]

    rules = [
        Rule(LinkExtractor(allow='/vacancy/search/\?',
                           restrict_xpaths=u'//div[@class="Paginator_navnums"]'),
             callback='parse_item',
             follow=True),
    ]

    def parse_item(self, response):
        titles = response.xpath('//*[@id="ng-app"]/div[2]/div/div[2]/div/div[1]/div[2]/div/div/h2/a')
        for title in titles:
            item = JobItem()
            item['title'] = title.xpath('text()').extract()
            yield item
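The fixed spider still assumes a JobItem class defined elsewhere in the project (the answer, like the question, does not show it). A minimal sketch of what it would need to contain for item['title'] to work, with the field name taken from the assignment above:

    import scrapy

    class JobItem(scrapy.Item):
        # the only field the spider populates
        title = scrapy.Field()

With that in place the spider can be run in the usual way, for example scrapy crawl superjob -o vacancies.json (the output filename here is just an illustration) to dump the extracted titles to a JSON feed.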