Scrapy and Selenium: scrape only two pages

Date: 2014-08-09 05:48:24

Tags: python selenium-webdriver scrapy-spider

I want to scrape a website with more than 10 pages. Each page has 10 links; the spider picks up the links in def parse() and follows each one to scrape further data in def parse_detail().

Please advise how to scrape only two pages instead of all of them. Thanks. Here is my code; it only scrapes one page and then the spider closes.

def __init__(self):
    self.driver = webdriver.Firefox()
    dispatcher.connect(self.spider_closed, signals.spider_closed)

def parse(self, response):
    self.driver.implicitly_wait(20) 
    self.driver.get(response.url)
    sites = self.driver.find_elements_by_css_selector("")
    for site in sites:
        item = CItem()
        linkiwant = site.find_element_by_css_selector(" ") 
        start = site.find_element_by_css_selector(" ")  
        item['link'] = linkiwant.get_attribute("href") 
        item['start_date']  = start.text
        yield Request(url=item['link'], meta={'item':item}, callback=self.parse_detail)  

    #how to write to only catch 2 pages??
    i=0
    if i< 2:
        try:
            next = self.driver.find_element_by_xpath("/li[@class='p_next'][1]")   
            next_page = next.text
            if next_page == "next_page":  
                next.click()    
                self.driver.refresh()  
                yield Request(self.driver.current_url, callback=self.parse)
                i+=1
        except:
             print "page not found"     
def parse_detail(self,response):
    item = response.meta['item']
    self.driver.implicitly_wait(20)  
    self.driver.get(response.url)
    sel = Selector(response)
    sites = sel.css("")            
    for site in sites:
        item['title'] = site.css(" ").extract()[0] 
        item['titleURL'] = site.css(" ").extract()[0]
        ..
        yield item   
def spider_closed(self, spider):
    self.driver.close()

1 Answer:

Answer 0 (score: 0):

Make i persistent:

def __init__(self):
    self.page_num = 0
    self.driver = webdriver.Firefox()
    dispatcher.connect(self.spider_closed, signals.spider_closed)

Then, at the end of parse(), use the instance attribute in place of the local i:

    #how to write to only catch 2 pages??
    if self.page_num < 2:
        try:
            next = self.driver.find_element_by_xpath("/li[@class='p_next'][1]")
            next_page = next.text
            if next_page == "next_page":
                next.click()
                self.driver.refresh()
                yield Request(self.driver.current_url, callback=self.parse)
                self.page_num += 1
        except:
            print "page not found"