Python Scrapy从下一页按钮继续获得相同的页面链接

时间:2017-05-04 09:05:50

标签: python html xpath css-selectors scrapy

我正在试图抓取amazon.com的产品链接超过800条评论但我不断从下一页按钮获得相同页面链接它一遍又一遍地返回第2页我应该获得第3页,4等等

我设置了一个如果条件溢出并转换为1,020个整数并且比较如果超过800或者那么基于访问页面的比较

这是代码

# -*- coding: utf-8 -*-
import scrapy
from amazon.items import AmazonItem
from urlparse import urljoin


class AmazonspiderSpider(scrapy.Spider):
    name = "amazonspider"
    DOWNLOAD_DELAY = 1
    start_urls = ['https://www.amazon.com/s/ref=lp_165993011_nr_n_0?fst=as%3Aoff&rh=n%3A165793011%2Cn%3A%21165795011%2Cn%3A165993011%2Cn%3A2514571011&bbn=165993011&ie=UTF8&qid=1493778423&rnid=165993011']


    def parse(self, response):


        SET_SELECTOR = '.a-carousel-card.acswidget-carousel__card'
        for attr in response.css(SET_SELECTOR):
            #print '\n\n', attr

            item = AmazonItem()

            review_selector = './/*[@class="acs_product-rating__review-count"]/text()'
            link_selector = './/*[@class="a-link-normal"]/@href'

            if attr.xpath(review_selector).extract_first():
                if int(''.join(attr.xpath(review_selector).extract_first().split(','))) >= 800:
                    url = urljoin(response.url, attr.xpath(link_selector).extract_first())
                    item['LINKS'] = url
                    if url:
                        yield scrapy.Request(url, callback=self.parse_link, meta={'item': item})  


            next_page = './/span[@class="pagnRA"]/a[@id="pagnNextLink"]/@href'
            next_page = response.xpath(next_page).extract_first()
            print '\n\n', urljoin(response.url, next_page)
            if next_page:
                yield scrapy.Request(
                    urljoin(response.url, next_page),
                    callback=self.parse
                )
    def parse_link(self, response):

        item = AmazonItem(response.meta['item'])

        catselector = '.cat-link ::text'
        defaultcatselector = '.nav-search-label ::text'
        cat = response.css(catselector).extract_first()
        if cat:
            item['CATAGORY'] = cat
        else:
            item['CATAGORY'] = response.css(defaultcatselector).extract_first()
        return item

这是我在递归调用解析函数之前打印下一页链接时的输出

herehere

这是页面下一页选择器的屏幕截图 here 我哪里出错?

1 个答案:

答案 0 :(得分:2)

将下一页代码块移到循环外。

class AmazonspiderSpider(scrapy.Spider):
name = "amazonspider"
DOWNLOAD_DELAY = 1
start_urls = ['https://www.amazon.com/s/ref=lp_165993011_nr_n_0?fst=as%3Aoff&rh=n%3A165793011%2Cn%3A%21165795011%2Cn%3A165993011%2Cn%3A2514571011&bbn=165993011&ie=UTF8&qid=1493778423&rnid=165993011']


def parse(self, response):


    SET_SELECTOR = '.a-carousel-card.acswidget-carousel__card'
    for attr in response.css(SET_SELECTOR):
        #print '\n\n', attr


        review_selector = './/*[@class="acs_product-rating__review-count"]/text()'
        link_selector = './/*[@class="a-link-normal"]/@href'

        if attr.xpath(review_selector).extract_first():
            if int(''.join(attr.xpath(review_selector).extract_first().split(','))) >= 800:
                url = urljoin(response.url, attr.xpath(link_selector).extract_first())


   next_page = './/span[@class="pagnRA"]/a[@id="pagnNextLink"]/@href'
   next_page = response.xpath(next_page).extract_first()
   print '\n\n', urljoin(response.url, next_page)

   if next_page:
       yield scrapy.Request(
           urljoin(response.url, next_page),
           callback=self.parse
       )