如何为每个项调用Parse_page2方法

时间:2013-05-28 11:54:57

标签: python scrapy

我想为每个项目(item)调用parse_page2方法。但每次我运行这个蜘蛛时,每个页面都只能获得一个项目。那么,我该如何为每个项目都调用一次parse_page2方法呢?

from scrapy.http import Request
from eScraper.items import EscraperItem
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider

#------------------------------------------------------------------------------ 

class ESpider(CrawlSpider):
    """Spider for atisundar.com product listings.

    For every product block found on a listing page, builds an
    ``EscraperItem`` and schedules a follow-up request to the SKU-details
    endpoint, passing the partially-filled item along in request meta.

    NOTE(review): this overrides ``parse`` on a ``CrawlSpider`` without
    defining any ``rules``; inheriting from ``BaseSpider`` would be the
    more conventional choice (see the accepted answer below).
    """
    name = "atisundarSpider"

    allowed_domains = ["atisundar.com"]

    # Pre-build the listing URLs for pages 1-19 of both categories.
    URLSList = []
    for n in range(1, 20):
        URLSList.append('http://atisundar.com/collections/sarees?page=' + str(n))
        URLSList.append('http://atisundar.com/collections/salwar-suits?page=' + str(n))
    start_urls = URLSList

    def parse(self, response):
        """Yield one detail-page request per product on a listing page.

        Bug fix: the original body used ``return request`` inside the
        loop, which exited ``parse`` on the first product so only one
        item per page was ever scraped.  ``yield`` emits a request for
        every product.  The unused ``items`` accumulator was removed.
        """
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="block product size-medium"]')

        for site in sites:
            item = EscraperItem()
            item['productDesc'] = ""
            item['productSite'] = ["http://atisundar.com/"]
            item['productTitle'] = site.select('.//div[@class="main"]/a/@title').extract()
            item['productURL'] = ["http://atisundar.com" + site.select('.//div[@class="main"]/a/@href').extract()[0].encode('utf-8')]
            item['productPrice'] = site.select('.//p[@class="pricearea"]//span[@class="was-price"]/text()').extract() + site.select('.//p[@class="pricearea"]//span[@class="price"]/text()').extract()
            item['productImage'] = [site.select('.//div[@class="main"]/a/img/@src').extract()[0].split('?')[0]] + [site.select('.//div[@class="main"]/a/img/@src').extract()[0].split('?')[0].replace("medium","grande")]
            item['image_urls'] = item['productImage']

            # The product id is embedded after '#' in the anchor text.
            secondURL = "http://admin.atisundar.com/store/skuDetails?product_id=" + site.select('.//div[@class="main"]/a/text()').extract()[1].strip().split("#")[-1]

            request = Request(secondURL,
                      callback=self.parse_page2)
            request.meta['item'] = item
            # yield (not return) so every product gets its own request.
            yield request

    def parse_page2(self, response):
        """Return the item carried over from ``parse`` via request meta."""
        item = response.meta['item']
        #item['other_url'] = response.url
        return item

1 个答案:

答案 0 :(得分:1)

1)您没有使用CrawlSpider功能,我建议您从BaseSpider继承您的蜘蛛

2)在for循环

for site in sites:

使用yield而不是return,否则它会在第一次迭代时就从循环中返回,后面的项目都不会被处理。

yield request

3)在parse_page2中,从response.request.meta(而不是response.meta)中获取item

item = response.request.meta['item']

现在应该可以了。

from scrapy.http import Request
from eScraper.items import EscraperItem
from scrapy.selector import HtmlXPathSelector

#------------------------------------------------------------------------------ 
from scrapy.spider import BaseSpider


class ESpider(BaseSpider):
    """Corrected spider from the accepted answer.

    Inherits ``BaseSpider``, yields one follow-up request per product
    (instead of returning after the first), and reads the item back out
    of the request's meta dict in ``parse_page2``.

    Fix applied here: in the original post ``parse`` and ``parse_page2``
    were dedented to module level (indentation lost when pasting), so
    they were not methods of the class; they are reindented into the
    class body below.
    """
    name = "atisundarSpider"

    allowed_domains = ["atisundar.com"]

    # Pre-build the listing URLs for pages 1-19 of both categories.
    URLSList = []
    for n in range(1, 20):
        URLSList.append('http://atisundar.com/collections/sarees?page=' + str(n))
        URLSList.append('http://atisundar.com/collections/salwar-suits?page=' + str(n))
    start_urls = URLSList

    def parse(self, response):
        """Yield one detail-page request per product on a listing page."""
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="block product size-medium"]')
        for site in sites:
            item = EscraperItem()
            item['productDesc'] = ""
            item['productSite'] = ["http://atisundar.com/"]
            item['productTitle'] = site.select('.//div[@class="main"]/a/@title').extract()
            item['productURL'] = ["http://atisundar.com" + site.select('.//div[@class="main"]/a/@href').extract()[0].encode('utf-8')]
            item['productPrice'] = site.select('.//p[@class="pricearea"]//span[@class="was-price"]/text()').extract() + site.select('.//p[@class="pricearea"]//span[@class="price"]/text()').extract()
            item['productImage'] = [site.select('.//div[@class="main"]/a/img/@src').extract()[0].split('?')[0]] + [site.select('.//div[@class="main"]/a/img/@src').extract()[0].split('?')[0].replace("medium","grande")]
            item['image_urls'] = item['productImage']
            # The product id is embedded after '#' in the anchor text.
            secondURL = "http://admin.atisundar.com/store/skuDetails?product_id=" + site.select('.//div[@class="main"]/a/text()').extract()[1].strip().split("#")[-1]
            request = Request(secondURL,
                      callback=self.parse_page2)
            request.meta['item'] = item
            yield request

    def parse_page2(self, response):
        """Return the item passed along in the request's meta dict."""
        item = response.request.meta['item']
        #item['other_url'] = response.url
        return item