scrapy: passing an item through multiple parse callbacks and collecting data

Date: 2013-02-10 14:16:21

Tags: python scrapy

This is my first attempt at populating an item while passing it from one page to the next.

It runs on every loop, and the gender information also arrives correctly in parse_3, but g2 does not match the category of the response URL, and g1 (the first category level) is always the last element of the list I loop over in parse_sub...

I'm definitely doing something wrong, but I can't find the problem. It would be great if someone could explain to me how this works.

Best, Jack

# imports needed to run this snippet (Scrapy 0.16-era API); the catItem class
# is assumed to live in the project's items module (import path is a placeholder)
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from myproject.items import catItem  # assumed project-specific import path

class xspider(BaseSpider):
    name = 'x'
    allowed_domains = ['x.com']
    start_urls = ['http://www.x.com']

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        maincats = hxs.select('//ul[@class="Nav"]/li/a/@href').extract()[1:3]
        for maincat in maincats:
            item = catItem()
            if 'men' in maincat:
                item['gender'] = 'men'
                maincat = 'http://www.x.com' + maincat
                request = Request(maincat, callback=self.parse_sub)
                request.meta['item'] = item
            if 'woman' in maincat:
                item['gender'] = []
                item['gender'] = 'woman'
                maincat = 'http://www.x.com' + maincat
                request = Request(maincat, callback=self.parse_sub)
                request.meta['item'] = item
            yield request

    def parse_sub(self, response):
        i = 0
        hxs = HtmlXPathSelector(response)
        subcats = hxs.select('//ul[@class="sub Sprite"]/li/a/@href').extract()[0:5]
        text = hxs.select('//ul[@class="sub Sprite"]/li/a/span/text()').extract()[0:5]
        for item in text:
            item = response.meta['item']
            subcat = 'http://www.x.com' + subcats[i]
            request = Request(subcat, callback=self.parse_subcat)
            item['g1'] = text[i]
            item['gender'] = response.request.meta['item']
            i = i + 1
            request.meta['item'] = item
            yield request

    def parse_subcat(self, response):
        hxs = HtmlXPathSelector(response)
        test = hxs.select('//ul[@class="sub"]/li/a').extract()
        for s in test:
            item = response.meta['item']
            item['g2'] = []
            item['g2'] = hxs.select('//span[@class="Active Sprite"]/text()').extract()[0]
            s = s.encode('utf-8','ignore')
            link = s[s.find('href="')+6:][:s[s.find('href="')+6:].find('/"')]
            link = 'http://www.x.com/' + str(link) + '/'
            request = Request(link, callback=self.parse_3)
            request.meta['item'] = item
            yield request

    def parse_3(self, response):
        item = response.meta['item']
        print item

1 Answer:

Answer 0: (score: 2)

def parse_subcat(self, response):
    hxs = HtmlXPathSelector(response)
    test = hxs.select('//ul[@class="sub"]/li/a').extract()
    for s in test:
        item = response.meta['item']
        item['g2'] = []
        item['g2'] = hxs.select('//span[@class="Active Sprite"]/text()').extract()[0]
        s = s.encode('utf-8','ignore')
        link = s[s.find('href="')+6:][:s[s.find('href="')+6:].find('/"')]
        link = 'http://www.x.com/' + str(link) + '/'
        request = Request(link, callback=self.parse_3)
        request.meta['item'] = item
        yield request

The response does not carry the meta itself, but its request does. Hence item = response.meta['item'] should be item = response.request.meta['item'].
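
For reference, here is a minimal, self-contained sketch of the pattern the answer describes: a value is attached to a Request through its meta dict and read back in the callback via response.request.meta. The spider name, URLs and field names below are illustrative and are not taken from the question's project.

# Minimal sketch of carrying data from one callback to the next via Request.meta
# (Scrapy 0.16-era API); all names and URLs here are placeholders.
from scrapy.spider import BaseSpider
from scrapy.http import Request

class metademo(BaseSpider):
    name = 'metademo'
    allowed_domains = ['example.com']
    start_urls = ['http://www.example.com/']

    def parse(self, response):
        for gender in ('men', 'woman'):
            request = Request('http://www.example.com/' + gender + '/',
                              callback=self.parse_category)
            # stash the value on the request before yielding it
            request.meta['gender'] = gender
            yield request

    def parse_category(self, response):
        # read the value back from the request that produced this response
        gender = response.request.meta['gender']
        self.log('category page for %s: %s' % (gender, response.url))

Because meta travels with each individual request, any per-request value has to be set on that request before it is yielded.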