Scraping multiple pages of data within each page

Posted: 2014-05-28 16:48:26

Tags: python web-scraping scrapy

So I'm working with 4 different individual "projects" ... here are two examples.

from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
from scrapy.http import Request
from botg.items import BotgItem


URL = "http://store.tcgplayer.com/magic/born-of-the-gods?PageNumber=%d"

class MySpider(BaseSpider):
    name = "tcg"
    allowed_domains = ["tcgplayer.com"]
    start_urls = [URL % 1]

    def __init__(self):
        self.page_number = 1

    def parse(self, response):
        print self.page_number
        print "--------------------BREAK-------------------------"

        sel = Selector(response)
        titles = sel.xpath("//div[@class='magicCard']")
        # Stop the crawl once a page comes back with no cards.
        if not titles:
            raise CloseSpider('No more pages')

        for title in titles:
            item = BotgItem()
            item["cardname"] = title.xpath(".//li[@class='cardName']/a/text()").extract()[0]

            # Each card block carries per-vendor pricing details.
            vendor = title.xpath(".//tr[@class='vendor ']")
            item["price"] = vendor.xpath("normalize-space(.//td[@class='price']/text())").extract()
            item["quantity"] = vendor.xpath("normalize-space(.//td[@class='quantity']/text())").extract()
            item["shipping"] = vendor.xpath("normalize-space(.//span[@class='shippingAmount']/text())").extract()
            item["condition"] = vendor.xpath("normalize-space(.//td[@class='condition']/a/text())").extract()
            item["vendors"] = vendor.xpath("normalize-space(.//td[@class='seller']/a/text())").extract()
            yield item

        # Queue the next page of the set.
        self.page_number += 1
        yield Request(URL % self.page_number)

That's one ... and the other one is:

from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
from scrapy.http import Request
from tcgplayer1.items import Tcgplayer1Item


URL = "http://store.tcgplayer.com/magic/journey-into-nyx?pageNumber=%d"

class MySpider(BaseSpider):
    name = "tcg"
    allowed_domains = ["tcgplayer.com"]
    start_urls = [URL % 1]

    def __init__(self):
        self.page_number = 1

    def parse(self, response):
        print self.page_number
        print "----------"

        sel = Selector(response)
        titles = sel.xpath("//div[@class='magicCard']")
        if not titles:
            raise CloseSpider('No more pages')

        for title in titles:
            item = Tcgplayer1Item()
            item["cardname"] = title.xpath(".//li[@class='cardName']/a/text()").extract()[0]

            vendor = title.xpath(".//tr[@class='vendor ']")
            item["price"] = vendor.xpath("normalize-space(.//td[@class='price']/text())").extract()
            item["quantity"] = vendor.xpath("normalize-space(.//td[@class='quantity']/text())").extract()
            item["shipping"] = vendor.xpath("normalize-space(.//span[@class='shippingAmount']/text())").extract()
            item["condition"] = vendor.xpath("normalize-space(.//td[@class='condition']/a/text())").extract()
            item["vendors"] = vendor.xpath("normalize-space(.//td[@class='seller']/a/text())").extract()
            yield item

        self.page_number += 1
        yield Request(URL % self.page_number)

I want to consolidate all of these into a single project that scrapes the 4 sets, paginating through ~8 pages of data for each one. I think this calls for a "master" items file, but I can't seem to get the spider to move through the multiple pages and their data.
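For reference, here is a minimal sketch of one way to merge the spiders, assuming the item classes all declare the same six fields and can be replaced by a single hypothetical CardItem in a shared items.py (the module path myproject.items is also an assumption). It keeps one URL template per set and carries each set's page number in request.meta, so the sets paginate independently; a shared self.page_number counter would get clobbered once several sets crawl concurrently:

from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.http import Request
from myproject.items import CardItem  # hypothetical merged item class

# One URL template per set; %d is the page number.
SET_URLS = [
    "http://store.tcgplayer.com/magic/born-of-the-gods?PageNumber=%d",
    "http://store.tcgplayer.com/magic/journey-into-nyx?pageNumber=%d",
    # ... the other two sets
]

class MySpider(BaseSpider):
    name = "tcg"
    allowed_domains = ["tcgplayer.com"]

    def start_requests(self):
        # Kick off page 1 of every set, remembering the template and
        # page number in meta so each set paginates on its own.
        for url in SET_URLS:
            yield Request(url % 1, meta={"template": url, "page": 1})

    def parse(self, response):
        sel = Selector(response)
        titles = sel.xpath("//div[@class='magicCard']")
        if not titles:
            return  # this set is exhausted; leave the other sets running

        for title in titles:
            item = CardItem()
            item["cardname"] = title.xpath(".//li[@class='cardName']/a/text()").extract()[0]
            vendor = title.xpath(".//tr[@class='vendor ']")
            item["price"] = vendor.xpath("normalize-space(.//td[@class='price']/text())").extract()
            # ... the remaining fields exactly as in the originals
            yield item

        # Request the next page of this same set.
        template = response.meta["template"]
        page = response.meta["page"] + 1
        yield Request(template % page, meta={"template": template, "page": page})

Note that raise CloseSpider('No more pages') would shut down the whole spider as soon as any one set ran out of pages; returning from parse instead ends pagination only for that set.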
