I'm a beginner with Python and Scrapy. I need help: I have to extract a list of products, but the site has a "View more" button that ultimately fires an AJAX text/html request and loads another chunk of HTML with the new products.
import scrapy
from scrapy.http import Request


class ProdSpider(scrapy.Spider):
    name = "easy"
    allowed_domains = ["easy.com.ar"]
    start_urls = ["https://www.easy.com.ar/webapp/wcs/stores/servlet/es/easyar/search/AjaxCatalogSearchResultContentView?searchTermScope=&searchType=1002&filterTerm=&orderBy=&maxPrice=&showResultsPage=true&langId=-5&sType=SimpleSearch&metaData=&pageSize=12&manufacturer=&resultCatEntryType=&catalogId=10051&pageView=image&searchTerm=&minPrice=&categoryId=39652&storeId=10151&beginIndex=12"]

    beginIndex_index = 12

    def parse(self, response):
        SECTION_SELECTOR = '.thumb-product'
        for soar in response.css(SECTION_SELECTOR):
            Link = 'div.dojoDndItem a ::attr(href)'
            # Marca = 'p.brand a ::text'
            Nombre = 'div.thumb-name a ::text'
            # SKU = './/p[@class="sku"]/text()'
            Price = './/span[@id="tarj-mas-edit"]/text()'  # alternative: .//span[@class="thumb-price-e"]/text()
            yield {
                'Link': soar.css(Link).extract_first(),
                # 'Marca': soar.css(Marca).extract_first(),
                'Nombre': soar.css(Nombre).re_first(r'\n\s*(.*)'),  # strips leading whitespace
                # 'SKU': soar.xpath(SKU).re_first(r'SKU:\s*(.*)'),
                'Price': soar.xpath(Price).re_first(r'\n\s*(.*)'),
            }

        # here, if no products are available, I want to simply return, meaning
        # exit from parse and ultimately stop the spider
        self.beginIndex_index += 12
        if beginIndex_index:
            yield Request(url="https://www.easy.com.ar/webapp/wcs/stores/servlet/es/easyar/search/AjaxCatalogSearchResultContentView?searchTermScope=&searchType=1002&filterTerm=&orderBy=&maxPrice=&showResultsPage=true&langId=-5&sType=SimpleSearch&metaData=&pageSize=12&manufacturer=&resultCatEntryType=&catalogId=10051&pageView=image&searchTerm=&minPrice=&categoryId=39652&storeId=10151&beginIndex=%s" % (self.beginIndex_index + 12),
                          callback=self.parse)
I tried the code above, but it only captures 12 products. The only parameter that changes in the URL is beginIndex=12, and I want to keep adding 12 to it until the product list ends. I'm stuck on this!

Thanks!
Answer 0 (score: 4)
I'd suggest using selenium, so you can "click" the View more button and load the extra data from inside the spider. Here is a sample spider (I haven't tested it, but it shows the general idea):
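A minimal sketch of that approach, untested: the start URL and the ".ver-mas" button selector below are assumptions for illustration, not values taken from the real page, so both would need to be checked against the live site.

import time

import scrapy
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By


class ProdSeleniumSpider(scrapy.Spider):
    name = "easy_selenium"
    allowed_domains = ["easy.com.ar"]
    # Placeholder: put the real category page that shows the "View more" button here.
    start_urls = ["https://www.easy.com.ar/"]

    def __init__(self, *args, **kwargs):
        super(ProdSeleniumSpider, self).__init__(*args, **kwargs)
        self.driver = webdriver.Firefox()

    def parse(self, response):
        self.driver.get(response.url)
        while True:
            try:
                # ".ver-mas" is an assumed selector for the "View more" button;
                # inspect the real page to find the actual one.
                self.driver.find_element(By.CSS_SELECTOR, '.ver-mas').click()
                time.sleep(2)  # crude wait for the AJAX-loaded products; WebDriverWait would be more robust
            except NoSuchElementException:
                break  # button gone: every product is now on the page

        # Hand the fully expanded HTML back to Scrapy's selectors
        # (product selectors reused from the question's code).
        sel = scrapy.Selector(text=self.driver.page_source)
        for product in sel.css('.thumb-product'):
            yield {
                'Link': product.css('div.thumb-name a ::attr(href)').extract_first(),
                'Nombre': product.css('div.thumb-name a ::text').extract_first(),
            }
        self.driver.quit()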
Answer 1 (score: 1)
You almost got it!

I see your URL has another parameter called pageSize. I tested it, and the site lets you set it as high as 50. To know when to stop, you can check whether response.css(SECTION_SELECTOR) returned any items before yielding another request:
import scrapy
from scrapy.http import Request
from scrapy import Selector


class ProdSpider(scrapy.Spider):
    name = "easy"
    allowed_domains = ["easy.com.ar"]
    url = "https://www.easy.com.ar/webapp/wcs/stores/servlet/es/easyar/search/AjaxCatalogSearchResultContentView?searchTermScope=&searchType=1002&filterTerm=&orderBy=&maxPrice=&showResultsPage=true&langId=-5&sType=SimpleSearch&metaData=&pageSize=50&manufacturer=&resultCatEntryType=&catalogId=10051&pageView=image&searchTerm=&minPrice=&categoryId=39652&storeId=10151&beginIndex={pagenum}"

    product_fields_xpath = {
        'Link': '//a[contains(@id, "CatalogEntry")]/@href',
        'Nombre': '//a[contains(@id, "CatalogEntry")]/text()',
        'Price': './/span[@class="thumb-price-e"]/text()',
    }

    section_selector = '//div[@class="thumb-product"]'
    begin_index = 0

    def start_requests(self):
        yield Request(url=self.url.format(pagenum=self.begin_index),
                      callback=self.parse)

    def parse(self, response):
        products = response.xpath(self.section_selector).extract()

        n_items = 0
        for product in products:
            n_items += 1
            sel = Selector(text=product)

            item = dict()
            for k, v in self.product_fields_xpath.items():
                item[k] = sel.xpath(v).extract_first()
            yield item

        # request the next page only if this one actually had products
        self.begin_index += 50
        if n_items > 0:
            yield Request(url=self.url.format(pagenum=self.begin_index),
                          callback=self.parse)
I haven't tested this code, but I hope you get the idea.
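For reference, assuming the spider file lives in a standard Scrapy project, you could run it and export the items with:

scrapy crawl easy -o products.json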