使用 CSS 属性选择器在页面中抓取数据

时间:2020-01-13 19:43:02

标签: python scrapy

我正在此页面上尝试使用scrapy:http://it.rs-online.com/web/p/sensori-di-prossimita-induttivi/7858468/

但是我无法提取出产品的图片,也找不到自己可能遗漏了什么。

我尝试了按属性、按 ID、按类来选择,都没有成功。

import scrapy
from scrapy import Request
import random



class BrickSetSpider(scrapy.Spider):
    """Scrape product name/image data from an rs-online.com product page.

    NOTE(review): per the accepted answer, the product image on this page
    is injected by JavaScript, so the plain HTML response may not contain
    the ``img[itemprop="image"]`` tag at all — confirm by inspecting
    ``response.body`` before relying on that selector.
    """

    name = 'spider'
    # Pool of User-Agent strings; one is picked at random for each request.
    USER_AGENT_LIST = [
        'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0',
    ]
    start_urls = [
        'https://it.rs-online.com/web/p/sensori-di-prossimita-induttivi/7858468/',
    ]
    download_delay = 5  # seconds between requests (politeness / throttling)
    FEED_EXPORT_ENCODING = 'utf-8'

    def start_requests(self):
        """Yield one Request per start URL with a randomized User-Agent header."""
        for url in self.start_urls:
            headers = {'User-Agent': random.choice(self.USER_AGENT_LIST)}
            yield Request(url, headers=headers)

    def parse(self, response):
        """Extract the product name(s) and image URL(s) from the page.

        Yields one dict per ``.content-left`` node with keys
        ``search`` (page URL), ``name`` and ``link``.
        """
        SET_SELECTOR = '.content-left'
        NAME_SELECTOR = 'span.keyValue span  ::text'
        IMAGE_SELECTOR = 'img[itemprop="image"] ::attr(src)'
        # The page URL is identical for every matched node: compute it once
        # instead of re-reading it on each loop iteration.
        search_url = response.url
        for brickset in response.css(SET_SELECTOR):
            yield {
                'search': search_url,
                # Drop whitespace-only fragments (tabs/newlines) from the text nodes.
                'name': brickset.css(NAME_SELECTOR).re(r'[^\t\n]+'),
                'link': brickset.css(IMAGE_SELECTOR).extract(),
            }

2 个答案:

答案 0 :(得分:2)

如果您使用的是 Chrome,则可以在控制台中用 $$(".images [data-test='zoom-wrap'] img") 对其进行测试以获取图像。

因此,您可以在Scrapy代码中使用此CSS选择器。您将必须提取src参数。

希望对您有帮助!

答案 1 :(得分:1)

图像是由JS动态生成的。尝试以下代码。

代码如下:

from simplified_scrapy.spider import Spider, SimplifiedDoc
import re


class MySpider(Spider):
  """Spider for an rs-online.com product page.

  The product images are injected by JavaScript, so they are pulled out of
  the inline <script> text (``largeImageURL`` entries) instead of <img> tags.
  """
  name = 'rs-online.com'
  # allowed_domains = ['example.com']
  start_urls = [
    'https://it.rs-online.com/web/p/sensori-di-prossimita-induttivi/7858468/'
  ]
  # refresh_urls = True  # For debug. If refresh_urls = True, start_urls will be crawled again.

  # Compiled once at class-creation time instead of on every extract() call.
  # [^"]* (rather than greedy .*) stops at the first closing quote, so a
  # script line containing further quoted text after the URL cannot
  # over-match past the image URL.
  _IMG_PREFIX = 'largeImageURL: "'
  _IMG_RE = re.compile(r'largeImageURL: "[^"]*"')

  def extract(self, url, html, models, modelNames):
    """Parse one downloaded page and return its image URLs and key details.

    Returns the ``{"Urls": [...], "Data": [...]}`` structure expected by the
    simplified_scrapy framework.
    """
    doc = SimplifiedDoc(html)
    div = doc.getElementByClass('content-left')
    # Image URLs live inside a <script> within the div, not in <img> tags.
    imgs = self._IMG_RE.findall(div.script.html)
    # Strip the 'largeImageURL: "' prefix and the trailing quote; the URLs
    # are protocol-relative, so prepend the scheme.
    imgs = ['https:' + img[len(self._IMG_PREFIX):-1] for img in imgs]
    lis = doc.getElementByClass('keyDetailsLL').lis
    # Each <li> holds a (label, value) pair of <span>s.
    names = {}
    for li in lis:
      spans = li.spans
      names[spans[0].text] = spans[1].text
    data = [{'imgs': imgs, 'names': names}]
    print(data)
    return {"Urls": [], "Data": data}  # Return data to framework

from simplified_scrapy.simplified_main import SimplifiedMain
SimplifiedMain.startThread(MySpider())  # Start crawling