import scrapy

from somewebsite.items import somewebsiteItem  # assumed import path for the project's item class


class SomewebsiteProductSpider(scrapy.Spider):
    name = "somewebsite"
    allowed_domains = ["somewebsite.com"]
    start_urls = [
    ]

    def parse(self, response):
        items = somewebsiteItem()
        title = response.xpath('//h1[@id="title"]/span/text()').extract()
        sale_price = response.xpath('//span[contains(@id,"ourprice") or contains(@id,"saleprice")]/text()').extract()
        category = response.xpath('//a[@class="a-link-normal a-color-tertiary"]/text()').extract()
        availability = response.xpath('//div[@id="availability"]//text()').extract()
        items['product_name'] = ''.join(title).strip()
        items['product_sale_price'] = ''.join(sale_price).strip()
        items['product_category'] = ','.join(map(lambda x: x.strip(), category)).strip()
        items['product_availability'] = ''.join(availability).strip()
        # note: the original path mixed "\P" (unescaped) with "\\"; escape every backslash
        with open("C:\\Users\\user1\\PycharmProjects\\test.txt", "w") as fo:
            fo.write("%s \n%s \n%s" % (items['product_name'], items['product_sale_price'], self.start_urls))
        print(items)
        yield items
test.py
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})
process.crawl(SomewebsiteProductSpider)
process.start()
How can I pass a dynamic start_urls list to the SomewebsiteProductSpider object from test.py before starting the crawl? Any help would be appreciated. Thanks.
Answer 0 (score: 1)
process.crawl accepts optional arguments that are passed on to the spider's constructor, so you can either populate start_urls from the spider's __init__ or use a custom start_requests method (a sketch of the latter follows the example below). For example:
test.py
...
process.crawl(SomewebsiteProductSpider, url_list=[...])
somespider.py
class SomewebsiteProductSpider(scrapy.Spider):
    ...

    def __init__(self, *args, **kwargs):
        self.start_urls = kwargs.pop('url_list', [])
        super(SomewebsiteProductSpider, self).__init__(*args, **kwargs)
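
For the custom start_requests alternative mentioned above, a minimal sketch, assuming the same hypothetical url_list constructor argument, could look like:

import scrapy


class SomewebsiteProductSpider(scrapy.Spider):
    name = "somewebsite"

    def __init__(self, *args, **kwargs):
        # same hypothetical url_list argument passed via process.crawl(...)
        self.url_list = kwargs.pop('url_list', [])
        super(SomewebsiteProductSpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        # yield one request per dynamically supplied URL instead of relying on start_urls
        for url in self.url_list:
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        pass  # parsing logic as in the question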
Answer 1 (score: 0)
You can also pass start_urls itself as the keyword argument, which avoids having to pop the extra kwarg as in @mizghun's answer.
import scrapy
from scrapy.crawler import CrawlerProcess


class QuotesSpider(scrapy.Spider):
    name = 'quotes'

    def parse(self, response):
        print(response.url)


process = CrawlerProcess()
# keyword arguments to crawl() are set as attributes on the spider instance
process.crawl(QuotesSpider, start_urls=["http://example.com", "http://example.org"])
process.start()
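
This works because scrapy.Spider's default __init__ copies keyword arguments onto the spider instance, so the start_urls passed to process.crawl ends up exactly where Scrapy looks for it. As a usage sketch, the list can then be built at runtime; urls.txt here is a hypothetical input file with one URL per line:

from scrapy.crawler import CrawlerProcess

# build the start_urls list dynamically instead of hard-coding it
with open("urls.txt") as f:
    urls = [line.strip() for line in f if line.strip()]

process = CrawlerProcess()
process.crawl(QuotesSpider, start_urls=urls)
process.start()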