I have written code that extracts some links (PDF links) from a website, and now I need to download those PDF files, but I am struggling with that part. Here is the code:
import scrapy


class all5(scrapy.Spider):
    name = "all5"
    start_urls = [
        'https://www.alloschool.com/course/alriadhiat-alaol-ibtdaii',
    ]

    def parse(self, response):
        for link in response.css('.default .er').xpath('@href').extract():
            url = response.url
            path = response.css('ol.breadcrumb li a::text').extract()
            next_link = response.urljoin(link)
            yield scrapy.Request(next_link, callback=self.parse_det, meta={'url': url, 'path': path})

    def parse_det(self, response):
        def extract_with_css(query):
            return response.css(query).get(default='').strip()

        yield {
            'path': response.meta['path'],
            'finallink': extract_with_css('a.btn.btn-primary::attr(href)'),
            'url': response.meta['url'],
        }
The links I need to download are in 'finallink'. How can I solve this problem?
Answer (score: 0)
In your settings you have to activate the files pipeline
ITEM_PIPELINES = {'scrapy.pipelines.files.FilesPipeline': 1}
and set the folder for the downloaded files
FILES_STORE = '.'
It will download files into FILES_STORE/full.
And when you yield the data you have to use the field name file_urls
yield {
    'file_urls': [extract_with_css('a.btn.btn-primary::attr(href)')],
    # ... rest ...
}
It has to be a list even if you want to download only one file.
It should download the PDFs to files with unique names, and you get information about the downloaded files in the files field of the scraped data.
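For illustration, an exported item would then look roughly like this (the URL, hash and checksum below are only placeholders, not real values from the site):

{
    'path': [...],
    'url': '...',
    'file_urls': ['https://example.com/some-lesson.pdf'],
    'files': [{
        'url': 'https://example.com/some-lesson.pdf',
        'path': 'full/0a1b2c...d.pdf',   # SHA1-based name inside FILES_STORE
        'checksum': '...'
    }]
}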
Scrapy documentation: Downloading and processing files and images
EDIT: standalone code which you can copy and run without creating a project.
#!/usr/bin/env python3

import scrapy


class MySpider(scrapy.Spider):
    name = 'myspider'
    start_urls = [
        'https://www.alloschool.com/course/alriadhiat-alaol-ibtdaii',
    ]

    def parse(self, response):
        # follow every lesson link on the course page
        for link in response.css('.default .er').xpath('@href').extract():
            url = response.url
            path = response.css('ol.breadcrumb li a::text').extract()
            next_link = response.urljoin(link)
            yield scrapy.Request(next_link, callback=self.parse_det, meta={'url': url, 'path': path})

    def parse_det(self, response):
        def extract_with_css(query):
            return response.css(query).get(default='').strip()

        yield {
            'path': response.meta['path'],
            'file_urls': [extract_with_css('a.btn.btn-primary::attr(href)')],
            'url': response.meta['url'],
        }


from scrapy.crawler import CrawlerProcess

c = CrawlerProcess({
    'USER_AGENT': 'Mozilla/5.0',

    # save scraped items in a file as CSV, JSON or XML
    'FEED_FORMAT': 'csv',      # csv, json, xml
    'FEED_URI': 'output.csv',

    # download files to `FILES_STORE/full`
    # it needs `yield {'file_urls': [url]}` in `parse_det()`
    'ITEM_PIPELINES': {'scrapy.pipelines.files.FilesPipeline': 1},
    'FILES_STORE': '.',
})

c.crawl(MySpider)
c.start()
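Running this script directly (for example as python myspider.py; the file name is just an assumption) should write the scraped items to output.csv and store the PDFs under ./full/ with SHA1-based names. If you would rather keep the original file names from the URLs, one option is to subclass FilesPipeline and override its file_path() method. This is only a minimal sketch, not part of the original answer; it assumes every finallink ends in a usable file name and that no two PDFs share the same name:

import os
from urllib.parse import urlparse

from scrapy.pipelines.files import FilesPipeline


class KeepNamePipeline(FilesPipeline):
    # Hypothetical subclass: store each PDF under its original URL name
    # instead of the default SHA1-based name used by FilesPipeline.
    def file_path(self, request, response=None, info=None, *, item=None):
        # last path segment of the download URL, e.g. "some-lesson.pdf"
        name = os.path.basename(urlparse(request.url).path)
        return f'full/{name}'

To use it in the standalone script, register it instead of the stock pipeline, e.g. 'ITEM_PIPELINES': {'__main__.KeepNamePipeline': 1}.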