我是Scrapy的新手。我正在尝试使用媒体管道下载文件。但是当我运行蜘蛛时,没有文件存储在文件夹中。
蜘蛛:
import scrapy
from scrapy import Request
from pagalworld.items import PagalworldItem
class JobsSpider(scrapy.Spider):
name = "songs"
allowed_domains = ["pagalworld.me"]
start_urls =['https://pagalworld.me/category/11598/Latest%20Bollywood%20Hindi%20Mp3%20Songs%20-%202017.html']
def parse(self, response):
urls = response.xpath('//div[@class="pageLinkList"]/ul/li/a/@href').extract()
for link in urls:
yield Request(link, callback=self.parse_page, )
def parse_page(self, response):
songName=response.xpath('//li/b/a/@href').extract()
for song in songName:
yield Request(song,callback=self.parsing_link)
def parsing_link(self,response):
item= PagalworldItem()
item['file_urls']=response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
yield{"download_link":item['file_urls']}
项目文件:
import scrapy
class PagalworldItem(scrapy.Item):
file_urls=scrapy.Field()
设置文件:
BOT_NAME = 'pagalworld'
SPIDER_MODULES = ['pagalworld.spiders']
NEWSPIDER_MODULE = 'pagalworld.spiders'
ROBOTSTXT_OBEY = True
CONCURRENT_REQUESTS = 5
DOWNLOAD_DELAY = 3
ITEM_PIPELINES = {
'scrapy.pipelines.files.FilesPipeline': 1
}
FILES_STORE = '/tmp/media/'
答案 0 :(得分:3)
def parsing_link(self,response):
item= PagalworldItem()
item['file_urls']=response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
yield{"download_link":item['file_urls']}
你屈服于:
yield {"download_link": ['http://someurl.com']}
要使scrapy的媒体/文件管道工作,您需要生成包含file_urls
字段的项目。所以试试这个:
def parsing_link(self,response):
item= PagalworldItem()
item['file_urls']=response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
yield item