I'm new to Python and Scrapy. I've recently been trying to scrape a video site with more than 3,000 pages, each containing 20 videos. I use scrapy to collect the URLs (both the video sources and the next-page links) and requests to download the videos. The problem is that the script runs without any errors, but it crawls quite slowly (about 4 pages per minute). I suspect this is because Scrapy is blocking the next request, or pausing the crawl, while a video is being downloaded. If so, is there any way to speed Scrapy up, short of writing a separate script that uses multithreading? Here is my script:
settings.py:
BOT_NAME = 'video'
SPIDER_MODULES = ['video.spiders']
NEWSPIDER_MODULE = 'video.spiders'
LOG_LEVEL = 'DEBUG'
DUPEFILTER_CLASS = 'video.custom_filters.ViewKeyFilter'
DOWNLOAD_DELAY = 1
ITEM_PIPELINES = {
    'video.pipelines.VideoPipeline': 1,
    'video.pipelines.Duplicates_Pipeline': 300,
}
FILES_STORE = '/root/OneDrive/HDvideo'
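I haven't touched Scrapy's concurrency settings, so as far as I understand the defaults below still apply (values from the Scrapy docs):

CONCURRENT_REQUESTS = 16            # max concurrent requests across all domains (default)
CONCURRENT_REQUESTS_PER_DOMAIN = 8  # max concurrent requests per domain (default)
CONCURRENT_ITEMS = 100              # max items processed in parallel per response (default)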
custom_filters.py:
import re

from scrapy.dupefilters import RFPDupeFilter

pattern = re.compile(r'viewkey=(\w*)')

class ViewKeyFilter(RFPDupeFilter):
    def __init__(self, path=None, debug=False):
        self.keys_seen = set()
        RFPDupeFilter.__init__(self, path, debug)

    def request_seen(self, request):
        if 'viewkey=' in request.url:
            key = pattern.search(request.url).group(1)
            if key in self.keys_seen:
                return True
            # Store the extracted key itself so the membership test above can match.
            self.keys_seen.add(key)
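For illustration, the filter only keys on the viewkey query parameter, so two different URLs pointing at the same video compare equal:

>>> pattern.search('http://video.com/video_hd.php?viewkey=abc123').group(1)
'abc123'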
spider.py:
import random
import re

import scrapy
from requests_html import HTML
from scrapy import Request

from video.items import VideoItem

pattern = re.compile(r'viewkey=(\w*)')
# headers, cookies and filterTitle are defined elsewhere in this module (values elided).

class videoSpider(scrapy.Spider):
    name = 'video'

    def start_requests(self):
        url = 'http://video.com/v.php?next=watch&page=1'
        yield Request(url, callback=self.parse, headers=headers, dont_filter=True)

    def parse(self, response):
        doc = HTML(html=response.text)
        # Follow the ">" (next page) link if there is one.
        if doc.find('a:contains(">")'):
            yield Request('http://video.com/v.php' + doc.find('a:contains(">")')[0].attrs['href'],
                          callback=self.parse, headers=headers, dont_filter=True)
        for video_box in doc.find('div.listchannel'):
            title = video_box.find('a[target="blank"]')[0].find('img')[0].attrs['title']
            if not any(keyword in title for keyword in filterTitle):
                SDURL = video_box.find('a[target="blank"]')[0].attrs['href']
                if 'viewkey=' in SDURL:
                    if video_box.find('div.hd-video'):
                        # HD videos live on a separate page addressed by viewkey.
                        vkey = pattern.search(SDURL).group(1)
                        yield Request('http://video.com/video_hd.php?viewkey={}'.format(vkey),
                                      callback=self.parse_videoPage, headers=headers,
                                      cookies=random.choice(cookies))
                    else:
                        yield Request(SDURL, callback=self.parse_videoPage, headers=headers)

    def parse_videoPage(self, response):
        if 'video_missing' not in response.url:
            doc = HTML(html=response.text)
            title = doc.find('#video-title')[0].text
            src = doc.find('source')[0].attrs['src']
            item = VideoItem()
            item['title'] = title
            item['src'] = src
            item['vkey'] = pattern.search(response.url).group(1)
            self.logger.info('Return item. Title: {} src: {}'.format(title, src))
            yield item
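For completeness, VideoItem in items.py just declares the three fields the spider fills:

from scrapy import Item, Field

class VideoItem(Item):
    title = Field()
    src = Field()
    vkey = Field()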
pipelines.py:
import logging

import requests
from scrapy.exceptions import DropItem
from scrapy.pipelines.media import MediaPipeline

logger = logging.getLogger(__name__)
# headers is the same dict used by the spider (values elided).

class VideoPipeline(MediaPipeline):
    def process_item(self, item, spider):
        if item['src']:
            try:
                r = requests.get(item['src'], stream=True, timeout=5, headers=headers)
                logger.info('Downloading video: {}'.format(item['title']))
                with open('/root/OneDrive/HDvideo/' + item['title'] + '.mp4', 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)
                logger.info('Downloaded video: {}'.format(item['title']))
            except Exception:
                logger.exception('Something bad happened: ')
        return item

class Duplicates_Pipeline(object):
    def __init__(self):
        self.vkeys_seen = set()

    def process_item(self, item, spider):
        if item is not None:
            if item['vkey'] in self.vkeys_seen:
                raise DropItem('Duplicate item found: {}'.format(item))
            self.vkeys_seen.add(item['vkey'])
        return item
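From what I've read, the requests.get() call in VideoPipeline runs synchronously inside Twisted's reactor thread, so every download stalls the whole crawl. If that's the real bottleneck, my understanding is that Scrapy's built-in FilesPipeline downloads through the same asynchronous downloader as the page requests. A rough sketch of how I think it could replace my pipeline (untested; the class name and the meta-based file naming are my own guesses):

from scrapy import Request
from scrapy.pipelines.files import FilesPipeline

class VideoDownloadPipeline(FilesPipeline):
    def get_media_requests(self, item, info):
        if item['src']:
            # Hand the video URL to Scrapy's downloader; carry the title
            # along so file_path() can name the file after it.
            yield Request(item['src'], meta={'title': item['title']})

    def file_path(self, request, response=None, info=None):
        # Save under the video title instead of the default URL hash.
        return request.meta['title'] + '.mp4'

ITEM_PIPELINES in settings.py would then point at 'video.pipelines.VideoDownloadPipeline' instead of VideoPipeline, with FILES_STORE already set to /root/OneDrive/HDvideo. If I understand correctly, downloads would then share the crawler's concurrency limits instead of blocking it one video at a time. Is that the right direction?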
The videos are saved to /root/OneDrive/, a directory mounted on my Vultr VPS with rclone. Sorry for my bad English. Any help would be greatly appreciated.