通过 Twisted 的 inlineCallbacks 循环运行 Scrapy 爬虫

时间:2017-04-21 13:51:19

标签: python scrapy twisted scrapy-spider reactor

我遇到了 ImportError: No module named 'spiders',因此我猜测调用蜘蛛时导入路径(环境变量)不对,但我并不完全清楚该如何让它们正常工作。

基本上,我想运行一些Scrapy蜘蛛,它们会填充db,然后我的程序应该进行小的计算。这应该定期发生(比方说每分钟)。由于 Twisted 本来就是 scrapy 的依赖,我决定把两者结合起来。 项目结构就像(简单化):

 -Project
        |-src
            |- __init__.py
            |- spiders.py
        |-bot.py

在spiders.py中我有2个单独的蜘蛛,当我直接在该文件中启动它们时,它们运行良好。 但现在我在bot.py中加入了一些逻辑,并写出了如下代码:

from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings
from twisted.internet import task
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue

from src.spiders import first_spider, second_spider

def do_some_stuff():
    """Placeholder for the periodic post-crawl computation."""
    return None

if __name__ == '__main__':
    # Build a CrawlerRunner from the scrapy project settings; unlike
    # CrawlerProcess it does not own the reactor, so we can drive it
    # from twisted ourselves.
    runner = CrawlerRunner(get_project_settings())

    @inlineCallbacks
    def cycle():
        # Run both spiders sequentially (each yield waits for the crawl
        # Deferred to fire), then hand off to the post-crawl step.
        yield runner.crawl(first_spider)
        yield runner.crawl(second_spider)
        returnValue(do_some_stuff())


    timeout = 60.0

    # Schedule cycle() every `timeout` seconds; LoopingCall.start also
    # fires the first call immediately by default.
    l = task.LoopingCall(cycle)
    l.start(timeout)

    reactor.run()

错误跟踪:

    2017-04-21 15:32:26 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.corestats.CoreStats',
 'scrapy.extensions.logstats.LogStats',
 'scrapy.extensions.telnet.TelnetConsole']
2017-04-21 15:32:26 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
 'scrapy.downloadermiddlewares.retry.RetryMiddleware',
 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
 'scrapy.downloadermiddlewares.stats.DownloaderStats']
2017-04-21 15:32:26 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
 'scrapy.spidermiddlewares.referer.RefererMiddleware',
 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
 'scrapy.spidermiddlewares.depth.DepthMiddleware']
Unhandled error in Deferred:
2017-04-21 15:32:26 [twisted] CRITICAL: Unhandled error in Deferred:

2017-04-21 15:32:26 [twisted] CRITICAL: 
Traceback (most recent call last):
  File "projectpath/venv/lib/python3.5/site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
    result = result.throwExceptionIntoGenerator(g)
  File "projectpath/venv/lib/python3.5/site-packages/twisted/python/failure.py", line 393, in throwExceptionIntoGenerator
    return g.throw(self.type, self.value, self.tb)
  File "projectpath/bot.py", line 141, in cycle
    yield runner.crawl(first_spider)
ImportError: No module named 'spiders'

更新。在spiders.py中导入:

import hashlib
import json

import pymongo
import scrapy

from scrapy.crawler import CrawlerRunner
from scrapy.exceptions import DropItem
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor

1 个答案:

答案 0 :(得分:1)

所以您的项目结构是

.
├── bot.py
└── src
    ├── __init__.py
    └── spiders.py

要运行它,您应按以下方式使用PYTHONPATH

$ PYTHONPATH=. python3 bot.py

下面是一个可以直接运行的单文件 scrapy 项目示例,它每 60 秒执行一次循环抓取。

# scraper.py
import datetime
import json
import scrapy
from scrapy.crawler import CrawlerRunner
from scrapy.item import Item, Field
from scrapy.utils.log import configure_logging
from twisted.internet import reactor
from twisted.internet import task
from twisted.internet.defer import inlineCallbacks

class JsonWriterPipeline(object):
    """Append every scraped item as one JSON line to the configured file."""

    def open_spider(self, spider):
        # Target path is taken from the spider's settings (JSON_FILE).
        self.file = open(spider.settings['JSON_FILE'], 'a')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # Serialize the item and write it as a single JSON-lines record.
        serialized = json.dumps(dict(item))
        self.file.write(serialized + "\n")
        return item

class QuoteItem(Item):
    """One scraped quote plus the name of the spider that produced it."""
    text = Field()
    author = Field()
    tags = Field()
    spider = Field()

class QuotesSpiderOne(scrapy.Spider):
    """Scrape the quotes on page 1 of quotes.toscrape.com."""

    name = "quotes1"

    def start_requests(self):
        # Single seed URL; parsing happens in parse().
        yield scrapy.Request(url='http://quotes.toscrape.com/page/1/', callback=self.parse)

    def parse(self, response):
        for quote_sel in response.css('div.quote'):
            yield QuoteItem(
                text=quote_sel.css('span.text::text').get(),
                author=quote_sel.css('small.author::text').get(),
                tags=quote_sel.css('div.tags a.tag::text').getall(),
                spider=self.name,
            )

class QuotesSpiderTwo(scrapy.Spider):
    """Scrape the quotes on page 2 of quotes.toscrape.com."""

    name = "quotes2"

    def start_requests(self):
        # Single seed URL; parsing happens in parse().
        yield scrapy.Request(url='http://quotes.toscrape.com/page/2/', callback=self.parse)

    def parse(self, response):
        for quote_sel in response.css('div.quote'):
            yield QuoteItem(
                text=quote_sel.css('span.text::text').get(),
                author=quote_sel.css('small.author::text').get(),
                tags=quote_sel.css('div.tags a.tag::text').getall(),
                spider=self.name,
            )

def do_some_stuff():
    """Post-crawl hook: print the current wall-clock time as HH:MM:SS."""
    now = datetime.datetime.now()
    print(now.strftime("%H:%M:%S"))

@inlineCallbacks
def cycle():
    """Run both spiders back to back, then do the follow-up computation."""
    for spider_cls in (QuotesSpiderOne, QuotesSpiderTwo):
        # Each yield waits for the crawl Deferred before starting the next.
        yield runner.crawl(spider_cls)
    return do_some_stuff()

if __name__ == '__main__':
    # Minimal inline settings; no scrapy.cfg / settings.py is needed.
    settings = {
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'HTTPCACHE_ENABLED': True,
        'JSON_FILE': 'items.jl',
        'ITEM_PIPELINES': {'__main__.JsonWriterPipeline': 800},
    }

    configure_logging()
    # `runner` must stay module-global: cycle() reads it.
    runner = CrawlerRunner(settings=settings)

    # Fire cycle() immediately, then again every 60 seconds.
    looper = task.LoopingCall(cycle)
    looper.start(60.0)

    reactor.run()

要运行

$ python3 scraper.py

一个文件抓取项目的一个优点是易于生成pyinstaller二进制文件。

也可以使用 reactor.callLater 构造替代的循环逻辑,这样就可以在 cycle 内部动态修改 timeout。

# above code stays the same

@inlineCallbacks
def cycle(runner, timeout):
    """One crawl pass over both spiders, then self-reschedule after `timeout` seconds."""
    for spider_cls in (QuotesSpiderOne, QuotesSpiderTwo):
        yield runner.crawl(spider_cls)
    do_some_stuff()
    # Self-rescheduling replaces LoopingCall and lets `timeout` vary per pass.
    reactor.callLater(timeout, cycle, runner, timeout)

def main():
    """Configure scrapy, schedule the first cycle, and run the reactor."""
    # Minimal inline settings; no scrapy.cfg / settings.py is needed.
    settings = {
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'HTTPCACHE_ENABLED': True,
        'JSON_FILE': 'items.jl',
        'ITEM_PIPELINES': {'__main__.JsonWriterPipeline': 800},
    }

    configure_logging()
    runner = CrawlerRunner(settings=settings)

    # First pass runs after `timeout` seconds; cycle() reschedules itself.
    timeout = 60.0
    reactor.callLater(timeout, cycle, runner, timeout)
    reactor.run()

if __name__ == '__main__':
    main()