Using Scrapy spider output in a Python script

Date: 2019-06-29 09:58:39

Tags: python scrapy

I want to use the output of a spider inside a Python script. To accomplish this, I wrote the following code based on another thread.

The issue I'm facing is that the function spider_results() only returns a list of the last item over and over again, instead of a list with all the found items. When I run the same spider manually with the scrapy crawl command, I get the desired output. The output of the script, the manual JSON output, and the spider itself are below.

What's wrong with my code?

from scrapy import signals
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from circus.spiders.circus import MySpider

from scrapy.signalmanager import dispatcher


def spider_results():
    results = []

    def crawler_results(signal, sender, item, response, spider):
        results.append(item)

    dispatcher.connect(crawler_results, signal=signals.item_passed)

    process = CrawlerProcess(get_project_settings())
    process.crawl(MySpider)
    process.start()  # the script will block here until the crawling is finished
    return results


if __name__ == '__main__':
    print(spider_results())

Script output:

{'away_odds': 1.44,
 'away_team': 'Los Angeles Dodgers',
 'event_time': datetime.datetime(2019, 6, 8, 2, 15),
 'home_odds': 2.85,
 'home_team': 'San Francisco Giants',
 'last_update': datetime.datetime(2019, 6, 6, 20, 58, 41, 655497),
 'league': 'MLB'}, {'away_odds': 1.44,
 'away_team': 'Los Angeles Dodgers',
 'event_time': datetime.datetime(2019, 6, 8, 2, 15),
 'home_odds': 2.85,
 'home_team': 'San Francisco Giants',
 'last_update': datetime.datetime(2019, 6, 6, 20, 58, 41, 655497),
 'league': 'MLB'}, {'away_odds': 1.44,
 'away_team': 'Los Angeles Dodgers',
 'event_time': datetime.datetime(2019, 6, 8, 2, 15),
 'home_odds': 2.85,
 'home_team': 'San Francisco Giants',
 'last_update': datetime.datetime(2019, 6, 6, 20, 58, 41, 655497),
 'league': 'MLB'}]

JSON output with scrapy crawl:

[
{"home_team": "Los Angeles Angels", "away_team": "Seattle Mariners", "event_time": "2019-06-08 02:07:00", "home_odds": 1.58, "away_odds": 2.4, "last_update": "2019-06-06 20:48:16", "league": "MLB"},
{"home_team": "San Diego Padres", "away_team": "Washington Nationals", "event_time": "2019-06-08 02:10:00", "home_odds": 1.87, "away_odds": 1.97, "last_update": "2019-06-06 20:48:16", "league": "MLB"},
{"home_team": "San Francisco Giants", "away_team": "Los Angeles Dodgers", "event_time": "2019-06-08 02:15:00", "home_odds": 2.85, "away_odds": 1.44, "last_update": "2019-06-06 20:48:16", "league": "MLB"}
]

MySpider:

from scrapy.spiders import Spider
from ..items import MatchItem
import json
import datetime
import dateutil.parser

class MySpider(Spider):
    name = 'first_spider'

    start_urls = ["https://websiteXYZ.com"]

    def parse(self, response):
        item = MatchItem()

        timestamp = datetime.datetime.utcnow()

        response_json = json.loads(response.body)

        for event in response_json["el"]:
            for team in event["epl"]:
                if team["so"] == 1: item["home_team"] = team["pn"]
                if team["so"] == 2: item["away_team"] = team["pn"]

            for market in event["ml"]:
                if market["mn"] == "Match result":
                    item["event_time"] = dateutil.parser.parse(market["dd"]).replace(tzinfo=None)
                    for outcome in market["msl"]:
                        if outcome["mst"] == "1": item["home_odds"] = outcome["msp"]
                        if outcome["mst"] == "X": item["draw_odds"] = outcome["msp"]
                        if outcome["mst"] == "2": item["away_odds"] = outcome["msp"]

                if market["mn"] == 'Moneyline':
                    item["event_time"] = dateutil.parser.parse(market["dd"]).replace(tzinfo=None)
                    for outcome in market["msl"]:
                        if outcome["mst"] == "1": item["home_odds"] = outcome["msp"]
                        #if outcome["mst"] == "X": item["draw_odds"] = outcome["msp"]
                        if outcome["mst"] == "2": item["away_odds"] = outcome["msp"]


            item["last_update"] = timestamp
            item["league"] = event["scn"]

            yield item

Edit:

Based on the answer below, I tried the following script:

controller.py

import json
from scrapy import signals
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor, defer
from betsson_controlled.spiders.betsson import Betsson_Spider
from scrapy.utils.project import get_project_settings


class MyCrawlerRunner(CrawlerRunner):
    def crawl(self, crawler_or_spidercls, *args, **kwargs):
        # keep all items scraped
        self.items = []

        # create crawler (Same as in base CrawlerProcess)
        crawler = self.create_crawler(crawler_or_spidercls)

        # handle each item scraped
        crawler.signals.connect(self.item_scraped, signals.item_scraped)

        # create Twisted.Deferred launching crawl
        dfd = self._crawl(crawler, *args, **kwargs)

        # add callback - when crawl is done, call return_items
        dfd.addCallback(self.return_items)
        return dfd

    def item_scraped(self, item, response, spider):
        self.items.append(item)

    def return_items(self, result):
        return self.items

def return_spider_output(output):
    return json.dumps([dict(item) for item in output])

settings = get_project_settings()
runner = MyCrawlerRunner(settings)
spider = Betsson_Spider()
deferred = runner.crawl(spider)
deferred.addCallback(return_spider_output)


reactor.run()
print(deferred)

When I execute controller.py, I get:

<Deferred at 0x7fb046e652b0 current result: '[{"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}]'>

1 Answer:

Answer 0 (score: 1):

You need to modify your CrawlerRunner and use signals and/or callbacks to pass the items from the CrawlerRunner into your script.

How to integrate Flask & Scrapy? If you look at the options in the top answer there, the example using klein, twisted and scrapy is what you are looking for, since it does the same thing except it sends the items to a Klein HTTP server after the crawl. You can set up a similar approach with CrawlerRunner to send each item to your script as it crawls. NOTE: that particular question sends the results to a Klein web server after the items are collected. Its answer builds an API that collects the results, waits until crawling is done, and sends them as a JSON dump, but you can apply the same approach to your situation. The main thing to look at is how CrawlerRunner was subclassed and extended to add the extra functionality.

What you want is a separate script that imports your spider and extends CrawlerRunner. You then execute that script; it starts the Twisted reactor and kicks off the crawl process using your customized runner.

That said, this problem can probably also be solved in an item pipeline: create a custom item pipeline that hands each item to your script before returning it (a minimal sketch follows).
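As a minimal sketch of that pipeline idea, assuming a standard Scrapy project layout (the CollectorPipeline name and collected_items list are illustrative, not part of the question's project):

# pipelines.py - hypothetical collector pipeline
collected_items = []

class CollectorPipeline:
    """Appends every scraped item to a module-level list for later use."""

    def process_item(self, item, spider):
        collected_items.append(item)
        return item  # return the item so any later pipelines still receive it

You would enable it in settings.py with ITEM_PIPELINES = {"myproject.pipelines.CollectorPipeline": 100} and read collected_items after the crawl finishes.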

# main.py

import json
from scrapy import signals
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor, defer # import we missed
from myproject.spiders.mymodule import MySpiderName
from scrapy.utils.project import get_project_settings


class MyCrawlerRunner(CrawlerRunner):
    def crawl(self, crawler_or_spidercls, *args, **kwargs):
        # keep all items scraped
        self.items = []

        crawler = self.create_crawler(crawler_or_spidercls)

        crawler.signals.connect(self.item_scraped, signals.item_scraped)

        dfd = self._crawl(crawler, *args, **kwargs)

        dfd.addCallback(self.return_items)
        return dfd

    def item_scraped(self, item, response, spider):
        self.items.append(item)

    def return_items(self, result):
        return self.items


def return_spider_output(output):
    return json.dumps([dict(item) for item in output])


settings = get_project_settings()
runner = MyCrawlerRunner(settings)
spider = MySpiderName()
deferred = runner.crawl(spider)
deferred.addCallback(return_spider_output)


# Your items are now in the deferred
reactor.run() # we have to actually start the reactor...

Note that this is more or less pseudocode, but the principle behind it should work once you change all the myproject and myspider references to your own code. As for your original question: for multiple spiders, you probably want to do something like this.

# Taken from Scrapy docs about running multiple spiders with a customized runner.
@defer.inlineCallbacks
def crawl():
    yield runner.crawl(MySpider1)
    yield runner.crawl(MySpider2)
    reactor.stop()

crawl()
reactor.run() # the script will block here until the last crawl call is finished
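Finally, regarding the <Deferred at 0x...> output from controller.py above: printing the Deferred object only shows its repr, not its result. A minimal sketch of one way to actually get at the collected items, reusing runner, return_spider_output and MySpiderName from main.py above, is to add a callback that prints the result and stops the reactor:

def print_and_stop(json_output):
    # json_output is the JSON string produced by return_spider_output
    print(json_output)
    reactor.stop()  # unblock reactor.run() so the script can exit

deferred = runner.crawl(MySpiderName)
deferred.addCallback(return_spider_output)
deferred.addCallback(print_and_stop)
reactor.run()  # blocks until reactor.stop() is called in the callback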
