Is it possible to execute asynchronous web requests (e.g. with asyncio) under PyQt4 (QWebPage)?
For example, how could this code be used to fetch several URLs in parallel:
#!/usr/bin/env python3.4
import sys
import signal

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import QWebPage

class Crawler( QWebPage ):
    def __init__(self, url):
        QWebPage.__init__( self )
        self._url = url
        self.content = ''

    def crawl( self ):
        signal.signal( signal.SIGINT, signal.SIG_DFL )
        self.connect( self, SIGNAL( 'loadFinished(bool)' ), self._finished_loading )
        self.mainFrame().load( QUrl( self._url ) )

    def _finished_loading( self, result ):
        self.content = self.mainFrame().toHtml()
        print(self.content)
        sys.exit( 0 )
def main( url ):
    app = QApplication( sys.argv )
    crawler = Crawler( url )
    crawler.crawl()
    sys.exit( app.exec_() )

if __name__ == '__main__':
    main( 'http://www.example.com' )
Thanks
Answer 0 (score: 1)
You can't make self.mainFrame().load(QUrl(self._url)) work through asyncio, sorry -- that method is implemented inside Qt itself.
What you can do is install the quamash event loop and call the aiohttp.request coroutine asynchronously to fetch web pages. That approach won't work for QWebPage's methods, though.
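The answer itself has no code, but a minimal sketch of the quamash + aiohttp approach it describes might look like the following. This is only an illustration: it assumes the quamash and aiohttp packages are installed, uses the old @asyncio.coroutine / yield from syntax matching the question's Python 3.4 target, and relies on aiohttp.request being a coroutine, as it was in the aiohttp releases of that era.

import sys
import asyncio

import aiohttp
import quamash
from PyQt4.QtGui import QApplication

urls = ['http://www.example.com', 'http://www.example.org']

@asyncio.coroutine
def fetch(url):
    # aiohttp.request() was a coroutine in the aiohttp versions of that period
    response = yield from aiohttp.request('GET', url)
    body = yield from response.text()
    print(url, len(body))

if __name__ == '__main__':
    app = QApplication(sys.argv)
    # quamash runs the asyncio event loop on top of Qt's event loop
    loop = quamash.QEventLoop(app)
    asyncio.set_event_loop(loop)
    with loop:
        # schedule one fetch per URL and wait for all of them to complete
        loop.run_until_complete(asyncio.wait([fetch(url) for url in urls]))

Note that this only downloads the raw HTML; as the answer says, nothing here goes through QWebPage, so there is no JavaScript execution or rendering.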
Answer 1 (score: 0)
The requests are already made asynchronously, so all you need to do is create multiple instances of QWebPage.
Here is a simple demo based on your example script:
import sys, signal
from PyQt4 import QtCore, QtGui, QtWebKit

urls = [
    'http://qt-project.org/doc/qt-4.8/qwebelement.html',
    'http://qt-project.org/doc/qt-4.8/qwebframe.html',
    'http://qt-project.org/doc/qt-4.8/qwebinspector.html',
    'http://qt-project.org/doc/qt-4.8/qwebpage.html',
    'http://qt-project.org/doc/qt-4.8/qwebsettings.html',
    'http://qt-project.org/doc/qt-4.8/qwebview.html',
    ]
class Crawler(QtWebKit.QWebPage):
    def __init__(self, url, identifier):
        super(Crawler, self).__init__()
        self.loadFinished.connect(self._finished_loading)
        self._id = identifier
        self._url = url
        self.content = ''

    def crawl(self):
        self.mainFrame().load(QtCore.QUrl(self._url))

    def _finished_loading(self, result):
        # print a short preview of the page once it has loaded
        self.content = self.mainFrame().toHtml()
        print('[%d] %s' % (self._id, self._url))
        print(self.content[:250].rstrip(), '...')
        print()
        self.deleteLater()

if __name__ == '__main__':

    app = QtGui.QApplication( sys.argv )
    signal.signal( signal.SIGINT, signal.SIG_DFL)
    # keep references so the crawlers are not garbage-collected before they finish
    crawlers = []
    for index, url in enumerate(urls):
        crawlers.append(Crawler(url, index))
        crawlers[-1].crawl()
    sys.exit( app.exec_() )
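One thing to note is that this demo keeps running after every page has loaded, whereas the script in the question exits once its single page has finished. If you want the application to quit when all crawlers are done, one possible extension (not part of the answer; the CrawlerPool helper below is purely illustrative) is to count the loadFinished signals:

class CrawlerPool(object):
    """Hypothetical helper: start one Crawler per URL, quit when all have finished."""
    def __init__(self, urls):
        self.crawlers = []
        self.remaining = len(urls)
        for index, url in enumerate(urls):
            crawler = Crawler(url, index)
            # loadFinished is also connected inside Crawler; extra slots are fine
            crawler.loadFinished.connect(self._one_done)
            self.crawlers.append(crawler)
            crawler.crawl()

    def _one_done(self, ok):
        self.remaining -= 1
        if self.remaining == 0:
            # all pages processed, stop the Qt event loop
            QtGui.QApplication.instance().quit()

The __main__ block would then create a single CrawlerPool(urls) instead of looping over the URLs itself.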