Fetching stock data with urllib.request

Time: 2011-12-18 00:21:25

Tags: python multithreading performance urllib2

I wrote the following code to retrieve data for the stocks in the S&P 500. The code works, but it is slow because of the number of urlopen requests. What strategies can I use to speed it up?

from urllib.request import urlopen
import csv


class StockQuote:
    """gets stock data from Yahoo Finance"""

    def __init__(self, quote):
        self.quote = quote

    def lastPrice(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=l1'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))

    def volume(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=v0'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))

    def yearrange(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=w0'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))

    def PEratio(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=r0'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))

    def bookValue(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=b4'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))

    def EBITDA(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=j4'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))

    def PEGRatio(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=r5'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))

    def ticker(self):
        url = 'http://finance.yahoo.com/d/quotes.csv?s={ticker}&f=s0'.format(ticker=self.quote)
        return bytes.decode((urlopen(url).read().strip()))


def openSP500file():
    SP500 = csv.reader(open(r'C:\Users\dev\Desktop\SP500.csv', 'r'), delimiter=',')
    for x in SP500:
        indStk = x[0]
        printdata(indStk)

def printdata(stk):
    stkObj = StockQuote(stk)
    stkdata= {}
    stkdata['Ticker'] = stkObj.ticker()
    stkdata['Price'] = stkObj.lastPrice()
    stkdata['PE Ratio'] = stkObj.PEratio()
    stkdata['Volume'] = stkObj.volume()
    stkdata['Year Range'] = stkObj.yearrange()
    stkdata['Book Value per Share'] = stkObj.bookValue()
    stkdata['EBITDA'] = stkObj.EBITDA()
    stkdata['PEG Ratio'] = stkObj.PEGRatio()
    print(stkdata)  

def main():
    openSP500file()


if __name__ == '__main__':
    main()

Thanks!

3 Answers:

Answer 0 (Score: 3)

If all of your requests go to the same domain, I would suggest using urllib3. It is not part of the standard Python installation, but it implements connection pooling, so each individual request is faster.
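A minimal sketch of what that might look like, assuming urllib3 is installed (pip install urllib3) and reusing the endpoint and field codes from the question; the fetch_quotes helper and the ticker list are just illustrative:

import urllib3

def fetch_quotes(tickers):
    # A single PoolManager reuses the TCP connection to finance.yahoo.com
    # across requests instead of opening a new socket every time.
    http = urllib3.PoolManager()
    r = http.request('GET', 'http://finance.yahoo.com/d/quotes.csv',
                     fields={'s': '+'.join(tickers), 'f': 'sl1rvwb4j4r5'})
    return r.data.decode('utf-8')

print(fetch_quotes(['YHOO', 'GOOG', 'MSFT']))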

Answer 1 (Score: 3)

You can request information for multiple stocks with a single call to request.urlopen:
import urllib.request as request
import urllib.parse as parse
import csv
import codecs
import pprint

def printdata(stks):
    params = parse.urlencode((('s', '+'.join(stks)), ('f', 'sl1rvwb4j4r5')))
    url = 'http://finance.yahoo.com/d/quotes.csv'
    url = '?'.join((url, params))
    req = request.urlopen(url)
    f = codecs.getreader('utf8')(req)
    fields = '''Ticker Price PE_Ratio Volume Year_Range Book_Value_per_Share
              EBITDA PEG_Ratio'''.split()
    for row in csv.reader(f):
        stkdata = dict(zip(fields, row))        
        pprint.pprint(stkdata)

printdata('YHOO GOOG MSFT'.split())

which yields:

{'Book_Value_per_Share': '10.051',
 'EBITDA': '1.406B',
 'PEG_Ratio': '1.47',
 'PE_Ratio': '18.56',
 'Price': '14.96',
 'Ticker': 'YHOO',
 'Volume': '32625192',
 'Year_Range': '11.09 - 18.84'}
{'Book_Value_per_Share': '169.355',
 'EBITDA': '13.446B',
 'PEG_Ratio': '0.89',
 'PE_Ratio': '21.12',
 'Price': '625.96',
 'Ticker': 'GOOG',
 'Volume': '4459782',
 'Year_Range': '473.02 - 642.96'}
{'Book_Value_per_Share': '7.062',
 'EBITDA': '30.146B',
 'PEG_Ratio': '0.98',
 'PE_Ratio': '9.29',
 'Price': '26.00',
 'Ticker': 'MSFT',
 'Volume': '101410080',
 'Year_Range': '23.65 - 29.46'}

Answer 2 (Score: 2)

You could use the threading or multiprocessing modules to fetch all of those URLs concurrently. That would save you a lot of time, since none of the fetches depends on any of the others.
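A minimal sketch of the threaded approach, assuming concurrent.futures.ThreadPoolExecutor (Python 3.2+, built on the threading module) and reusing the printdata() function from the question; the pool size of 20 is just an illustrative choice:

from concurrent.futures import ThreadPoolExecutor

def fetch_all(tickers):
    # Each printdata() call is network-bound, so running them in a thread
    # pool overlaps the waits instead of doing one request after another.
    with ThreadPoolExecutor(max_workers=20) as pool:
        # list() forces the map iterator so any exception raised in a
        # worker thread is re-raised here rather than silently dropped.
        list(pool.map(printdata, tickers))

# fetch_all(['YHOO', 'GOOG', 'MSFT'])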