When I try to build a CSV file from the entire S&P 500, I get this error message:

Exception has occurred: pandas_datareader._utils.RemoteDataError
No data fetched for 3M Company using YahooDailyReader

I think the problem is here:

for row in table.findAll('tr')[1:]:
    ticker = row.findAll('td')[0:].text

Can someone help me? Thanks in advance.

Full code -
import bs4 as bs
import datetime as dt
import os
import pandas_datareader.data as web
import pickle
import requests

def save_sp500_tickers():
    resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[0:].text
        tickers.append(ticker)
    with open("sp500tickers.pickle", "wb") as f:
        pickle.dump(tickers, f)
    return tickers

# save_sp500_tickers()

def get_data_from_yahoo(reload_sp500=False):
    if reload_sp500:
        tickers = save_sp500_tickers()
    else:
        with open("sp500tickers.pickle", "rb") as f:
            tickers = pickle.load(f)
    if not os.path.exists('stock_dfs'):
        os.makedirs('stock_dfs')
    start = dt.datetime(2010, 1, 1)
    end = dt.datetime.now()
    for ticker in tickers:
        # just in case your connection breaks, we'd like to save our progress!
        if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
            df = web.DataReader(ticker, 'yahoo', start, end)
            df.reset_index(inplace=True)
            df.set_index("Date", inplace=True)
            df = df.drop("Symbol", axis=1)
            df.to_csv('stock_dfs/{}.csv'.format(ticker))
        else:
            print('Already have {}'.format(ticker))

get_data_from_yahoo()
Answer 0 (score: 2)
You will need to account for companies that no longer exist, for tickers whose trading history does not overlap your start and end parameters, and for symbols that the yahoo data source does not recognize. This works fine for me:
import pandas as pd
import pandas_datareader.data as web

failed = []
passed = []
data = pd.DataFrame()

for x in sp500_symbols:  # renamed: 's&p_symbols' is not a valid Python identifier
    try:
        data[x] = web.DataReader(x, data_source="yahoo", start="2019-1-1")["Adj Close"]
        passed.append(x)
    except (IOError, KeyError):
        msg = 'Failed to read symbol: {0!r}, replacing with NaN.'
        print(msg.format(x))
        failed.append(x)
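As a sketch of how sp500_symbols could be populated from the question's own pickle file (this glue code is my assumption, not part of the original answer):

import pickle

# Load the tickers saved by save_sp500_tickers() and strip stray newlines
with open("sp500tickers.pickle", "rb") as f:
    sp500_symbols = [t.strip() for t in pickle.load(f)]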
Answer 1 (score: 1)
Many parts of that code are out of date. The solution I found requires installing fix_yahoo_finance and yfinance with:

pip install yfinance
pip install fix_yahoo_finance

This seems to work for me; the full code is below.
import bs4 as bs
import datetime as dt
import os
from pandas_datareader import data as pdr
import pickle
import requests
import fix_yahoo_finance as yf

yf.pdr_override()  # patch pandas_datareader to fetch data via yfinance

def save_sp500_tickers():
    resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[0].text.replace('.', '-')
        ticker = ticker[:-1]  # strip the trailing newline from the table cell
        tickers.append(ticker)
    with open("sp500tickers.pickle", "wb") as f:
        pickle.dump(tickers, f)
    return tickers

# save_sp500_tickers()

def get_data_from_yahoo(reload_sp500=False):
    if reload_sp500:
        tickers = save_sp500_tickers()
    else:
        with open("sp500tickers.pickle", "rb") as f:
            tickers = pickle.load(f)
    if not os.path.exists('stock_dfs'):
        os.makedirs('stock_dfs')
    start = dt.datetime(2019, 6, 8)
    end = dt.datetime.now()
    for ticker in tickers:
        print(ticker)
        if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
            df = pdr.get_data_yahoo(ticker, start, end)
            df.reset_index(inplace=True)
            df.set_index("Date", inplace=True)
            df.to_csv('stock_dfs/{}.csv'.format(ticker))
        else:
            print('Already have {}'.format(ticker))

save_sp500_tickers()
get_data_from_yahoo()
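For what it's worth, fix_yahoo_finance has since been merged into yfinance, so a minimal sketch that skips pandas_datareader entirely (the ticker and output path here are only illustrative) would be:

import yfinance as yf

# yf.download returns a DataFrame of daily OHLC/Adj Close/Volume indexed by Date
df = yf.download('MMM', start='2019-06-08')
df.to_csv('stock_dfs/MMM.csv')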
Answer 2 (score: 0)
Scraping the table from Wikipedia puts 'MMM\n' into the pickle file.

Add

ticker = ticker[:-1]

to

for row in table.findAll('tr')[1:]:
    ticker = row.findAll('td')[0].text
    ticker = ticker[:-1]
    tickers.append(ticker)

and regenerate your pickle file. The tickers should then come out as 'MMM' instead of 'MMM\n'.
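A slightly more defensive variant (my own tweak, not part of the original answer) strips all surrounding whitespace instead of assuming exactly one trailing character:

ticker = row.findAll('td')[0].text.strip()  # removes '\n' and any stray spaces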
Answer 3 (score: 0)
If the same error still occurs even after editing the code as in Luc McCutcheon's answer, you just need to run the same get_data_from_yahoo() function again after a while. I believe this is because Yahoo Finance limits the number of requests you can make.
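A minimal sketch of that idea, assuming throttling is the cause (the retry count and delay are arbitrary):

import time
import pandas_datareader.data as web
from pandas_datareader._utils import RemoteDataError

def download_with_retry(ticker, start, end, retries=3, delay=60):
    # Retry a few times, sleeping between attempts, in case Yahoo throttles us.
    for attempt in range(retries):
        try:
            return web.DataReader(ticker, 'yahoo', start, end)
        except RemoteDataError:
            if attempt == retries - 1:
                raise
            time.sleep(delay)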
Answer 4 (score: 0)
I solved it with the following code:
import pandas as pd
from pandas_datareader import data as wb

failed = []
passed = []

def collect_data(data):
    mydata = pd.DataFrame()
    for t in data:
        try:
            mydata[t] = wb.DataReader(t, data_source='yahoo', start='01-10-2019')['Adj Close']
            passed.append(t)
        except (IOError, KeyError):
            msg = 'NaN'  # the symbol could not be read; its column is simply left out
            failed.append(t)
    print(mydata)
    return mydata
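Called, for example, with the tickers saved earlier in this thread (the pickle path and the small slice are just for illustration):

import pickle

with open("sp500tickers.pickle", "rb") as f:
    tickers = [t.strip() for t in pickle.load(f)]

prices = collect_data(tickers[:3])  # try a small sample first
print('failed:', failed)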