如何在 pandas 中修复 "KeyError: 'Date'"

时间:2019-04-22 14:03:56

标签: python pandas datetime

我正在尝试开始对算法交易进行编程,但是我无法真正解决此问题。有人看到我的错误了吗?

起初它可以工作,但是过了一会儿它报错:"KeyError: 'Date'"

import bs4 as bs
import datetime as dt 
import os
import pandas as pd 
import pandas_datareader.data as web
import pickle 
import requests 

def save_sp500_tickers():
    """Scrape the S&P 500 ticker symbols from Wikipedia and pickle them.

    Returns:
        list[str]: ticker symbols, with '.' replaced by '-' (the form
        Yahoo Finance expects, e.g. BRK.B -> BRK-B).
    """
    # BUG FIX: in the original post this statement was split across two
    # lines without a continuation, which is a SyntaxError.
    resp = requests.get(
        "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
    )
    soup = bs.BeautifulSoup(resp.text, "lxml")
    table = soup.find("table", {"class": "wikitable sortable"})

    # Hoisted out of the loop: the translation table never changes.
    mapping = str.maketrans(".", "-")

    tickers = []
    # [1:] skips the header row; each remaining <tr> is one company.
    for row in table.findAll("tr")[1:]:
        ticker = row.findAll("td")[1].text
        # .text often carries a trailing newline from the table cell;
        # strip it so downstream lookups use the clean symbol.
        tickers.append(ticker.translate(mapping).strip())

    with open("save_sp500_tickers.pickle", "wb") as f:
        pickle.dump(tickers, f)

    print(tickers)

    return tickers

# Scrape and cache the ticker list as soon as this script runs.
save_sp500_tickers()    


def get_data_from_yahoo(reload_sp500 = False):
    """Download 2000-2016 daily price data for every S&P 500 ticker.

    Writes one CSV per ticker into stock_dfs/, skipping tickers whose
    file already exists.

    Args:
        reload_sp500: if True, re-scrape the ticker list from Wikipedia;
            otherwise load it from the pickle written by save_sp500_tickers().
    """
    if reload_sp500:
        tickers = save_sp500_tickers()
    else:
        with open("save_sp500_tickers.pickle", "rb") as f:
            tickers = pickle.load(f)

    # BUG FIX: in the original post this 'if' was dedented to column 0,
    # which is an IndentationError; it belongs inside the function.
    if not os.path.exists("stock_dfs"):
        os.makedirs("stock_dfs")

    start = dt.datetime(2000, 1, 1)
    end = dt.datetime(2016, 12, 31)

    for ticker in tickers:
        print(ticker)
        csv_path = "stock_dfs/{}.csv".format(ticker)
        if not os.path.exists(csv_path):
            # The reported KeyError: 'Date' is raised inside
            # pandas_datareader when Yahoo returns no usable data for a
            # symbol; skip that ticker instead of aborting the whole run.
            try:
                df = web.DataReader(ticker, "yahoo", start, end)
            except KeyError:
                print("No data for {}, skipping".format(ticker))
                continue
            df.to_csv(csv_path)
        else:
            print("Already have {}".format(ticker)) 

# Kick off the bulk download using the cached ticker list.
get_data_from_yahoo()

它必须创建一个包含 S&P 500 中所有公司代码的文件,然后为每家公司创建一个文件,记录自2000年1月1日至2016年12月31日的股票信息。第一部分可以工作,第二部分对前159家公司也有效,然后出现 KeyError: 'Date'。

1 个答案:

答案 0(得分:0)

import bs4 as bs
import datetime as dt
import os #create a nwe directory
import pandas as pd
from pandas_datareader import data as pdr
import pickle
import requests
import fix_yahoo_finance as yf

# BUG FIX: the original wrote `yf.pdr_override` without parentheses,
# which only references the function and never applies the Yahoo patch.
yf.pdr_override()

def save_sp500_tickers():
    """Scrape the current S&P 500 ticker symbols from Wikipedia.

    Symbols are normalised to Yahoo Finance style ('.' -> '-'),
    pickled to sp500tickers.pickle, and returned.

    Returns:
        list[str]: the ticker symbols.
    """
    resp=requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup=bs.BeautifulSoup(resp.text, "lxml")
    table=soup.find('table',{'class':'wikitable sortable'})
    tickers=[]
    for row in table.findAll('tr')[1:]:  # [0] is the header row
        # Yahoo uses '-' where Wikipedia uses '.' (e.g. BRK.B -> BRK-B).
        ticker=row.findAll('td')[0].text.replace('.','-')
        # BUG FIX: the original used ticker[:-1] to drop a trailing
        # newline, which corrupts the symbol whenever .text has no
        # trailing newline; .strip() removes only whitespace.
        tickers.append(ticker.strip())

    with open('sp500tickers.pickle','wb') as f:
        pickle.dump(tickers, f)
    return tickers


def get_data_from_yahoo(reload_sp500=False):
    """Download daily price history for every S&P 500 ticker via Yahoo.

    One Date-indexed CSV per ticker is written to stock_dfs/; tickers
    whose CSV already exists are skipped.

    Args:
        reload_sp500: if True, re-scrape the ticker list from Wikipedia;
            otherwise load the cached sp500tickers.pickle.
    """
    if reload_sp500:
        tickers=save_sp500_tickers()
    else:
        with open('sp500tickers.pickle','rb') as f:
            tickers = pickle.load(f)
    if not os.path.exists('stock_dfs'):
        os.makedirs('stock_dfs')

    start=dt.datetime(2010,12,8) 
    end=dt.datetime.now()

    # To fetch only a subset, slice here, e.g. `for ticker in tickers[:100]:`.
    for ticker in tickers:
        print(ticker)
        csv_path = 'stock_dfs/{}.csv'.format(ticker)
        if not os.path.exists(csv_path):
            df=pdr.get_data_yahoo(ticker, start, end)
            # BUG FIX: the original called df.set_index('Date',
            # inplace=False) and discarded the result -- a no-op that
            # left a spurious integer index column in the CSV.  Keep
            # Date as the index so the CSV round-trips cleanly.
            df.reset_index(inplace=True)
            df.set_index('Date', inplace=True)
            df.to_csv(csv_path)
        else:
            print('Already have {}'.format(ticker))

# Refresh the cached ticker list, then download data for each symbol.
save_sp500_tickers()
get_data_from_yahoo()