from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
def races(main_url):
    """Return race start times (e.g. '1345') scraped from *main_url*.

    Each time is the first 5 characters of a '.time-location' element
    ('13:45') with the colon removed, matching the path segment used by
    the per-race pages.
    """
    driver = webdriver.Chrome()
    try:
        driver.get(main_url)
        driver.implicitly_wait(2)
        # find_elements_by_class_name was removed in Selenium 4 — use By locators.
        elements = driver.find_elements(By.CLASS_NAME, 'time-location')
        return [el.text[:5].replace(':', '') for el in elements]
    finally:
        # quit() (not close()) ends the whole WebDriver session so the
        # browser process is released even if scraping raises.
        driver.quit()
import pandas as pd
def scrape(url):
    """Scrape sectional times for every horse on one racecard page.

    Returns a list of rows, one per horse: [horse_name, time1, time2, ...].
    """
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        driver.implicitly_wait(2)
        # Open the sectional-times tab, then wait for its content to load.
        driver.find_elements(By.CLASS_NAME, 'racecard-ajax-link')[1].click()
        WebDriverWait(driver, 5).until(expected_conditions.presence_of_element_located(
            (By.XPATH, '//* [@id="tab-racecard-sectional-times"]/div/div[1]/div[1]/div[2]/div/button')))
        # Accumulate results here so the caller can save them. The original
        # code set `data = [main]` (an unrelated function object) and only
        # printed the scraped values without ever storing them.
        data = []
        for horse in driver.find_elements(By.CLASS_NAME, 'card-item'):
            horse_name = horse.find_element(By.CLASS_NAME, 'form-link').text
            times = [t.text for t in horse.find_elements(By.CLASS_NAME, 'sectionals-time')]
            data.append([horse_name] + times)
            print('{}: {}'.format(horse_name, times))
            print()
        return data
    finally:
        driver.quit()
def main():
    """Scrape every race on the Wolverhampton card for one date and save to CSV."""
    date = '6-October-2018'
    main_url = 'http://www.attheraces.com/racecard/Wolverhampton/' + date
    all_rows = []
    for race in races(main_url):
        url = main_url + '/' + race
        print(url)
        all_rows.extend(scrape(url))
    # Build and write the DataFrame *inside* main(). The original script did
    # `df = pd.DataFrame(data)` at module level, which raised
    # NameError: name 'data' is not defined — `data` only ever existed as a
    # local variable inside scrape(), so nothing was saved to CSV.
    df = pd.DataFrame(all_rows)
    df.to_csv("jan_1")


if __name__ == '__main__':
    main()