I'm trying to scrape data for several different tests from an exam-prep site. There are different subjects, each of which has specializations, each of which has practice tests, each of which has several questions.
subject <--- specialization <---- practice-test *------ question
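To make the target shape concrete, this is roughly the nested structure the scrape() code below is meant to assemble in data (the key names match the dicts built in the code; the values here are made-up placeholders, not real site content):

data = [
    {
        "name": "Algebra",                      # subject
        "specializations": [
            {
                "name": "Algebra 1",            # specialization
                "practices": [
                    {
                        "name": "Practice Test 1",
                        "questions": [
                            {
                                "id": "1",
                                "pre": "...",
                                "body": "Solve for x: 2x + 3 = 7",
                                "answers": ["1", "2", "3", "4"],
                            },
                        ],
                    },
                ],
            },
        ],
    },
]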
Here is my code:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pathlib
import time
import json
import os
driver=webdriver.Firefox(executable_path="../../../geckodriver.exe")
wait = WebDriverWait(driver, 15)
data=[]
def setup():
    driver.get('https://www.varsitytutors.com/practice-tests')
    try:
        # hide the overlay iframes that block clicks
        go_away_1 = driver.find_element_by_class_name("ub-emb-iframe")
        driver.execute_script("arguments[0].style.visibility='hidden'", go_away_1)
        go_away_2 = driver.find_element_by_class_name("ub-emb-iframe-wrapper")
        driver.execute_script("arguments[0].style.visibility='hidden'", go_away_2)
        go_away_3 = driver.find_element_by_class_name("ub-emb-visible")
        driver.execute_script("arguments[0].style.visibility='hidden'", go_away_3)
    except:
        pass
def get_subjects(subs=[]):
    subject_clickables_xpath="/html/body/div[3]/div[9]/div/*/div[@data-subject]/div[1]"
    subject_clickables=driver.find_elements_by_xpath(subject_clickables_xpath)
    subject_names=map(lambda x : x.find_element_by_xpath('..').get_attribute('data-subject'), subject_clickables)
    subject_pairs=zip(subject_names, subject_clickables)
    return subject_pairs
def get_specializations(subject):
    specialization_clickables_xpath="//div//div[@data-subject='"+subject+"']/following-sibling::div//div[@class='public_problem_set']//a[contains(.,'Practice Tests')]"
    specialization_names_xpath="//div//div[@data-subject='"+subject+"']/following-sibling::div//div[@class='public_problem_set']//a[contains(.,'Practice Tests')]/../.."
    specialization_names=map(lambda x : x.get_attribute('data-subject'), driver.find_elements_by_xpath(specialization_names_xpath))
    specialization_clickables = driver.find_elements_by_xpath(specialization_clickables_xpath)
    specialization_pairs=zip(specialization_names, specialization_clickables)
    return specialization_pairs
def get_practices(subject, specialization):
    practice_clickables_xpath="/html/body/div[3]/div[8]/div[3]/*/div[1]/a[1]"
    practice_names_xpath="//*/h3[@class='subject_header']"
    lengths_xpath="/html/body/div[3]/div[8]/div[3]/*/div[2]"
    lengths=map(lambda x : x.text, driver.find_elements_by_xpath(lengths_xpath))
    print(lengths)
    practice_names=map(lambda x : x.text, driver.find_elements_by_xpath(practice_names_xpath))
    practice_clickables = driver.find_elements_by_xpath(practice_clickables_xpath)
    practice_pairs=zip(practice_names, practice_clickables)
    return practice_pairs
def remove_popup():
    try:
        button=wait.until(EC.element_to_be_clickable((By.XPATH,"//button[contains(.,'No Thanks')]")))
        button.location_once_scrolled_into_view
        button.click()
    except:
        print('could not find the popup')
def get_questions(subject, specialization, practice):
    remove_popup()
    questions=[]
    current_question=None
    while True:
        question={}
        try:
            WebDriverWait(driver,5).until(EC.presence_of_element_located((By.XPATH,"/html/body/div[3]/div[7]/div[1]/div[2]/div[2]/table/tbody/tr/td[1]")))
            question_number=driver.find_element_by_xpath('/html/body/div[3]/div[7]/div[1]/div[2]/div[2]/table/tbody/tr/td[1]').text.replace('.','')
            question_pre=driver.find_element_by_class_name('question_pre')
            question_body=driver.find_element_by_xpath('/html/body/div[3]/div[7]/div[1]/div[2]/div[2]/table/tbody/tr/td[2]/p')
            answer_choices=driver.find_elements_by_class_name('question_row')
            answers=map(lambda x : x.text, answer_choices)
            question['id']=question_number
            question['pre']=question_pre.text
            question['body']=question_body.text
            question['answers']=list(answers)
            questions.append(question)
            choice=WebDriverWait(driver,20).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR,"input.test_button")))
            driver.execute_script("arguments[0].click();", choice[3])
            time.sleep(3)
        except Exception as e:
            if 'results' in driver.current_url:
                driver.get(driver.current_url.replace('http://', 'https://'))
                # last question has been answered; record results
                remove_popup()
                pathlib.Path('data/'+subject+'/'+specialization).mkdir(parents=True, exist_ok=True)
                with open('data/'+subject+'/'+specialization+'/questions.json', 'w') as outfile:
                    json.dump(list(questions), outfile)
                break
            else:
                driver.get(driver.current_url.replace('http://', 'https://'))
    return questions
def scrape():
    setup()
    subjects=get_subjects()
    for subject_name, subject_clickable in subjects:
        subject={}
        subject['name']=subject_name
        subject['specializations']=[]
        subject_clickable.click()
        subject_url=driver.current_url.replace('http://', 'https://')
        specializations=get_specializations(subject_name)
        for specialization_name, specialization_clickable in specializations:
            specialization={}
            specialization['name']=specialization_name
            specialization['practices']=[]
            specialization_clickable.click()
            specialization_url=driver.current_url.replace('http://', 'https://')
            practices=get_practices(subject_name, specialization_name)
            for practice_name, practice_clickable in practices:
                practice={}
                practice['name']=practice_name
                practice_clickable.click()
                questions=get_questions(subject_name, specialization_name, practice_name)
                practice['questions']=questions
                driver.get(specialization_url)
            driver.get(subject_url)
        data.append(subject)
    print(data)

scrape()
Running this produces the following error message:
Traceback (most recent call last):
File "scrape.py", line 141, in <module>
scrape()
File "scrape.py", line 126, in scrape
for practice_name, practice_clickable in practices:
File "scrape.py", line 49, in <lambda>
practice_names=map(lambda x : x.text, driver.find_elements_by_xpath(practice_names_xpath))
File "C:\Users\Joseph\AppData\Local\Programs\Python\Python36\lib\site-packages\selenium\webdriver\remote\webelement.py", line 76, in text
return self._execute(Command.GET_ELEMENT_TEXT)['value']
File "C:\Users\Joseph\AppData\Local\Programs\Python\Python36\lib\site-packages\selenium\webdriver\remote\webelement.py", line 628, in _execute
return self._parent.execute(command, params)
File "C:\Users\Joseph\AppData\Local\Programs\Python\Python36\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 312, in execute
self.error_handler.check_response(response)
File "C:\Users\Joseph\AppData\Local\Programs\Python\Python36\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 237, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.NoSuchElementException: Message: Web element reference not seen before: 980e5c29-e3af-4b13-979f-0f2bb58b3480
After getting the questions from one practice test, the driver needs to go back to the specialization page, where the next practice test can be found. Hence these lines (which is where the problem lies):
for practice_name, practice_clickable in practices:
    practice={}
    practice['name']=practice_name
    practice_clickable.click()
    questions=get_questions(subject_name, specialization_name, practice_name)
    practice['questions']=questions
    driver.get(specialization_url)
Apparently the next practice clickable can't be found on that page. Why not?
Also, I'm not sure whether this deserves its own question, but I can't get this program to work on my Ubuntu machine either: the get_questions function stops at the last question and won't go on to the results page.
Here is Guy's suggestion, which I'm trying:
def scrape():
    setup()
    subjects=get_subjects()
    for subject_name, subject_clickable in subjects:
        subject={}
        subject['name']=subject_name
        subject['specializations']=[]
        subject_clickable.click()
        subject_url=driver.current_url.replace('http://', 'https://')
        specializations=get_specializations(subject_name)
        for specialization_name, specialization_clickable in specializations:
            specialization={}
            specialization['name']=specialization_name
            specialization['practices']=[]
            specialization_clickable.click()
            specialization_url=driver.current_url.replace('http://', 'https://')
            practices=get_practices(subject_name, specialization_name)
            practices_len = len(list(get_practices(subject_name, specialization_name)))
            for i in range(practices_len):
                practices_list = list(get_practices(subject_name, specialization_name))
                practice = {}
                practice['name'] = practices_list[i][0]
                practices_list[i][1].click()
                # for practice_name, practice_clickable in practices:
                #     practice={}
                #     practice['name']=practice_name
                #     practice_clickable.click()
                #     questions=get_questions(subject_name, specialization_name, practice_name)
                #     practice['questions']=questions
                driver.get(specialization_url)
            driver.get(subject_url)
        data.append(subject)
    print(data)

scrape()
EDIT: Following Hubert's suggestion, I tried the following:
practices = get_practices(subject_name, specialization_name)
practices = [item[0] for item in practices]
for index, practice_name in enumerate(practices):
    practice={}
    practice['name'] = practice_name
    practice_row = driver.find_element_by_xpath('//*[text()="'+practice_name+'"]/..')
    practice_clickable_n = practice_row.find_element_by_link_text('Begin')
    print('old:', practice_clickable[index])
    print('new:', practice_clickable_n)
    practice_clickable_n.click()
    questions=get_questions(subject_name, specialization_name, practice_name)
And this is the result:
<map object at 0x7fabc0129860>
<map object at 0x7fabc0129898>
Traceback (most recent call last):
File "scrape.py", line 140, in <module>
scrape()
File "scrape.py", line 131, in scrape
print('old:', practice_clickable[index])
IndexError: list index out of range
Answer 0 (score: 2)
This error message...
selenium.common.exceptions.NoSuchElementException: Message: Web element reference not seen before: 980e5c29-e3af-4b13-979f-0f2bb58b3480
...implies that GeckoDriver was unable to identify the WebElement.
This error is raised from get(webEl, win) in the Marionette source code:
get(webEl, win) {
    if (!(webEl instanceof WebElement)) {
        throw new TypeError(pprint`Expected web element, got: ${webEl}`);
    }
    if (!this.has(webEl)) {
        throw new NoSuchElementError(
            "Web element reference not seen before: " + webEl.uuid
        );
    }
In a comment on the discussion 'Element reference not seen before: undefined' using geckodriver, waitForElementVisible fails, @fc explained the actual problem.
However, the core issue was discussed in Intermittent test_navigation.py TestRefresh.test_basic | NoSuchElementException: Failed to trigger opening a new tab: Web element reference not seen before and was subsequently fixed through a changeset. Using the latest versions of the binaries should solve the problem.
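As a quick sanity check before (and after) upgrading, a sketch like the one below can print which versions the running session actually reports. It assumes the same Firefox/geckodriver setup as in the question, and the exact capability keys can differ between Selenium and geckodriver releases, hence the defensive .get() calls:

from selenium import webdriver

# Minimal sketch: start a session and print the versions the driver reports.
driver = webdriver.Firefox(executable_path="../../../geckodriver.exe")
caps = driver.capabilities
print("Firefox version:    ", caps.get("browserVersion", "unknown"))
print("geckodriver version:", caps.get("moz:geckodriverVersion", "unknown"))
driver.quit()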
Answer 1 (score: 0)
The problem is the iteration over practices. It holds WebElements, but when you navigate to a new page, even if it is effectively the same page, Selenium treats it as a new page and the old element references become invalid. You can solve this by iterating by index. Since you built the pairs with zip, you can do it like this:
practices_len = len(list(get_practices(subject_name, specialization_name)))
for i in range(practices_len):
    practices_list = list(get_practices(subject_name, specialization_name))
    practice = {}
    practice['name'] = practices_list[i][0]
    practices_list[i][1].click()
Answer 2 (score: 0)
Guy is right. The next time specialization_url is loaded it is a new page with new elements, but practices still holds the WebElements from the old page.
To change only the part where this happens, the code below first builds lists of the practice names and practice_clickables. Then, whenever the new specialization_url page has been loaded again, it looks up a fresh clickable and prints the ids of both the old and the current practice_clickable. That makes it easy to see that the element in the same row is now a different one than when the page was first loaded.
Also, the map and zip functions create lazy iterators, so even the iteration over practices fails, because at that step the WebDriver calls are executed against the old objects. That is why I first create the lists and then iterate over those.
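As a side note, this lazy behaviour can be seen without Selenium at all. The small standalone sketch below (with a hypothetical fetch function, not part of the scraper) shows that the lambda-like work passed to map only runs once the zipped pairs are consumed, which in the scraper happens after the page has already changed:

def fetch(name):
    print("looking up", name, "now")   # imagine a WebDriver call here
    return name.upper()

names = ["algebra", "biology"]
pairs = zip(names, map(fetch, names))   # nothing has been looked up yet
print("navigated to another page...")   # the page changes before consumption
for name, element in pairs:             # only now does fetch() actually run
    print(name, element)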
The changed section:
practices = get_practices(subject_name, specialization_name)
practice_clickable = [item[1] for item in practices]
practices = get_practices(subject_name, specialization_name)
practices = [item[0] for item in practices]
for index, practice_name in enumerate(practices):
    practice={}
    practice['name'] = practice_name
    practice_row = driver.find_element_by_xpath(f'//*[text()="{practice_name}"]/..')
    practice_clickable_n = practice_row.find_element_by_link_text('Begin')
    print('old:', practice_clickable[index])
    print('new:', practice_clickable_n)
    practice_clickable_n.click()
    questions=get_questions(subject_name, specialization_name, practice_name)
The full scrape function:
def scrape():
    setup()
    subjects=get_subjects()
    for subject_name, subject_clickable in subjects:
        subject={}
        subject['name']=subject_name
        subject['specializations']=[]
        subject_clickable.click()
        if ('http://') in driver.current_url:
            subject_url=driver.current_url.replace('http://', 'https://')
        else:
            subject_url=driver.current_url
        specializations=get_specializations(subject_name)
        for specialization_name, specialization_clickable in specializations:
            specialization={}
            specialization['name']=specialization_name
            specialization['practices']=[]
            specialization_clickable.click()
            if 'http://' in driver.current_url:
                specialization_url=driver.current_url.replace('http://', 'https://')
            else:
                specialization_url=driver.current_url
            practices = get_practices(subject_name, specialization_name)
            practice_clickable = [item[1] for item in practices]
            practices = get_practices(subject_name, specialization_name)
            practices = [item[0] for item in practices]
            for index, practice_name in enumerate(practices):
                practice={}
                practice['name'] = practice_name
                practice_row = driver.find_element_by_xpath(f'//*[text()="{practice_name}"]/..')
                practice_clickable_n = practice_row.find_element_by_link_text('Begin')
                print('old:', practice_clickable[index])
                print('new:', practice_clickable_n)
                practice_clickable_n.click()
                questions=get_questions(subject_name, specialization_name, practice_name)
                practice['questions']=questions
                driver.get(specialization_url)
            driver.get(subject_url)
        data.append(subject)
    print(data)