Python Selenium: skip the page if it gives an "element not visible" message

Time: 2018-04-22 20:14:06

Tags: python-3.x selenium error-handling web-scraping

I'm trying to get a text element from a page. To reach this element, my script clicks two filters on the page. I need to crawl 5,000 pages. The script works for collecting the text element, but after a certain number of pages it always fails with an "element not visible" message. I assume this is because the page hasn't loaded in time, since when I check the pages where it breaks, the text element is there. (I have time.sleep(3) after each click.) What can I use in my script to skip a page if it doesn't load in time?

import os
import re
import time
import json

from selenium import webdriver
from selenium.webdriver.chrome.options import Options


def yelp_scraper(url):
    driver.get(url)
    # get total number of restaurants 
    total_rest_loc = '//span[contains(text(),"Showing 1")]'
    total_rest_raw = driver.find_element_by_xpath(total_rest_loc).text
    total_rest = int(re.sub(r'Showing 1.*of\s','',total_rest_raw))

    button1 = driver.find_element_by_xpath('//span[@class="filter-label filters-toggle js-all-filters-toggle show-tooltip"]')
    button1.click()
    time.sleep(1)

    button2 = driver.find_element_by_xpath('//span[contains(text(),"Walking (1 mi.)")]')
    button2.click()
    time.sleep(2)

    rest_num_loc = '//span[contains(text(),"Showing 1")]'
    rest_num_raw = driver.find_element_by_xpath(rest_num_loc).text
    rest_num = int(re.sub(r'Showing 1.*of\s','',rest_num_raw))

    if total_rest==rest_num:

        button3 = driver.find_element_by_xpath('//span[contains(text(),"Biking (2 mi.)")]')
        button3.click()
        time.sleep(2)

        button4 = driver.find_element_by_xpath('//span[contains(text(),"Walking (1 mi.)")]')
        button4.click()
        time.sleep(2)

        rest_num_loc = '//span[contains(text(),"Showing 1")]'
        rest_num_raw = driver.find_element_by_xpath(rest_num_loc).text
        rest_num = int(re.sub(r'Showing 1.*of\s','',rest_num_raw))


    return rest_num


chromedriver = "/Applications/chromedriver" # path to the chromedriver executable
os.environ["webdriver.chrome.driver"] = chromedriver


chrome_options = Options()
# add headless mode
chrome_options.add_argument("--headless")
# turn off image loading
prefs = {"profile.managed_default_content_settings.images":2}
chrome_options.add_experimental_option("prefs",prefs)

driver = webdriver.Chrome(chromedriver, chrome_options=chrome_options)


for url in url_list:
    yelp_data[url] = yelp_scraper(url)

json.dump(yelp_data, open('../data/yelp_json/yelp_data.json', 'w'), indent="\t")


driver.close()
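
One way to keep the crawl running would be to catch the failure around each call and skip that URL. A minimal sketch, assuming Selenium's standard exception classes; failed_urls is a hypothetical list for collecting pages to retry later:

    from selenium.common.exceptions import (ElementNotVisibleException,
                                            NoSuchElementException,
                                            TimeoutException)

    failed_urls = []  # hypothetical: pages that did not load in time

    for url in url_list:
        try:
            yelp_data[url] = yelp_scraper(url)
        except (ElementNotVisibleException, NoSuchElementException, TimeoutException):
            # a filter button or the "Showing 1" span never became visible;
            # record the URL and move on to the next page
            failed_urls.append(url)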

1 Answer:

Answer 0 (score: 1):

  • Example: wrap the element lookups in a try/except so the loop continues with the next item when an element is missing:

        # items is a list, and a/b are a csv writer and its underlying file,
        # assumed to be defined elsewhere in the answerer's script
        from selenium.common.exceptions import NoSuchElementException

        for item in driver.find_elements_by_class_name('item'):
            try:
                model = item.find_element_by_class_name('product-model')
                price = item.find_element_by_class_name('product-display-price')
                title = item.find_element_by_class_name('product-title')
                url = item.find_element_by_class_name('js-detail-link')

                items.append({'model': model, 'price': price, 'title': title, 'url': url})
                print(model.text, price.text, title.text, url.get_attribute("href"))
                c = (model.text, price.text, title.text, url.get_attribute("href"))
                a.writerow(c)
            except NoSuchElementException:
                # here you can do what you want to do when an element is not
                # found; then it'll continue with the next one
                pass
        b.close()
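
The same idea combines well with explicit waits: instead of fixed time.sleep() calls, WebDriverWait polls until the element is actually visible and raises TimeoutException only after the deadline, which can be caught the same way. A sketch, assuming the "Showing 1" locator from the question:

    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.common.exceptions import TimeoutException

    try:
        # wait up to 10 seconds for the restaurant count to become visible
        total_rest_raw = WebDriverWait(driver, 10).until(
            EC.visibility_of_element_located(
                (By.XPATH, '//span[contains(text(),"Showing 1")]'))
        ).text
    except TimeoutException:
        total_rest_raw = None  # page did not load in time; caller can skip it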