How to locate elements inside a site's table content

Date: 2019-06-24 09:18:58

Tags: python selenium

I am trying to scrape the product item details from the website below, but even though the elements are present on the page, the script always fails with the error no such element. Can anyone help solve this? My code:

from time import sleep

from scrapy import Spider
from selenium import webdriver
from scrapy.selector import Selector
from scrapy.http import Request
from selenium.common.exceptions import NoSuchElementException
driver = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe')
driver.get('http://www.tesensors.com/global/en/product/inductive-capacitive/xs-xt-ref')
sleep(5)
# sometimes the site asks you to select a language and country, so the button below needs to be clicked
sign_in_button = driver.find_element_by_id('edit-submit--4')
sign_in_button.click()
sleep(5)
# scrape the content: the page lists 1168 items in total, but no results are returned here
product_model_name=driver.find_elements_by_xpath('span[@itemprop="name"]')
product_desc=driver.find_elements_by_xpath('span[@itemprop="description"]')

3 Answers:

Answer 0 (score: 1)

The product data is inside an iframe.

You can locate it with XPath:

iframe = driver.find_element_by_xpath("//iframe[@id='ecat']")

Then switch to it:

driver.switch_to.frame(iframe)

And this is how to switch back to the default content (out of the iframe):

driver.switch_to.default_content()
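
Putting those pieces together, a minimal sketch of the full pattern (using the same 'ecat' id and itemprop attributes from the question, and assuming driver is already created) looks like this:

iframe = driver.find_element_by_xpath("//iframe[@id='ecat']")
driver.switch_to.frame(iframe)

# elements inside the iframe are only reachable after the switch
names = driver.find_elements_by_xpath('//span[@itemprop="name"]')

# return to the top-level document when done with the frame
driver.switch_to.default_content()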

Do not use the time.sleep module; try explicit waits instead and see the difference.

EX:

from scrapy import Spider
from selenium import webdriver
from scrapy.selector import Selector
from scrapy.http import Request
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC


driver = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe')
driver.get('http://www.tesensors.com/global/en/product/inductive-capacitive/xs-xt-ref')

# sometimes the site asks you to select a language and country, so the button below needs to be clicked
sign_in_button = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.ID, "edit-submit--4")))
sign_in_button.click()

#switch iframe
iframe = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//iframe[@id='ecat']")))
driver.switch_to.frame(iframe)

# scrape the content (the page lists 1168 items in total)
product_model_name = driver.find_elements_by_xpath('//span[@itemprop="name"]')
print(product_model_name[0].text)

product_desc=driver.find_elements_by_xpath('//span[@itemprop="description"]')

print(product_desc[0].text)
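
As a side note (not part of the original answer), Selenium's expected_conditions also provides frame_to_be_available_and_switch_to_it, which waits for the frame and switches into it in a single call; the two wait-and-switch lines above could be replaced with:

WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.ID, "ecat")))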

Answer 1 (score: 0)

import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException



driver = webdriver.Chrome(r"C:\Users\path\Desktop\chromedriver\chromedriver.exe")
driver.get('http://www.tesensors.com/global/en/product/inductive-capacitive/xs-xt-ref')

try:
    element = WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "edit-submit--4")))
    element.submit()
except TimeoutException:
    # the language/country prompt did not appear, so continue with the page as-is
    print("proceeding further")

iframe = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//iframe[@id='ecat']")))
driver.switch_to.frame(iframe)
product_model_name = driver.find_elements_by_xpath("//*[@itemprop='name']")
product_model_description = driver.find_elements_by_xpath("//*[@itemprop='description']")
names = []
description = []
for i in product_model_name:
    print(i.text)
    names.append(i.text)
for i in product_model_description:
    print(i.text)
    description.append(i.text)
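
If each name lines up with exactly one description (an assumption about the page layout, not something the original answer verifies), the two lists collected above can be paired like this:

# pair names with descriptions; assumes both lists have the same length
products = [{"name": n, "description": d} for n, d in zip(names, description)]
print(products[:3])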

Answer 2 (score: 0)

I used this approach to get the elements:

from scrapy import Spider
import os
from selenium import webdriver
import time
from scrapy.selector import Selector
from scrapy.http import Request
from selenium.common.exceptions import NoSuchElementException
# pathToDriver is expected to hold the path of the folder that contains the chromedriver binary
chromedriver = pathToDriver + 'chromedriver'
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
driver.get('http://www.tesensors.com/global/en/product/inductive-capacitive/xs-xt-ref')
time.sleep(3)
# sometimes the site asks you to select a language and country, so the button below needs to be clicked
sign_in_button = driver.find_element_by_id('edit-submit--4')
sign_in_button.click()
time.sleep(3)
iframe_src = driver.find_element_by_id('ecat').get_attribute("src")
print(iframe_src)
driver.get(iframe_src)
# scrape the content (the page lists 1168 items in total)
product_model_names=driver.find_elements_by_class_name('boldLevel2')
product_names = list()
for element in product_model_names:
    product_names.append(element.text)
print(product_names)

product_desc = driver.find_elements_by_class_name('level1')
product_descriptions = list()
for element in product_desc:
    product_descriptions.append(element.text)
print(product_descriptions)
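
# A hedged alternative, not part of the original answer: Scrapy's Selector is
# already imported above, so the same data can also be pulled from the loaded
# iframe page with CSS selectors before the driver is closed.
sel = Selector(text=driver.page_source)
css_names = sel.css('.boldLevel2::text').getall()
css_descriptions = sel.css('.level1::text').getall()
print(css_names[:3], css_descriptions[:3])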

driver.close()