from bs4 import BeautifulSoup
from selenium import webdriver
import time

driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://www.zillow.com/homes/recently_sold/Culver-City-CA/house,condo,apartment_duplex,townhouse_type/20432063_zpid/51617_rid/12m_days/globalrelevanceex_sort/34.048605,-118.340178,33.963223,-118.47785_rect/12_zm/")
time.sleep(3)

# Expand the "Price / Tax History" section
driver.find_element_by_class_name("collapsible-header").click()

soup = BeautifulSoup(driver.page_source, "lxml")
region = soup.find("div", {"id": "hdp-price-history"})
table = region.find('table', {'class': 'zsg-table yui3-toggle-content-minimized'})
print(table)
Answer 0 (score: 2)
The following uses requests and BeautifulSoup to get the data; no Selenium is needed (so it is fast).
from bs4 import BeautifulSoup
import requests
import re

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0"}
r = requests.get("https://www.zillow.com/homes/recently_sold/Culver-City-CA/house,condo,apartment_duplex,townhouse_type/20432063_zpid/51617_rid/12m_days/globalrelevanceex_sort/34.048605,-118.340178,33.963223,-118.47785_rect/12_zm/", headers=headers)

# Find every AjaxRender request embedded in the initial HTML
urls = re.findall(re.escape('AjaxRender.htm?') + '(.*?)"', r.text)

# One of these requests returns the price-history fragment (index 4 here)
url = "https://www.zillow.com/AjaxRender.htm?{}".format(urls[4])
r = requests.get(url, headers=headers)

# Strip the escaping backslashes before parsing the returned HTML
soup = BeautifulSoup(r.text.replace('\\', ''), "html.parser")

data = []
for tr in soup.find_all('tr'):
    data.append([td.text for td in tr.find_all('td')])

for row in data[:5]:    # Show the first 5 entries
    print(row)
This prints the first 5 entries:
['06/16/17', 'Sold', '$940,000-0.9%', 'K. Miller, A. Masket', '']
['06/14/17', 'Price change', '$949,000-1.0%', '', '']
['05/08/17', 'Pending sale', '$959,000', '', '']
['04/17/17', 'Price change', '$959,000+1.1%', '', '']
['02/27/17', 'Pending sale', '$949,000', '', '']
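The price and the percent change come back merged in a single cell (e.g. $940,000-0.9%). If you want them as separate fields, a small helper can split them; this is only a sketch based on the formats visible in the sample rows above:

import re

def split_price_change(cell):
    # Split e.g. "$940,000-0.9%" into ("$940,000", "-0.9%"); the change part may be absent.
    m = re.match(r'(\$[\d,]+)([+-][\d.]+%)?$', cell)
    if not m:
        return cell, None
    return m.group(1), m.group(2)

print(split_price_change('$940,000-0.9%'))   # ('$940,000', '-0.9%')
print(split_price_change('$959,000'))        # ('$959,000', None)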
The required HTML is not present in the first GET; it is only generated on demand when the Price / Tax History section is expanded, which triggers AJAX requests in the browser. The code searches the initial HTML for all of those requests and issues the same ones itself; one of these requests (urls[4] here) returns the required section. The backslashes (\) have to be stripped from the returned HTML before it can be passed to BeautifulSoup and parsed as a table.
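Relying on urls[4] is brittle, since the position of the price-history request can change. One possible refinement (a sketch only, reusing the urls, headers, and imports from the snippet above) is to probe each AjaxRender URL and keep the first response that actually contains the history table:

# Sketch: probe every AjaxRender URL and keep the one carrying the price-history table.
history_soup = None
for fragment in urls:
    resp = requests.get("https://www.zillow.com/AjaxRender.htm?{}".format(fragment),
                        headers=headers)
    candidate = BeautifulSoup(resp.text.replace('\\', ''), "html.parser")
    if candidate.find('table', {'class': 'zsg-table'}):   # class name taken from the question's selector
        history_soup = candidate
        break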
Answer 1 (score: 0)
You don't need BeautifulSoup for this. You can get the required table with the code below:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://www.zillow.com/homes/recently_sold/Culver-City-CA/house,condo,apartment_duplex,townhouse_type/20432063_zpid/51617_rid/12m_days/globalrelevanceex_sort/34.048605,-118.340178,33.963223,-118.47785_rect/12_zm/")

# Wait until the "Price / Tax History" header is clickable, then expand it
wait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, "collapsible-header"))).click()

# Wait for the history table to be added to the DOM before reading it
table = wait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div#hdp-price-history table.zsg-table.yui3-toggle-content-minimized")))
print(table.text)
The required table is generated dynamically, so you have to wait until it is present in the DOM. That is why you could not find the table in the page source immediately after the click.
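If you need the rows as structured data rather than one text blob, you can walk the located table element directly; a minimal sketch, reusing the driver and table from the snippet above:

# Sketch: split the located table into rows and cells.
rows = []
for tr in table.find_elements(By.TAG_NAME, "tr"):
    cells = [td.text for td in tr.find_elements(By.TAG_NAME, "td")]
    if cells:                      # skip header rows that only contain <th>
        rows.append(cells)

for row in rows[:5]:
    print(row)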