PhantomJS not extracting links with Selenium

Time: 2015-09-25 17:26:22

Tags: python scroll selenium-webdriver web-scraping phantomjs

I am scraping a website using Selenium, Scrapy, and PhantomJS. The problem is that although the code scrolls the page perfectly, it only extracts links up to a certain limit; beyond that, it completely ignores the results of the scrolling. With the Firefox webdriver it works fine, but since I run the code on a server I switched to PhantomJS, and that is where the problem appears. Here is the code:

# -*- coding: utf-8 -*-

from scrapy.spider import BaseSpider
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import csv
import re
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait


class DukeSpider(BaseSpider):
 name = "dspider"
 allowed_domains = ["dukemedicine.org"]
 start_urls = ["http://www.dukemedicine.org/find-doctors-physicians"]  #hlor


 def __init__(self):
    self.driver = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true'])
    self.driver.maximize_window()
    print 'here'


 def parse(self, response):

    print 'nowhere'
    print response
    print response.url
    b = open('doc_data_duke.csv', 'a')
    a = csv.writer(b, lineterminator='\n')
    print 'a'

    self.driver.get(response.url)
    time.sleep(10)
    wait = WebDriverWait(self.driver, 10)
    print 'helo'

    click = self.driver.find_element_by_xpath("//span[@id='specialty']")
    click.click()
    click_again = self.driver.find_element_by_xpath("//ul[@class='doctor-type']/li[@class='ng-binding ng-scope'][2]")

    click_again.click()
    time.sleep(25)

    act = ActionChains(self.driver)
    act.move_to_element(self.driver.find_element_by_id('doctor-matrix-section')).click()
    print 'now here'

    for i in range(0, 75):  
        #self.driver.find_element_by_xpath("//div[@id='doctor-matrix-section']").send_keys(Keys.PAGE_DOWN)
        #self.driver.execute_script("window.scrollBy(0, document.body.scrollHeight);")
        #self.driver.find_element_by_tag_name("body").click()
        #self.driver.find_element_by_tag_name("body").send_keys(Keys.PAGE_DOWN)#findElement(By.tagName("body")).sendKeys(Keys.UP);
        #self.driver.find_element_by_tag_name("body").send_keys(Keys.END)
        #bg = self.driver.find_element_by_css_selector('body')

        #bg.send_keys(Keys.SPACE)
        act.send_keys(Keys.PAGE_DOWN).perform()
        time.sleep(2)

        print i

    links = self.driver.find_elements_by_xpath("//div[@class = 'result-information']/div[@class='name']/a")

    for l in links:
        print l
        doc_list = l.get_attribute('href')
        if re.match(r'https:\/\/www\.dukemedicine\.org\/find-doctors-physicians\/#!\/(.*)', doc_list):
            print doc_list
            dr = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true'])
            dr.maximize_window()

            dr.get(doc_list)

            try:
                name_title = dr.find_element_by_xpath('//div[@class="header1 ng-binding"]').text
                name_titles = name_title.split(",", 1)
                name = name_titles[0].encode('utf-8')

                title = name_titles[1]
                print name
                title = title[1:].encode('utf-8')
                print title
            except:
                name = ''
                title = ''
            try:
                speciality = dr.find_element_by_xpath('//p[@class="specialties ng-scope"]').text

            except:
                speciality = ''

            try:
                language = dr.find_element_by_xpath(
                    '//div[@class="lang ng-scope"]/div[@class="plainText inline ng-binding"]').text
            except:
                language = ''
            if dr.find_elements_by_xpath('//div[@class="location-info"]'):
                locations = dr.find_elements_by_xpath('//div[@class="location-info"]')
                if len(locations) >= 3:
                    locationA = locations[0].text.encode('utf-8')
                    locationA = locationA.replace('Directions', '')
                    locationA = locationA.replace('\n', '')
                    locationB = locations[1].text.encode('utf-8')
                    locationB = locationB.replace('Directions', '')
                    locationB = locationB.replace('\n', '')
                    locationC = locations[2].text.encode('utf-8')
                    locationC = locationC.replace('\n', '')
                    locationC = locationC.replace('Directions', '')
                elif len(locations) == 2:
                    locationA = locations[0].text.encode('utf-8')
                    locationA = locationA.replace('Directions', '')
                    locationA = locationA.replace('\n', '')
                    locationB = locations[1].text.encode('utf-8')
                    locationB = locationB.replace('Directions', '')
                    locationB = locationB.replace('\n', '')
                    locationC = ''
                elif len(locations) == 1:
                    locationA = locations[0].text.encode('utf-8')
                    locationA = locationA.replace('Directions', '')
                    locationA = locationA.replace('\n', '')
                    locationB = ''
                    locationC = ''
            else:
                locationA = ''
                locationB = ''
                locationC = ''

            dr.close()
            data = [title, name, speciality, language, locationA, locationB, locationC]
            print 'aaaa'
            print data
            a.writerow(data)

No matter how high a value I set for that range, it ignores results beyond a certain point.
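One possible difference between the Firefox and PhantomJS runs is the effective viewport size: maximize_window() may leave PhantomJS with a small viewport, and a small viewport can keep lazily loaded results from ever scrolling into view. A minimal sketch of forcing an explicit window size instead, offered only as an assumption to test rather than a confirmed fix:

from selenium import webdriver

driver = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true'])
# assumption: give the headless browser a desktop-sized viewport
# instead of relying on maximize_window()
driver.set_window_size(1920, 1080)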

1 Answer:

Answer 0 (score: 2):

Let's use the fact that there is an element holding the total number of results.

The idea is to repeatedly scroll the last doctor found into view until all of the doctors have been loaded.

Implementation (with clarifying comments; only the relevant Selenium-specific part is shown):

# -*- coding: utf-8 -*-
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException


driver = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true', '--load-images=false'])
# driver = webdriver.Chrome()
driver.maximize_window()

driver.get("http://www.dukemedicine.org/find-doctors-physicians")

# close optional survey popup if exists
try:
    driver.find_element_by_css_selector("area[alt=close]").click()
except NoSuchElementException:
    pass

# open up filter dropdown
click = driver.find_element_by_id("specialty")
click.click()

# choose specialist
specialist = driver.find_element_by_xpath("//ul[@class = 'doctor-type']/li[contains(., 'specialist')]")
specialist.click()

# artificial delay: TODO: fix?
time.sleep(15)

# read total results count
total_count = int(driver.find_element_by_id("doctor-number").text)

# get the initial results count
results = driver.find_elements_by_css_selector("div.doctor-result")
current_count = len(results)

# iterate while all of the results would not be loaded
while current_count < total_count:
    driver.execute_script("arguments[0].scrollIntoView();", results[-1])

    results = driver.find_elements_by_css_selector("div.doctor-result")
    current_count = len(results)
    print "Current results count: %d" % current_count

# report total results
print "----"
print "Total results loaded: %d" % current_count

driver.quit()

Works nicely for me in both PhantomJS and Chrome. Here is what I get on the console:

Current results count: 36
Current results count: 54
Current results count: 72
Current results count: 90
...
Current results count: 1656
Current results count: 1674
Current results count: 1692
Current results count: 1708
----
Total results loaded: 1708

Also note that I've added the --load-images=false command-line argument, which really speeds things up.
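The artificial time.sleep(15) above is flagged with a TODO; a possible replacement, sketched here on the assumption that the doctor-number counter only becomes visible once the filtered results begin loading, is an explicit wait:

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# sketch: poll for the results counter instead of sleeping for a fixed 15 seconds
wait = WebDriverWait(driver, 30)
counter = wait.until(EC.visibility_of_element_located((By.ID, "doctor-number")))
total_count = int(counter.text)

WebDriverWait polls the condition and returns as soon as it is satisfied, so slow page loads still get the full timeout while fast ones do not pay for it.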