我使用 selenium 和 Scrapy 从某个搜索结果页面抓取信息。
我需要遍历每个公司名称;进入公司信息页面后,我需要从该页面提取信息,同时还需要打开"营销联系人"(Marketing Contacts)页面并提取其中的信息。Scrapy 的 Request
可以打开公司信息页面,但在我尝试打开营销联系人页面时却不起作用。
这是我的代码:
# -*- coding: utf-8 -*-
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from selenium import webdriver
from scrapy.selector import Selector
from scrapy.http import Request
import time
class HooverSpider(CrawlSpider):
    """Crawl Hoovers company search results via Selenium-rendered pages.

    Flow: search-results page -> each company profile page -> that
    company's Marketing Contacts page.  Selenium renders the
    JavaScript-heavy pages; Scrapy Requests schedule the callbacks.
    """

    name = "hspider"
    allowed_domains = ["hoovers.com"]
    # Search-results listing (URL kept verbatim from the original).
    start_urls = ["http://www.hoovers.com/company-information/company-search.html?term=australia&maxitems=25&nvcnt=4&nvsls=[5;10L&nvloc=0&nvemp=[11;49]"]  # hloru

    def __init__(self, *args, **kwargs):
        # Fix: CrawlSpider.__init__ must run (it compiles the crawl Rules
        # and wires up signals); the original skipped the super() call.
        super(HooverSpider, self).__init__(*args, **kwargs)
        self.driver = webdriver.Firefox()

    def closed(self, reason):
        # Fix: the browser was never shut down; quit it when the spider ends.
        self.driver.quit()

    def _text_or_empty(self, xpath):
        # Return the text of the first node matching `xpath`, or '' when it
        # is absent.  Fix: the original called find_element_by_xpath inside
        # an `if`, which raises NoSuchElementException for a missing node,
        # so its `else` branch was unreachable.
        elements = self.driver.find_elements_by_xpath(xpath)
        return elements[0].text if elements else ''

    def parse(self, response):
        """Collect every company-profile link on the search-results page."""
        self.driver.get(response.url)
        time.sleep(3)  # crude wait for the JS-rendered table; an explicit WebDriverWait would be more robust
        company = self.driver.find_elements_by_xpath('//div[@class="cmp-company-directory"]/div[1]/table/tbody/tr/td/a')
        for c in company:
            yield Request(str(c.get_attribute('href')), self.parse_link)

    def parse_link(self, response):
        """Scrape the company profile, then follow its Marketing Contacts link."""
        self.driver.get(response.url)
        time.sleep(2)
        title = self._text_or_empty('//div[@class="left-content"]/h1')
        print(title)
        street = self._text_or_empty('//div[@class="left-content"]/p/span[1]')
        print(street)
        marketing = self.driver.find_element_by_xpath('//*[@id="fs-comps-A"]/div/div/div/div[1]/div/div[1]/div/ul[2]/li[2]/a').get_attribute('href')
        print(marketing)
        # Fix for the reported bug: without dont_filter=True, Scrapy's
        # duplicate filter can silently drop this request, so parse_page
        # never ran.
        yield Request(marketing, callback=self.parse_page, dont_filter=True)

    def parse_page(self, response):
        """Handle the Marketing Contacts page."""
        print(response.url)
        self.driver.get(response.url)
        time.sleep(3)
        print('hello')
而下面这段(单独打开一个公司页面的)代码是有效的:
class HooverSpider(CrawlSpider):
    """Open one known company page and follow its Marketing Contacts link."""

    name = "hspider"
    allowed_domains = ["hoovers.com"]
    start_urls = ["http://www.hoovers.com/company-information/cs/marketing-lists.LAFFORT_AUSTRALIA_PTY_LIMITED.3d01c1d98ad9322f.html"]

    def __init__(self):
        # One shared browser instance for the whole spider run.
        self.driver = webdriver.Firefox()

    def parse(self, response):
        # Render the page in the browser, give its JavaScript a few seconds
        # to run, then pull the Marketing Contacts href out of the DOM and
        # hand it back to Scrapy as a follow-up request.
        self.driver.get(response.url)
        time.sleep(3)
        link_node = self.driver.find_element_by_xpath('//*[@id="fs-comps-A"]/div/div/div/div[1]/div/div[1]/div/ul[2]/li[2]/a')
        marketing = link_node.get_attribute('href')
        return Request(marketing, callback=self.parse_page)

    def parse_page(self, response):
        # Marker proving the follow-up request actually fired.
        print('hh')
答案 0(得分:0):
主要问题在于你获取"营销联系人"链接的方式。我会使用 find_element_by_link_text()
,通过 "Marketing Contacts" 这一链接文本来定位该链接。
此外,这里还有几点注意事项:
- 不要在各个回调之间共享同一个 self.driver —— Scrapy 是完全异步的,你很快就会遇到两个方法同时复用同一个驱动实例(以及同一个浏览器窗口)而相互冲突的情况;
- 用显式等待(WebDriverWait)代替 time.sleep()。
这对我有用:
from scrapy.contrib.spiders import CrawlSpider
from scrapy.http import Request
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class HooverSpider(CrawlSpider):
    """Answer's variant: a fresh driver per callback plus an explicit wait.

    A new Firefox instance is created inside the callback because Scrapy is
    asynchronous -- sharing one driver across concurrently running callbacks
    would make them collide on the same browser window.
    """

    name = "hspider"
    ...  # start_urls / parse() omitted by the answer; same as in the question

    def parse_link(self, response):
        # Render the company page in its own, short-lived browser instance.
        driver = webdriver.Firefox()
        driver.get(response.url)
        # Explicit wait (up to 10 s) instead of time.sleep(): locate the link
        # by its visible text, which is far less brittle than the long
        # positional XPath used in the question.
        marketing = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.LINK_TEXT, "Marketing Contacts")))
        marketing_link = marketing.get_attribute('href')
        driver.close()
        # Hand the extracted URL back to Scrapy for scheduling.
        yield Request(marketing_link, self.parse_page)

    def parse_page(self, response):
        # Marker output proving the Marketing Contacts request fired.
        print "HERE!"
        print response.url
        print "-----------"
如果你确实需要 selenium
,请考虑使用无头的 PhantomJS
代替 Firefox
—— 至少可以提高性能,并使其能在无显示(headless)环境中运行(把 webdriver.Firefox()
替换为 webdriver.PhantomJS())。