I wrote a web scraper that extracts the hotel names and prices from the first page of the search results. To reach the next page I have to click the "Next page" link with the Selenium webdriver, but I don't know where in the code to put the next-page click and how to call it so the content of the following pages gets scraped too. Please help.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time, re
import itertools
from scrapy.item import Item, Field
from scrapy.spider import BaseSpider
from scrapy.contrib.spiders import CrawlSpider, Rule
from bs4 import BeautifulSoup
import urllib2
import sys
reload(sys)
sys.setdefaultencoding("utf8")
class Agoda(CrawlSpider):
    name = 'agoda'
    allowed_domains = ["agoda.com"]
    start_urls = ["http://www.agoda.com"]

    # open the Agoda home page and search for Mumbai
    driver = webdriver.Firefox()
    driver.get("http://www.agoda.com")
    driver.find_element_by_id("ctl00_ctl00_MainContent_area_promo_HomeSearchBox1_TextSearch1_searchText").clear()
    driver.find_element_by_id("ctl00_ctl00_MainContent_area_promo_HomeSearchBox1_TextSearch1_searchText").send_keys("Mumbai")
    driver.find_element_by_xpath("//select[contains(@id,'ddlCheckInDay')]")
    driver.find_element_by_xpath("//option[contains(.,'Mon 23')]").click()
    driver.find_element_by_id("ctl00_ctl00_MainContent_area_promo_HomeSearchBox1_SearchButton").click()
    driver.find_element_by_id("ctl00_ContentMain_rptAB1936_ctl01_rptSearchResultAB1936_ctl01_lnkResult1936").click()
    #driver.find_element_by_id("ctl00_ContentMain_rptSearchResult_ctl01_lnkResult").click()
    time.sleep(40)  # wait for the first page of results to load
    #print driver.page_source

    TotalResults = driver.find_element_by_xpath("//span[@class='blue ssr_search_text']")
    print TotalResults.text

    # parse the hotel names and prices out of the first results page
    html_source = driver.page_source
    soup = BeautifulSoup(html_source)
    names = soup("a", {"class": "hot_name"})
    prices = soup("span", {"class": "fontxlargeb purple"})
    hotel_names = [name.get_text() for name in names]
    prices = [price.get_text() for price in prices]
    name_price_list = itertools.izip_longest(hotel_names, prices)
    for name, price in name_price_list:
        print name, price

    driver.find_element_by_xpath("//a[contains(.,'Next page >')]").click()  # next page link
I have the webdriver code for the next-page link, driver.find_element_by_xpath("//a[contains(.,'Next page >')]").click(), but I don't know where to put it and how to build a loop so the crawler walks through all the pages and extracts the names and prices from each one.
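What I imagine is a loop roughly like the sketch below, which would replace the single scrape plus click at the end of my code. The scrape_page helper, the NoSuchElementException stopping condition and the 10-second wait are only my guesses at how it could be structured; the XPath and the CSS classes are the ones already used in my code above.

# rough sketch, not tested: scrape the current page, then keep clicking
# "Next page >" until the link no longer exists
import itertools
import time
from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException

def scrape_page(driver):
    # parse the hotel names and prices out of the page that is currently loaded
    soup = BeautifulSoup(driver.page_source)
    names = [a.get_text() for a in soup("a", {"class": "hot_name"})]
    prices = [s.get_text() for s in soup("span", {"class": "fontxlargeb purple"})]
    for name, price in itertools.izip_longest(names, prices):
        print name, price

# 'driver' is the Firefox driver that is already sitting on the first results page
while True:
    scrape_page(driver)
    try:
        # click the next-page link if it is still present
        driver.find_element_by_xpath("//a[contains(.,'Next page >')]").click()
    except NoSuchElementException:
        break  # no more pages
    time.sleep(10)  # arbitrary wait for the next page to load

Is something like this the right place for the click, or does it belong somewhere else in the spider?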