Using the purrr package in R to scrape a list of Google+ URLs

Date: 2018-06-04 12:46:03

Tags: r web-scraping rvest purrr rselenium

I am working on a web-scraping project to extract Google+ reviews for a set of children's hospitals. My approach is as follows:

1) Define a list of Google+ URLs to navigate to for review scraping. The URLs live in a data frame, together with other variables that identify each hospital.

2) Scrape the review text, number of stars, and time posted for all the reviews associated with a given URL.

3) Save these elements in a data frame, and name the data frame using another variable in the data frame that corresponds to the URL.

4) Move on to the next URL... and so on, until all the URLs have been scraped (see the sketch after this list).
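In outline, the iteration I am aiming for looks like this (a sketch only; extract_google_review and urls_df are defined further down, and attaching names with names<- is just one possible way to express step 3):

#Outline of the intended loop: one scrape per URL, one named result per hospital
library(purrr)
review_dfs = map(urls_df$GooglePlus_URL, extract_google_review)  #steps 2 and 3
names(review_dfs) = urls_df$Name                                 #label each result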

At the moment, the code can scrape a single URL. I tried to turn this into a function and apply it with map from the purrr package. However, it does not seem to work, so I must be doing something wrong.

Below is my attempt, with comments explaining the purpose of each step.

#Load the necessary libraries
devtools::install_github("ropensci/RSelenium")
library(purrr)
library(dplyr)
library(stringr)
library(rvest)
library(xml2)
library(RSelenium)
#To avoid any SSL error messages
library(httr)
set_config( config( ssl_verifypeer = 0L ) )

Define the URL data

#Now to define the dataframe with the urls
urls_df =data.frame(Name=c("CHKD","AIDHC")
                    ,ID=c("AAWZ12","AAWZ13")
                    ,GooglePlus_URL=c("https://www.google.co.uk/search?ei=fJUKW9DcJuqSgAbPsZ3gDQ&q=Childrens+Hospital+of+the+Kings+Daughter+&oq=Childrens+Hospital+of+the+Kings+Daughter+&gs_l=psy-ab.3..0i13k1j0i22i10i30k1j0i22i30k1l7.8445.8445.0.9118.1.1.0.0.0.0.144.144.0j1.1.0....0...1c.1.64.psy-ab..0.1.143....0.qDMr7IDA-uA#lrd=0x89ba9869b87f1a69:0x384861b1e3a4efd3,1,,,",
                                      "https://www.google.co.uk/search?q=Alfred+I+DuPont+Hospital+for+Children&oq=Alfred+I+DuPont+Hospital+for+Children&aqs=chrome..69i57.341j0j8&sourceid=chrome&ie=UTF-8#lrd=0x89c6fce9425c92bd:0x80e502f2175fb19c,1,,,"
                                      ))

Create the function

extract_google_review=function(googleplus_urls) {

  #Opens a Chrome session
  rmDr=rsDriver(browser = "chrome",check = F)
  myclient= rmDr$client

  #Creates a sub-dataframe for the filtered hospital, which I will later use to name the dataframe
  urls_df_sub=urls_df %>% filter(GooglePlus_URL %in% googleplus_urls)

  #Navigate to the url
  myclient$navigate(googleplus_urls)

  #click on the snippet to switch focus----------
  webEle <- myclient$findElement(using = "css",value = ".review-snippet")
  webEle$clickElement()
  # Save page source
  pagesource= myclient$getPageSource()[[1]]

  #simulate scroll down for several times-------------
  count=read_html(pagesource) %>%
    html_nodes(".p13zmc") %>%
    html_text()

  #Stores the number of reviews for the url, so we know how many times to scroll down
  scroll_down_times=count %>%
    str_sub(1,nchar(count)-5) %>%
    as.numeric()

  for(i in 1:scroll_down_times){
    webEle$sendKeysToActiveElement(sendKeys = list(key="page_down"))
    #the content needs time to load, so wait 1.2 seconds every 5 scroll-downs
    if(i%%5==0){
      Sys.sleep(1.2)
    }
  }

  #loop and simulate clicking on all "click on more" elements-------------
  webEles <- myclient$findElements(using = "css",value = ".review-more-link")
  for(webEle in webEles){
    tryCatch(webEle$clickElement(),error=function(e){print(e)})
  }

  pagesource= myclient$getPageSource()[[1]]
  #this should get the full review, including translation and original text
    reviews=read_html(pagesource) %>%
    html_nodes(".review-full-text") %>%
    html_text()

  #number of stars
  stars <- read_html(pagesource) %>%
    html_node(".review-dialog-list") %>%
    html_nodes("g-review-stars > span") %>%
    html_attr("aria-label")

  #time posted
  post_time <- read_html(pagesource) %>%
    html_node(".review-dialog-list") %>%
    html_nodes(".dehysf") %>%
    html_text()

  #Consolidating everything into a dataframe
  #Truncate all three vectors to the length of the shortest one
  n=min(length(reviews),length(stars),length(post_time))
  reviews=head(reviews,n)
  stars=head(stars,n)
  post_time=head(post_time,n)
  reviews_df=data.frame(review=reviews,rating=stars,time=post_time)

  #Assign the dataframe a name based on the value in column 'Name' of the dataframe urls_df, defined above
  df_name <- tolower(urls_df_sub$Name)

  if(exists(df_name)) {
    assign(df_name, unique(rbind(get(df_name), reviews_df)))
  } else {
    assign(df_name, reviews_df)
  }


} #End function
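When pointed at one URL at a time, the scraping part of the function works for me, e.g.:

#Single-URL call: this is the case that currently scrapes correctly
extract_google_review(urls_df$GooglePlus_URL[1])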

Feed the URLs to the function

#Now that the function is defined, it is time to create a vector of urls and feed this vector into the function
googleplus_urls=urls_df$GooglePlus_URL
googleplus_urls %>% map(extract_google_review)
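As far as I understand, map() returns a list with one element per URL, each holding whatever the function returns. So if extract_google_review ended by returning reviews_df rather than calling assign(), the output could be captured like this (a sketch, not my current code):

#Hypothetical alternative: capture map()'s return value instead of using assign()
review_list = googleplus_urls %>% map(extract_google_review)
names(review_list) = tolower(urls_df$Name)  #one named data frame per hospital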

There seems to be an error in the function that prevents it from scraping the data and storing it in the expected separate data frames.
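I suspect this is related to how assign() behaves inside a function: with no envir argument, it creates the object in the function's own environment, which is discarded when the function returns. A self-contained illustration of that behaviour:

#assign() inside a function does not create the object in the global workspace
f = function() {
  assign("chkd", data.frame(review="placeholder", rating="5", time="a day ago"))
}
f()
exists("chkd")  #FALSE: 'chkd' disappeared along with f's environment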

My expected output

2 data frames, each with 3 columns
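For illustration, each of the two data frames should look something like this (placeholder values, not real review data):

#Illustrative shape of one expected data frame (placeholder rows)
chkd = data.frame(review = c("Great staff...", "Long wait..."),
                  rating = c("5 stars", "3 stars"),
                  time   = c("a week ago", "2 months ago"))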

Any pointers on how to improve this would be greatly appreciated.

0 Answers:

No answers yet