尝试循环遍历HTML表并创建数据框

时间:2016-10-26 14:02:31

标签: r web-scraping rvest

我正在尝试创建一个动态循环来运行多个URL并从每个表中抓取数据,将所有内容连接到一个数据框中。我尝试了一些想法,如下所示,但到目前为止还没有任何一个能正常工作。这类工作并不在我的专长范围内,但我正在努力学习它是如何工作的。如果有人可以帮我完成这件事,我会非常感激。

谢谢。

静态网址: http://www.nfl.com/draft/2015/tracker?icampaign=draft-sub_nav_bar-drafteventpage-tracker#dt-tabs:dt-by-position/dt-by-position-input:qb

library(rvest)
library(stringr)  # needed for str_locate_all/str_sub/str_extract below

# Master data frame accumulating the scraped tables across all
# year/position combinations.
complete <- data.frame()

yearsVector <- c("2010", "2011", "2012", "2013", "2014", "2015")
positionVector <- c("qb", "rb", "wr", "te", "ol", "dl", "lb", "cb", "s")

# URL pieces are loop-invariant, so build them once outside the loops.
URL.base <- "http://www.nfl.com/draft/"
URL.intermediate <- "/tracker?icampaign=draft-sub_nav_bar-drafteventpage-tracker#dt-tabs:dt-by-position/dt-by-position-input:"

for (i in seq_along(yearsVector)) {
  for (j in seq_along(positionVector)) {
    # Build ONE url for the current year/position. (The original passed the
    # whole vectors to paste0(), producing a vector of URLs and the
    # "expecting a single value" error from read_html().)
    URL <- paste0(URL.base, yearsVector[i], URL.intermediate, positionVector[j])
    #print(URL)

    # Read the page once and keep it, to make debugging easier.
    page <- read_html(URL)

    # NOTE(review): the visible table is generated client-side by
    # JavaScript, so html_table() on the static HTML may not contain
    # the draft data.
    DF <- html_nodes(page, xpath = ".//table") %>% html_table(fill = TRUE)
    # About 530 names returned; may need to search and extract requested info.

    # Locate players' last names in the raw page source.
    lastnames <- str_locate_all(page, "lastName")[[1]]
    names <- str_sub(page, lastnames[, 2] + 4, lastnames[, 2] + 20)
    names <- str_extract(names, "[A-Z][a-zA-Z]*")

    length(names[-c(1:16)])
    # TODO(review): the first 16 matches look like non-player boilerplate;
    # confirm that offset is consistent across all years before dropping.

    # Locate player positions in the raw page source.
    positions <- str_locate_all(page, "pos")[[1]]
    ppositions <- str_sub(page, positions[, 2] + 4, positions[, 2] + 10)
    pos <- str_extract(ppositions, "[A-Z]*")

    # Drop the empty matches left over by the regex above.
    pos <- pos[pos != ""]

    # Append this page's tables to the master data frame.
    complete <- rbind(complete, DF)
  }
}

我编辑了我的OP以合并您的代码Dave。我想我差不多了,但有些事情在这里并不完全正确。我收到了这个错误。

Error in eval(substitute(expr), envir, enclos) : expecting a single value（eval(substitute(expr), envir, enclos) 中的错误：期望单个值）

我知道网址是对的!

http://postimg.org/image/ccmvmnijr/

我认为问题在于这一行:

page <- read_html(URL)

或者,也许这一行:

DF <- html_nodes(page, xpath = ".//table") %>% html_table(fill = TRUE)

你能帮助我克服终点线吗?谢谢!

1 个答案:

答案 0 :(得分:0)

试试这个答案！我修复了URL的创建方式,并设置了一个主数据框来存储所请求的信息。该页面是动态生成的,因此单独使用rvest的这些标准工具是行不通的。所有球员信息(每位球员约16个字段)、大学信息和选秀顺位信息都存储在页面源码中,剩下的只是搜索并提取它们的问题。

library(rvest)
library(stringr)
library(dplyr)

yearsVector <- c("2011", "2012", "2013", "2014", "2015")
# All position information is stored on every page, so there is no need
# to create separate per-position queries.
#positionVector <- c("qb", "rb", "wr", "te", "ol", "dl", "lb", "cb", "s")
positionVector <- c("qb")

# URL pieces are loop-invariant; build them once outside the loops.
URL.base <- "http://www.nfl.com/draft/"
URL.intermediate <- "/tracker?icampaign=draft-sub_nav_bar-drafteventpage-tracker#dt-tabs:dt-by-position/dt-by-position-input:"

# Collect per-page results in a list and bind once at the end
# (rbind() inside a loop copies the accumulator every pass: O(n^2)).
results <- list()

for (i in seq_along(yearsVector)) {
  for (j in seq_along(positionVector)) {
    # Build the URL for the current year/position combination.
    URL <- paste0(URL.base, yearsVector[i], URL.intermediate, positionVector[j])
    print(yearsVector[i])
    print(URL)

    # Read the page once and keep it, to make debugging easier.
    page <- read_html(URL)

    # The visible table is generated client-side, so rvest's html_table()
    # does not work here; instead extract the JSON-like player records
    # embedded in the page source. One record per player: {"personId...}
    playersloc <- str_locate_all(page, "\\{\"personId.*?\\}")[[1]]
    players <- str_sub(page, playersloc[, 1] + 1, playersloc[, 2] - 1)
    # Protect embedded ", " (e.g. names ending in "Jr.") before splitting
    # the record on commas.
    players <- gsub(", ", "_", players)

    # Split each record into key:value pairs and reshape into a data frame
    # of one row per player, 16 fields per record.
    play2 <- strsplit(gsub("\"", "", players), ',')
    data <- sapply(strsplit(unlist(play2), ":"), FUN = function(x) {x[2]})
    df <- data.frame(matrix(data, ncol = 16, byrow = TRUE))
    # Column names come from the keys of the first record.
    names(df) <- sapply(strsplit(unlist(play2[1]), ":"), FUN = function(x) {x[1]})

    # Extract the draft-pick records: {"id...player...}
    picks <- str_locate_all(page, "\\{\"id.*?player.*?\\}")[[1]]
    picks <- str_sub(page, picks[, 1] + 1, picks[, 2] - 1)
    # Protect commas inside the notes field.
    picks <- gsub(", ", "_", picks)
    picks <- strsplit(gsub("\"", "", picks), ',')
    data <- sapply(strsplit(unlist(picks), ":"), FUN = function(x) {x[2]})
    picksdf <- data.frame(matrix(data, ncol = 6, byrow = TRUE))
    names(picksdf) <- sapply(strsplit(unlist(picks[1]), ":"), FUN = function(x) {x[1]})

    # Extract the college records: {"id...name...}
    schools <- str_locate_all(page, "\\{\"id.*?name.*?\\}")[[1]]
    schools <- str_sub(page, schools[, 1] + 1, schools[, 2] - 1)
    schools <- strsplit(gsub("\"", "", schools), ',')
    data <- sapply(strsplit(unlist(schools), ":"), FUN = function(x) {x[2]})
    schoolsdf <- data.frame(matrix(data, ncol = 3, byrow = TRUE))
    names(schoolsdf) <- sapply(strsplit(unlist(schools[1]), ":"), FUN = function(x) {x[1]})

    # Join college and pick information onto the player records.
    df <- left_join(df, schoolsdf, by = c("college" = "id"))
    df <- left_join(df, picksdf, by = c("pick" = "id"))

    # Stash this page's rows; bound together after the loops finish.
    results[[length(results) + 1]] <- df
  }
}

# Master data frame holding all requested year/position combinations.
complete <- bind_rows(results)

找出正确的正则表达式来定位并提取所需信息比较棘手。看起来2010年的数据对大学信息使用了不同的格式,因此我跳过了那一年。另外,请确保您没有违反该网站的服务条款。祝你好运!