尝试从此URL将多个页面的 .xlsx 文件下载到R中。
https://niti.gov.in/best-practices/district-wise-statistics
已用 read_html 读取该URL，目前可以一次下载一页，但希望一次性导入所有页面。
# Read the listing page once, then pull out every anchor's href and
# keep only the links that point at an .xlsx file.
pg <- read_html("https://niti.gov.in/best-practices/district-wise-statistics/")

anchors <- html_nodes(pg, "a")
hrefs <- html_attr(anchors, "href")
str_subset(hrefs, "\\.xlsx")
答案 0（得分：0）
类似下面的代码应该可以满足您的需求。
library(rvest)
library(dplyr)

# Download every .xlsx file linked from the paginated NITI Aayog
# district-wise statistics listing into a temporary folder.
temp_fldr <- paste0(tempdir(), '/')
base_url <- 'https://niti.gov.in/best-practices/district-wise-statistics'

# The pager's "last" link encodes the final page index in its query
# string (e.g. "...&page=12"), so scrape it to learn how many pages exist.
last <- read_html(base_url) %>%
  html_nodes(xpath = "//a[contains(text(), 'last ')]") %>%
  html_attr('href')

# Guard: if the pager link is missing or its index does not parse,
# fall back to processing just the first page (index 0) instead of
# erroring out on an NA loop bound.
pg_cnt <- if (length(last) == 0) 0 else as.numeric(sub('.*=', '', last[1]))
if (is.na(pg_cnt)) {
  pg_cnt <- 0
}

# The site numbers pages from 0, hence the 0-based loop.
for (i in 0:pg_cnt) {
  page <- paste0(base_url, '?term_node_tid_depth=All&page=', i)
  print(paste('Processing page:', i + 1, 'of', pg_cnt + 1))

  # Anchors whose visible text contains "xlsx" are the download links.
  links <- read_html(page) %>%
    html_nodes(xpath = "//a[contains(text(), 'xlsx')]")

  files <- lapply(links, function(x) {
    # Local filename: link text with the leading "N." numbering stripped.
    fpath <- paste0(temp_fldr, trimws(sub("^.*?\\.", '', html_text(x))))
    link <- paste0('https://niti.gov.in', html_attr(x, 'href'))
    # mode = 'wb' is required so binary .xlsx files are not corrupted
    # by newline translation on Windows.
    download.file(link, fpath, mode = 'wb')
    print(fpath)
    fpath
  })
}