The U.S. National Oceanic and Atmospheric Administration (NOAA) has a large amount of data in the Standard Hydrologic Exchange Format (SHEF) (for example, at the links below). The linked data carry four main pieces of information: location name, location ID, reported value (a number or "NE" for not estimated), and elevation zone. I would like to convert the SHEF data into a four-column data.frame. SHEF may have "exchange" in its name, but it does not seem straightforward to work with, though I may be missing something.

Both pages linked below contain 1137 lines of snow-data text for the same locations and time, but for different snow parameters.

There are two code segments below, one per webpage; they are identical apart from the URL pointing to the respective parameter. The code produces nearly the expected data.frame for one of the parameters, swe, but for the other parameter, sub, the resulting data.frame is clearly only partially complete relative to the original data and contains wrong values (see the subheadings at the bottom). I'm thinking that, since the SHEF format is at least consistent, and since there may be functions/libraries built for exactly this kind of thing, there could be an entirely different angle on the conversion that needs far fewer steps?

Snow parameter 1 ("swe") (snow water equivalent): https://www.nohrsc.noaa.gov/shef_archive/index.html?rfc=cnrfc&product=swe&year=2019&month=3&day=27&hour=12 (the data in the grey box)

Snow parameter 2 ("sub") (sublimation): https://www.nohrsc.noaa.gov/shef_archive/index.html?rfc=cnrfc&product=sb&year=2019&month=3&day=27&hour=12 (the data in the grey box)

I am hoping to end up with two data.frames, swe and sub, each with 4 columns. A working example is below.
library(tidyverse)
library(rvest)
library(lubridate)
# webpage to scrape data from, March27's parameter "swe"
march27_param_swe <-
"https://www.nohrsc.noaa.gov/shef_archive/index.html?rfc=cnrfc&product=swe&year=2019&month=3&day=27&hour=12"
####### snow water equivalent (swe) [inches] ##########
# scrape
scrapedtext <- read_html(march27_param_swe) %>% html_node(".notes") %>%
html_text()
swe <- tibble(txt = read_lines(scrapedtext)) %>%
  mutate(
    row = row_number(),
    # lines beginning with a 5-character station code followed by a value
    with_code = str_extract(txt, "^[A-z0-9]{5}\\s+\\d+(\\.)?\\d"),
    # continuation lines (optional leading ":"); keep just the number
    wo_code = str_extract(txt, "^:?\\s+\\d+(\\.)?\\d") %>%
      str_extract("[:digit:]+\\.?[:digit:]"),
    # the line above a coded line (minus its leading ":") is the basin description
    basin_desc = if_else(!is.na(with_code), lag(txt, 1), NA_character_) %>%
      str_sub(start = 2)
  )
# split "code value", take whichever value was found, drop rows without one
swe <- swe %>% separate(with_code, c("code", "val"), sep = "\\s+") %>%
  mutate(value = case_when(
    !is.na(val) ~ val,
    !is.na(wo_code) ~ wo_code,
    TRUE ~ NA_character_) %>%
    as.numeric) %>% filter(!is.na(value))
# carry code/basin_desc down to their continuation rows;
# the elevation zone is whatever follows "inches)"
swe <- swe %>% mutate(code = zoo::na.locf(code), basin_desc = zoo::na.locf(basin_desc),
                      elevz = gsub(".*(inches))", "", txt)) %>%
  select(code, value, basin_desc, elevz) %>%
  mutate(elevz = trimws(elevz))
dim(swe)
#[1] 643 4
head(swe)
# # A tibble: 6 x 4
# code value basin_desc elevz
# <chr> <dbl> <chr> <chr>
# 1 ACSC1 0 San Antonio Ck - Sunol "Entire Basin"
# 2 ADLC1 0 Arroyo De La Laguna "Entire Basin"
# 3 ADOC1 0 Santa Ana R - Prado Dam "Entire Basin"
# 4 AHOC1 0 Arroyo Honda nr San Jose "Entire Basin"
# 5 AKYC1 41.8 SF American nr Kyburz "Entire Basin"
# 6 AKYC1 3.9 SF American nr Kyburz "Base to 5000'"
#which is what I'm hoping for, except that I'd like the `value` to be
#<chr> to be able to accommodate the numbers and "NE" values reported, like this:
# # A tibble: 6 x 4
# code value basin_desc elevz
# <chr> <chr> <chr> <chr>
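# (A rough sketch only, not tested against the full pages: if the value pattern is
# widened to also accept "NE" and the as.numeric() step is skipped, `value`
# stays <chr>. Illustrated on two hypothetical lines shaped like the report:)
demo <- tibble(txt = c("AKYC1     41.8 (calculated; inches) Entire Basin",
                       ":           NE (calculated; inches) Base to 5000'"))
demo %>% mutate(value = str_extract(txt, "\\d+\\.?\\d*(?=\\s*\\()|NE"))
# value would come out as "41.8" and "NE", both character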
####### surface sublimation (sub) ##########
# same locations and day, different parameter, "sb", blowing snow
# sublimation [inches]
march27_param_temp <- "https://www.nohrsc.noaa.gov/shef_archive/index.html?rfc=cnrfc&product=sb&year=2019&month=3&day=27&hour=12"
scrapedtext <- read_html(march27_param_temp) %>%
html_node(".notes") %>% html_text()
sub <- tibble(txt = read_lines(scrapedtext)) %>%
mutate(
row = row_number(),
with_code = str_extract(txt, "^[A-z0-9]{5}\\s+\\d+(\\.)?\\d"),
wo_code = str_extract(txt, "^:?\\s+\\d+(\\.)?\\d") %>%
str_extract("[:digit:]+\\.?[:digit:]"),
basin_desc = if_else(!is.na(with_code), lag(txt, 1), NA_character_) %>%
str_sub(start = 2)
)
sub <- sub %>% separate(with_code, c("code", "val"), sep = "\\s+") %>%
mutate(value = case_when(
!is.na(val) ~ val,
!is.na(wo_code) ~ wo_code,
TRUE ~ NA_character_) %>%
as.numeric) %>% filter(!is.na(value))
sub <- sub %>% mutate(code = zoo::na.locf(code), basin_desc = zoo::na.locf(basin_desc) ,
elevz = gsub(".*(inches))","",txt)) %>%
select(code, value, basin_desc, elevz) %>%
mutate(elevz = trimws(elevz))
dim(sub)
#[1] 263 4    # dim(swe) was 643 x 4
head(sub)
# A tibble: 6 x 4
#code value basin_desc elevz
#<chr> <dbl> <chr> <chr>
#1 ADOC1 0 Santa Ana R - Prado Dam "Entire Basin"
#2 ADOC1 0 Santa Ana R - Prado Dam "Base to 5000'"
#3 ARCC1 0 Mad River - Arcata "Entire Basin"
#4 ARCC1 0 Mad River - Arcata "Base to 5000'"
#5 BCAC1 0 Little Truckee - Boca Dam "Entire Basin"
#So `sub` should be a `data.frame` of the same size as `swe`, and
#the sub$value entries should look like this (as per the source page above:
# https://www.nohrsc.noaa.gov/shef_archive/index.html?rfc=cnrfc&product=sb&year=2019&month=3&day=27&hour=12 ):
#head(desired_sub)
# A tibble: 6 x 4
#code value basin_desc elevz
#<chr> <chr> <chr> <chr>
#1 ADOC1 NE Santa Ana R - Prado Dam "Entire Basin"
#2 ADOC1 NE Santa Ana R - Prado Dam "Base to 5000'"
#3 ARCC1 0.000 Mad River - Arcata "Entire Basin"
#4 ARCC1 NE Mad River - Arcata "Base to 5000'"
#5 BCAC1 -0.016 Little Truckee - Boca Dam "Entire Basin"
Answer (score: 2)

I think your problem may come from inconsistencies in the data output: lines carrying a station code may or may not start with a colon.

I wrote new code that identifies the data blocks by searching for lines starting with a code (or ":" + code), and then reads each block into a data frame.

Try this:
library(rvest)
library(stringr)
# Read an individual block
readBlock = function(text){
  # first line of the block is the basin description (leading ":" stripped)
  basin = str_replace(string = text[1], pattern = "^:", replacement = "")
  block = text[-1]
  # 5-character station code taken from the first data line
  code = str_match(block[1], "[A-Z0-9]{5}")[1]
  # drop the leading station code, or the bare ":" on continuation lines
  block = str_replace(block, "^(:?[^ ]+|:)", "")
  # wrap a bare "%" unit in parentheses so it is delimited like the other units
  block = str_replace(block, "%", "(%)")
  # turn ";", "(", ")" and ":" into a common delimiter, then split
  block = str_replace_all(block, "[;():]", "|")
  block = trimws(block)
  block = str_split(block,"\\|")
  block = as.data.frame(do.call(rbind, block))
  colnames(block) = c("Value","Calc", "Units", "Location")
  block$Code = code
  block$Basin = basin
  return(block)
}
# Find the starting index of each block
findBlocks = function(text){
  # the first data line of each station starts with an optional ":" and a 5-character code
  index = which(str_detect(text,"^:?[A-Z0-9]{5}"))
  # ignore matches in the report header
  index = index[index > 10]
  # step back one line to the basin description preceding each block
  index = index - 1
  # the line after ".END" marks the final boundary
  index = c(index, 1 + which(str_detect(text,"\\.END")))
  return(index)
}
# Return one data frame with all blocks
readAllBlocks = function(index, text){
  blocks = lapply(1:(length(index)-1), function(x){
    # lines of block x: from its basin-description line up to just before the next block
    blockText = text[index[x]:(index[x+1]-2)]
    readBlock(blockText)
  })
  blocks = do.call(rbind, blocks)
  return(blocks)
}
####### snow water equivalent (swe) [inches] ##########
march27_param_swe = "https://www.nohrsc.noaa.gov/shef_archive/index.html?rfc=cnrfc&product=swe&year=2019&month=3&day=27&hour=12"
# scrape
scrapedtext = html_text(html_node(read_html(march27_param_swe),".notes"))
scrapedtext = unlist(str_split(scrapedtext,"\n"))
block_index = findBlocks(scrapedtext)
swe = readAllBlocks(block_index, scrapedtext)
####### surface sublimation (sub) ##########
march27_param_temp = "https://www.nohrsc.noaa.gov/shef_archive/index.html?rfc=cnrfc&product=sb&year=2019&month=3&day=27&hour=12"
scrapedtext = html_text(html_node(read_html(march27_param_temp),".notes"))
scrapedtext = unlist(str_split(scrapedtext,"\n"))
block_index = findBlocks(scrapedtext)
sub = readAllBlocks(block_index, scrapedtext)
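If you then want the four-column layout from the question (code, value, basin_desc, elevz), a minimal follow-up sketch, assuming the column names produced by readAllBlocks above, could be:

# Value is kept as text here, so "NE" entries survive alongside the numbers
swe4 <- data.frame(code       = swe$Code,
                   value      = trimws(swe$Value),
                   basin_desc = swe$Basin,
                   elevz      = trimws(swe$Location),
                   stringsAsFactors = FALSE)
head(swe4)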
Edit:

If the "%" units come without surrounding parentheses, wrap them in parentheses before the delimiter replacement. This line should do the trick:
block = str_replace(block, "%", "(%)")
I edited the code above to include it where needed.