从具有数千个标签的excel书中读取数据

时间:2018-01-10 20:47:56

标签: r xlconnect readxl

我正在从多个 xlsx 文件中读取数据,数据存储在每个工作簿文件的 10-20 个单独的选项卡中。第一个工作表包含一个主数据表,其中包括指向含有更多数据的各个选项卡的链接。基于列的"选项卡"数据在被附加到主数据之前会先进行汇总和转置。主数据表本身很大(约1万行 × 数百列),而附加的数据选项卡则很小(几列 × 十到几十行)。

XLConnect 软件包在调用 loadWorkbook() 时就已经因内存不足而崩溃了(R 3.4.0,RStudio 1.1.383,64位,8G 内存的机器),否则我本可以按照该方法(见链接)来做。

因为我需要从单个选项卡加载,所以我当前正在使用嵌套的for()循环来加载每个单独的选项卡数据。但是,使用我的选项卡数量,每个循环需要将近一分钟,总执行时间将近一周!使用嵌套的for()循环也显然是非整洁的,所以我怀疑有更简洁,更快速的方法来实现这一点,但是看不到它。

我已经在链接中读到了R中的专用df(linkReferences)。 数据源不是我的,所以我坚持使用提供的输入。
问题纯粹与读取工作表的速度有关:随着文件中工作表数量(以及文件大小)的增加,每张工作表的读取时间也会增加。

我正在寻找任何能加快速度的解决方案,并附上了一个自包含的最小示例。在我的电脑上:n = 10 时约 0.16 秒/张,n = 100 时约 0.56 秒/张,n = 1000 时约 3 秒/张,这与我在真实数据中看到的情况相似(16k 张工作表时 < 10 秒/张)。

library(tidyverse)

number_of_sheets <- 100

# =========================================================================
# CREATE  SAMPLE  FILE .  Layout similar to actual data

library(openxlsx)

my.sheets.file <- "sampleXLSX.xlsx"

# Master sheet: one row per linked data tab ("Data 1" ... "Data n").
# data_frame() is deprecated; tibble() is the drop-in replacement.
linkReferences <- tibble(sheet = str_c("Data ", seq_len(number_of_sheets)))

# write.xlsx() returns the workbook object, so more sheets can be added to it.
wb <- write.xlsx(linkReferences, file = my.sheets.file)

# Five header key/value rows written at the top of every data tab.
sample_header <- data.frame(
  head_name = c("head1", "head2", "head3", "head4", "head5"),
  head_text = c("text1", "text2", "text3", "text4", "text5")
)

set.seed(31415)

for (i in seq_len(number_of_sheets)) {
  cat(i, "..")
  sheet_name_i <- paste0("Data ", i)
  addWorksheet(wb, sheetName = sheet_name_i)

  # Header block starts at B2, data block at B10 (mirrors the real files).
  writeData(wb, sheet = sheet_name_i, sample_header, startCol = "B", startRow = 2)

  # Each tab gets a random number of rows (1..200) of sample data.
  # RNG calls are kept in the original order so the generated file is identical.
  n <- ceiling(runif(1) * 200)
  sample_data <- tibble(
    A = seq_len(n),
    B = runif(n),
    C = sample(seq_len(5), n, replace = TRUE)
  )

  writeData(wb, sheet = sheet_name_i, sample_data, startCol = "B", startRow = 10)
}

saveWorkbook(wb, file = my.sheets.file, overwrite = TRUE)


#===========================================================================
# THIS IS THE ACTUAL QUESTION
# Read from file with many tabs

library(readxl)
library(stringr)

# Destination columns on the master table: five header fields plus a
# transposed 5 x (A, B, C) summary block per sheet.
linkReferences <- linkReferences %>%
  mutate(
    Head1 = NA, Head2 = NA, Head3 = NA, Head4 = NA, Head5 = NA,
    A.1 = NA, B.1 = NA, C.1 = NA,
    A.2 = NA, B.2 = NA, C.2 = NA,
    A.3 = NA, B.3 = NA, C.3 = NA,
    A.4 = NA, B.4 = NA, C.4 = NA,
    A.5 = NA, B.5 = NA, C.5 = NA
  )
linkReferences.nrows <- nrow(linkReferences)
lRnames <- names(linkReferences)

start.row <- 1
start_time <- Sys.time()
for (i in start.row:linkReferences.nrows) {
  cat("i=", i, " / ", linkReferences.nrows, "\n")

  start_time_i <- Sys.time()
  # Read everything below the first two rows as text; type conversion can
  # happen outside the loop once all data is loaded.
  linked_data <- read_xlsx(
    my.sheets.file,
    sheet = linkReferences[[i, "sheet"]],  # [[ ]] extracts the bare string
    skip = 2,
    col_types = c("text", "text", "text"),
    col_names = FALSE
  )
  print(Sys.time() - start_time_i) # This takes 99% of the loop time

  # First five rows of column 2 hold the header values (head_text).
  linkReferences[i, 2:6] <- unlist(linked_data[1:5, 2])

  # The data block starts right below the row whose first cell is "A".
  data_head_row <- which(linked_data[, 1] == "A")

  names(linked_data) <- c("A", "B", "C")

  linked_data <- linked_data[(data_head_row + 1):nrow(linked_data), ]

  #  create a (rather random) sample summary
  summary_linked_data <- linked_data %>%
    group_by(C) %>%
    summarise(B = last(B), A = last(A)) %>%
    arrange(desc(C))

  # not all data has the full range of options, so use actual number
  summary_linked_data_nrows <- nrow(summary_linked_data)

  # Copy each summary row into its A.x / B.x / C.x slot on the master row.
  for (ii in seq_len(summary_linked_data_nrows)) {
    linkReferences[i, match(str_c("A.", ii), lRnames):match(str_c("C.", ii), lRnames)] <-
      summary_linked_data[ii, ]
  }

  print(linkReferences[i, 2:20])

  # ________________________________________________________
  # BELOW IS ONLY FOR TEST LOOP TIMING STATS IN THIS EXAMPLE
  delta_time <- Sys.time() - start_time
  # Convert once to seconds instead of branching on the difftime "units"
  # attribute (the original branch silently mishandled "secs" vs "mins").
  row_time <- as.numeric(delta_time, units = "secs") / (i - start.row + 1)
  # Rows in the whole run = nrows - start.row + 1; the original "- 1"
  # underestimated the projected total by two rows' worth.
  total_time <- row_time * (linkReferences.nrows - start.row + 1) / 3600

  cat(
    "Passed time: ", delta_time, attr(delta_time, "units"),
    "   |   time/row: ", round(row_time, 2), "secs.",
    "   |   Est total time:",
    round(total_time * 60, 2), "mins = )",
    round(total_time, 2), "hours )",
    "\n---------------\n"
  )
}

# Conversion of data loaded as character to numeric can all happen outside loop once all data is loaded.

1 个答案:

答案 0 :(得分:0)

经过一些挖掘:XLConnect 及其向量化的工作表读取功能(参见 here)是明显的赢家,前提是你的工作簿能放进内存。为此我不得不 a. 减小工作簿的大小,以及 b. 按照 @Joshua 的链接将 XLConnect 的内存上限设置为 4GB。

对于上述问题的1000张示例:
wb <- loadWorkbook() 花了 15 秒;linked_data_lst = readWorksheet() 花了 34 秒;从内存列表中提取数据 for (i in 1:nr_linked_data){...} 花了 86 秒。总计约 0.135 秒/张(比上面的代码快约 22 倍)。

#============================================================================
# now read it again

library(stringr)

# XLConnect is Java-backed: the JVM heap size must be raised BEFORE the
# package (and its JVM) is loaded, or loadWorkbook() runs out of memory.
options(java.parameters = "-Xmx4g")
library(XLConnect)

# Destination columns on the master table: five header fields plus a
# transposed 5 x (A, B, C) summary block per sheet.
linkReferences <- linkReferences %>%
  mutate(
    Head1 = NA, Head2 = NA, Head3 = NA, Head4 = NA, Head5 = NA,
    A.1 = NA, B.1 = NA, C.1 = NA,
    A.2 = NA, B.2 = NA, C.2 = NA,
    A.3 = NA, B.3 = NA, C.3 = NA,
    A.4 = NA, B.4 = NA, C.4 = NA,
    A.5 = NA, B.5 = NA, C.5 = NA
  )

linkReferences.nrows <- nrow(linkReferences)
lRnames <- names(linkReferences)
# Column index range of the summary block (A.1 .. C.5) and, immediately to
# its left, the five header columns.  match() takes the name directly;
# wrapping a constant in str_c() added nothing.
lRcols <- match("A.1", lRnames):match("C.5", lRnames)
lRheadCols <- (lRcols[1] - 5):(lRcols[1] - 1)

start_time <- Sys.time()
wb <- loadWorkbook(my.sheets.file)  # whole workbook read into (JVM) memory
Sys.time() - start_time

start.row <- 1
end.row <- linkReferences.nrows

# readWorksheet() is vectorised over sheet names: one call returns a list of
# data frames, one per sheet.  This is the main speed win over readxl here.
start_time0 <- Sys.time()
linked_data_lst <- readWorksheet(
  wb,
  sheet = linkReferences[start.row:end.row, ][["sheet"]],
  startCol = 2,
  endCol = 4,
  startRow = 3,
  header = FALSE
)

delta_time <- (Sys.time() - start_time0) %>% print()

nr_linked_data <- length(linked_data_lst)

start_time <- Sys.time()

for (i in seq_len(nr_linked_data)) {
  cat("i=", i, " / ", nr_linked_data, "\n")

  linked_data <- as_tibble(linked_data_lst[[i]])

  # EVERYTHING BELOW HERE IS EXACTLY SAME AS IN QUESTION CODE
  # =========================================================

  # First five rows of column 2 hold the header values (head_text).
  linkReferences[i, lRheadCols] <- unlist(linked_data[1:5, 2])

  # The data block starts right below the row whose first cell is "A".
  data_head_row <- which(linked_data[, 1] == "A")

  names(linked_data) <- c("A", "B", "C")

  linked_data <- linked_data[(data_head_row + 1):nrow(linked_data), ]
  # funs() is deprecated; mutate_all() accepts a bare function directly.
  linked_data <- linked_data %>% mutate_all(as.numeric)

  #  create a (rather random) sample summary
  summary_linked_data <- linked_data %>%
    group_by(C) %>%
    summarise(B = last(B), A = last(A)) %>%
    arrange(desc(C))

  # not all data has the full range of options, so use actual number
  summary_linked_data_nrows <- nrow(summary_linked_data)

  # Copy each summary row into its A.x / B.x / C.x slot on the master row.
  for (ii in seq_len(summary_linked_data_nrows)) {
    linkReferences[i, match(str_c("A.", ii), lRnames):match(str_c("C.", ii), lRnames)] <-
      summary_linked_data[ii, ]
  }

  print(linkReferences[i, lRheadCols[1]:max(lRcols)])

  delta_time <- Sys.time() - start_time
  # Convert once to seconds instead of branching on the difftime "units"
  # attribute; rows in the whole run = nrows - start.row + 1 (the original
  # "- 1" underestimated the projected total).
  row_time <- as.numeric(delta_time, units = "secs") / (i - start.row + 1)
  total_time <- row_time * (linkReferences.nrows - start.row + 1) / 3600

  cat(
    "Passed time: ", delta_time, attr(delta_time, "units"),
    "   |   time/row: ", round(row_time, 2), "secs.",
    "   |   Est total time:",
    round(total_time * 60, 2), "mins = )",
    round(total_time, 2), "hours )",
    "\n---------------\n"
  )
}