R: merging rows in a data frame

Date: 2016-01-07 12:33:35

Tags: r dataframe

Here is the head of a large data frame:

head(Hdata_soil)
                      X_id           timestamp address rssi batt_v soil_temp_1 soil_temp_2 soil_temp_3 soil_moisture_1
1 565846060dd8e408e3817c58 2015-11-27 12:01:10      A8  -65     NA          NA          NA          NA              NA
2 565846070dd8e408e3817c59 2015-11-27 12:01:11      A8   NA     NA        9.73     -273.15       14.63             647
3 565846cf0dd8e408e3817caf 2015-11-27 12:04:31      A7  -64     NA          NA          NA          NA              NA
4 565846cf0dd8e408e3817cb0 2015-11-27 12:04:31      A7   NA     NA        8.56        9.46        9.64             660
5 565847650dd8e408e3817cf5 2015-11-27 12:07:01      A8  -64     NA          NA          NA          NA              NA
6 565847660dd8e408e3817cf6 2015-11-27 12:07:02      A8   NA     NA        9.82     -273.15       14.29             643

The full dataset can be accessed from Dropbox.

As you can see, there are 2 consecutive observations for each address, with timestamps about 1 second apart, and the variables are split between these two observations. How can I merge them into a single row, keeping the first timestamp?

This should only happen for 2 consecutive observations coming from the same address.

If anyone could point me in the direction of a package/function to use, I would really appreciate it.

4 Answers:

Answer 0 (score: 0)

Check out the following code, which should do what you need. First, the timestamp column is converted to class 'POSIXlt', which makes it possible to determine the time difference between individual observations. I then use foreach to loop over all rows, skipping any records that have already been merged into another row during a previous iteration (tracked in the vector 'used'). which, in combination with difftime, identifies consecutive observations (e.g., within 5 seconds of the currently processed observation). Finally (and only if the candidate records include the address of the current observation), the rows are merged, replacing missing values in the currently processed row with the values from the consecutive observation.

## load 'foreach' package
library(foreach)

## import and reformat data
Hdata_soil <- read.csv("Hdata_soil.csv", header = TRUE, 
                       stringsAsFactors = FALSE)

## reformat timestamps
timestamps <- strptime(Hdata_soil$timestamp, format = "%Y-%m-%d %H:%M:%S")

## vector with information about merged lines
used <- integer()
dat_out <- foreach(i = 1:length(timestamps), .combine = "rbind") %do% {

  ## skip current iteration if line has already been merged into another line
  if (i %in% used)
    return(NULL)

  ## the final row has no successors to compare against, so return it as is
  if (i == length(timestamps))
    return(data.frame(Hdata_soil[i, ]))

  ## identify consecutive observation (<5s)
  x <- timestamps[i]
  y <- timestamps[(i+1):length(timestamps)]

  # (subset same or consecutive days to reduce 
  # computation time of 'difftime')
  id_day <- which(as.Date(y) == as.Date(x) | 
                    as.Date(y) == (as.Date(x) + 1))
  y <- y[id_day]

  # (subset records within 5s from current observation)
  id_sec <- which(difftime(y, x, units = "secs") < 5)
  id <- id_day[id_sec]

  ## if consecutive observation(s) exist(s) and include address of 
  ## current observation, perform merge
  if (length(id) > 0 & 
        any(Hdata_soil[i+id, "address"] == Hdata_soil[i, "address"])) {

    ## merge every consecutive observation into the current row
    ## (initialised once, so values from earlier j are not overwritten)
    Hdata_soil_x <- data.frame(Hdata_soil[i, ])

    for (j in 1:length(id)) {
      Hdata_soil_y <- data.frame(Hdata_soil[i+id[j], ])

      # overwrite all missing values in current line with values 
      # from consecutive line
      Hdata_soil_x[which(is.na(Hdata_soil_x) & !is.na(Hdata_soil_y))] <- 
        Hdata_soil_y[which(is.na(Hdata_soil_x) & !is.na(Hdata_soil_y))]

      # update information about merged lines
      used <- c(used, i, i+id[j])
    }

    # return merged line
    return(Hdata_soil_x)

  ## else return current line as is  
  } else {
    used <- c(used, i)
    return(data.frame(Hdata_soil[i, ]))
  }
}

However, the code takes quite long to execute, which seems to be related to difftime:

    user   system  elapsed 
2209.504   99.389 2311.996 
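
If those pairwise difftime calls are the bottleneck, a vectorised shortcut may help. The following is only a sketch, and it assumes that rows belonging together are always immediately adjacent (as in the sample data); it compares each row to its direct successor only:

# convert once, then work on numeric seconds (sketch)
ts <- as.POSIXct(strptime(Hdata_soil$timestamp, format = "%Y-%m-%d %H:%M:%S"))
n <- nrow(Hdata_soil)

# TRUE at position i when rows i and i+1 share an address and are <5s apart
merge_with_next <- Hdata_soil$address[-n] == Hdata_soil$address[-1] &
  diff(as.numeric(ts)) < 5

Rows flagged TRUE could then be merged with their successors in a single vectorised pass instead of row-by-row comparisons.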

Answer 1 (score: 0)

I think the following should work. Make a vector of the unique address labels. Then, for each address label, extract the relevant rows and use suitable functions to pick the values you want (e.g., the minimum timestamp, the rssi value that is not NA, and so on). Rebuild a data frame from the resulting list with rbind.data.frame.

unad <- unique(Hdata_soil$address)

lst <- lapply(unad, function(ad){
    recs <- Hdata_soil[Hdata_soil$address == ad,]
    X_id <- recs$X_id[1]
    ts <- min(recs$timestamp)
    rssi <- recs$rssi[!is.na(recs$rssi)]
    if(length(rssi) == 0L) rssi <- NA else if(length(rssi) >= 2L) rssi <- mean(rssi) # or something - ensure end up with length 1
    ## remaining observations like rssi
    ## ...
    return(data.frame(X_id = X_id, timestamp = ts, address = ad, rssi = rssi, ...))
})

result <- do.call(rbind.data.frame, lst)

Adapted in response to the comments:

mtx <- matrix(1:nrow(Hdata_soil), nrow(Hdata_soil), 2)
colnames(mtx) <- c("startR", "endR")

# identifies consecutive duplicate addresses and groups them into subsets
# (indexing the data frame directly, so the assignments to mtx persist)
for(r in 1:(nrow(mtx) - 1)){
    if(identical(Hdata_soil$address[r], Hdata_soil$address[r + 1])){
        mtx[r, 2] <- mtx[r, 2] + 1
        mtx[r + 1, ] <- c(NA, NA)
    }
}

# remove NA rows - duplicate addresses have now been grouped
mtx <- mtx[!is.na(mtx[, 1]), , drop = FALSE]

lst <- lapply(1:nrow(mtx), function(r){
    datsubset <- Hdata_soil[mtx[r, "startR"]:mtx[r, "endR"],, drop = FALSE]
    # aggregate the subset of rows into one row as you please
})

result <- do.call(rbind.data.frame, lst)

Note that if any address occurs three times in a row, this will need some adjustment.
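
One possible adjustment, sketched with base R's rle(): build the start/end matrix directly from runs of identical addresses, which handles runs of any length, not just pairs.

# group consecutive identical addresses via run-length encoding (sketch)
runs <- rle(Hdata_soil$address)
endR <- cumsum(runs$lengths)
startR <- endR - runs$lengths + 1
mtx <- cbind(startR = startR, endR = endR)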

Answer 2 (score: 0)

First, I think your data needs an additional id column, because address is not unique per pair of rows and does not by itself determine how you want to group them. For simplicity, I define the id column as:

df$id <- as.character(c(1,1,2,2,3,3))
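
On the full dataset, hard-coding the ids is not practical. As a sketch, and assuming paired rows always sit next to each other, the same grouping can be derived by starting a new group whenever the address changes between one row and the next:

# new group whenever the address differs from the previous row (sketch)
df$id <- as.character(cumsum(c(TRUE, df$address[-1] != df$address[-nrow(df)])))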

Then we can do the following:

# load 'data.table' for setDT()
library(data.table)

# Replace NA's by 0
df[is.na(df)] <- 0

# Extract numeric columns
tokeep <- which(sapply(df, is.numeric))

# Sum numeric columns per id
setDT(df)[, lapply(.SD, sum), by = id, .SDcols = tokeep]

Which yields:

   id rssi soil_temp_1 soil_temp_2 soil_temp_3 soil_moisture_1
1:  1  -65        9.73     -273.15       14.63             647
2:  2  -64        8.56        9.46        9.64             660
3:  3  -64        9.82     -273.15       14.29             643

You can then merge this back with the remaining non-numeric columns of the original df, picking which unique values you want to keep for X_id and timestamp.
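
A minimal sketch of that final merge, where the names sums, firsts and result are illustrative: keep the first X_id/timestamp/address per id and join them onto the per-id sums.

# per-id sums as above, this time saved (df is already a data.table after setDT)
sums <- df[, lapply(.SD, sum), by = id, .SDcols = tokeep]

# first occurrence of the non-numeric columns per id, then join on id
firsts <- df[, .(X_id = X_id[1], timestamp = timestamp[1],
                 address = address[1]), by = id]
result <- merge(firsts, sums, by = "id")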

Answer 3 (score: 0)

This solution using dplyr may be useful, provided you are confident that rounding the "timestamp" to the nearest minute provides a unique identifier in combination with "address":

library(readr) # Required only for recreating your data frame
library(dplyr)

Hdata_soil <- readr::read_csv("X_id,timestamp,address,rssi,batt_v,soil_temp_1,soil_temp_2,soil_temp_3,soil_moisture_1
565846060dd8e408e3817c58,27/11/2015 12:01:10,A8,-65,NA,NA,NA,NA,NA
565846070dd8e408e3817c59,27/11/2015 12:01:11,A8,NA,NA,9.73,-273.15,14.63,647
565846cf0dd8e408e3817caf,27/11/2015 12:04:31,A7,-64,NA,NA,NA,NA,NA
565846cf0dd8e408e3817cb0,27/11/2015 12:04:31,A7,NA,NA,8.56,9.46,9.64,660
565847650dd8e408e3817cf5,27/11/2015 12:07:01,A8,-64,NA,NA,NA,NA,NA
565847660dd8e408e3817cf6,27/11/2015 12:07:02,A8,NA,NA,9.82,-273.15,14.29,643")

# Dplyr chain to create new vars, group then summarise
Hdata_soil <- dplyr::mutate(
  Hdata_soil,
  # Convert timestamp to POSIXct
  timestamp = as.POSIXct(strptime(timestamp, format = "%d/%m/%Y %H:%M:%S"))
  # Round to nearest minute
  , timestamp_round = as.POSIXct(round(timestamp, units = "mins"))
) %>% 
  # Group by nearest minute timestamps and address
  dplyr::group_by(timestamp_round, address) %>% 
  # Take minimum non-NA value
  dplyr::summarise_each(
    funs(min(., na.rm = TRUE))
  )

Which yields:

> # Print
> Hdata_soil
Source: local data frame [3 x 10]
Groups: timestamp_round [?]

      timestamp_round address                     X_id           timestamp  rssi batt_v soil_temp_1 soil_temp_2 soil_temp_3 soil_moisture_1
               (time)   (chr)                    (chr)              (time) (int)  (lgl)       (dbl)       (dbl)       (dbl)           (int)
1 2015-11-27 12:01:00      A8 565846060dd8e408e3817c58 2015-11-27 12:01:10   -65     NA        9.73     -273.15       14.63             647
2 2015-11-27 12:05:00      A7 565846cf0dd8e408e3817caf 2015-11-27 12:04:31   -64     NA        8.56        9.46        9.64             660
3 2015-11-27 12:07:00      A8 565847650dd8e408e3817cf5 2015-11-27 12:07:01   -64     NA        9.82     -273.15       14.29             643