扩展为1分钟,计算平均值

时间:2016-11-30 18:47:44

标签: r time data.table time-series dplyr

我正在尝试操纵时间以便以每分钟为基础重新分配平均空闲时间

#############################################################
## Reproducible example 1 (n = 10)
#############################################################
## NOTE(review): in the original post the assignment was fused onto the
## `####` comment line above, so `df.in <- ...` was itself commented out
## and the object was never created.  The assignment now starts on its
## own line so the snippet is actually runnable.
## Columns: id (numeric), loc (factor A/B/C),
## t.arrive / t.leave (POSIXct, UTC), idle (idle time in seconds).
df.in <- structure(list(id = c(31, 46, 60, 57, 44, 04, 18, 55, 
22, 5), loc = structure(c(1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 
3L, 3L), .Label = c("A", "B", "C"), class = "factor"), t.arrive = structure(c(1425197374, 
1425197392, 1425197411, 1425198171, 1425198190, 1425196800, 1425197837, 
1425198027, 1425197507, 1425198026), class = c("POSIXct", "POSIXt"
), tzone = "UTC"), t.leave = structure(c(1425197409, 1425197531, 
1425197555, 1425198171, 1425198296, 1425196992, 1425197865, 1425198028, 
1425197512, 1425198026), class = c("POSIXct", "POSIXt"), tzone = "UTC"), 
    idle = c(35, 139, 144, 0, 106, 192, 28, 1, 5, 0)), .Names = c("id", 
"loc", "t.arrive", "t.leave", "idle"), class = c("tbl_df", "tbl", 
"data.frame"), row.names = c(NA, -10L))


#############################################################
##Reproducible example 2 (n=100):
#############################################################
> dput(df.in)
## dput() output of the 100-row example data set: columns id (numeric),
## loc (factor with 6 levels), t.arrive / t.leave (POSIXct, UTC) and
## idle (idle time in seconds).  This is the *printed* value of the
## console call above -- assign it (df.in <- structure(...)) to reproduce.
structure(list(id = c(78, 93, 107, 84, 104, 91, 71, 66, 189, 
182, 92, 209, 96, 84, 50, 103, 182, 183, 74, 132, 101, 78, 88, 
93, 48, 107, 82, 72, 182, 83, 66, 91, 104, 50, 71, 96, 103, 74, 
182, 101, 132, 84, 78, 88, 93, 107, 83, 182, 48, 66, 96, 51, 
75, 65, 102, 80, 106, 63, 156, 51, 75, 79, 67, 65, 85, 94, 89, 
106, 69, 80, 79, 67, 69, 52, 105, 94, 73, 95, 100, 76, 55, 99, 
60, 69, 53, 86, 52, 105, 90, 64, 95, 73, 63, 100, 76, 51, 99, 
53, 75, 52), loc = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 4L, 4L, 4L, 4L, 4L, 
4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 
6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 
6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L), .Label = c("A", 
"HPB", "HPS", "B", "OPP-B", "C"), class = "factor"), t.arrive = structure(c(1425197374, 
1425197392, 1425197411, 1425197927, 1425198171, 1425198190, 1425198194, 
1425198227, 1425198303, 1425198475, 1425198812, 1425198924, 1425199119, 
1425199199, 1425199235, 1425199355, 1425199528, 1425199544, 1425199641, 
1425199643, 1425199648, 1425199801, 1425199812, 1425200087, 1425200103, 
1425200310, 1425200454, 1425200478, 1425200517, 1425200611, 1425200669, 
1425201076, 1425201105, 1425201275, 1425201287, 1425201378, 1425201536, 
1425201604, 1425201628, 1425201767, 1425201893, 1425202137, 1425202244, 
1425202255, 1425202557, 1425202566, 1425202879, 1425202962, 1425203094, 
1425203109, 1425203380, 1425196800, 1425196800, 1425197837, 1425198027, 
1425198955, 1425199074, 1425199342, 1425199465, 1425199855, 1425199929, 
1425199970, 1425200480, 1425200517, 1425200950, 1425201289, 1425201357, 
1425201879, 1425202374, 1425202982, 1425202987, 1425203318, 1425197507, 
1425198026, 1425198378, 1425198390, 1425198994, 1425199059, 1425199298, 
1425199522, 1425199528, 1425199728, 1425200115, 1425200289, 1425200373, 
1425200547, 1425200679, 1425200880, 1425200909, 1425201364, 1425201509, 
1425201801, 1425201910, 1425202039, 1425202246, 1425202490, 1425202555, 
1425202589, 1425203048, 1425203108), class = c("POSIXct", "POSIXt"
), tzone = "UTC"), t.leave = structure(c(1425197409, 1425197531, 
1425197555, 1425197927, 1425198171, 1425198296, 1425198194, 1425198315, 
1425198411, 1425198553, 1425198818, 1425198924, 1425199119, 1425199219, 
1425199235, 1425199359, 1425199528, 1425199558, 1425199652, 1425199734, 
1425199648, 1425199801, 1425200028, 1425200198, 1425200240, 1425200364, 
1425200492, 1425200619, 1425200610, 1425200910, 1425200859, 1425201100, 
1425201302, 1425201275, 1425201467, 1425201393, 1425201569, 1425201704, 
1425201805, 1425201951, 1425202057, 1425202262, 1425202370, 1425202255, 
1425202667, 1425202840, 1425202913, 1425202990, 1425203094, 1425203109, 
1425203380, 1425196992, 1425196800, 1425197865, 1425198028, 1425198984, 
1425199149, 1425199356, 1425199466, 1425199902, 1425200051, 1425200286, 
1425200783, 1425200845, 1425201125, 1425201586, 1425201640, 1425201879, 
1425202377, 1425202986, 1425202987, 1425203318, 1425197512, 1425198026, 
1425198378, 1425198486, 1425199021, 1425199078, 1425199325, 1425199558, 
1425199810, 1425199939, 1425200118, 1425200305, 1425200485, 1425200782, 
1425200894, 1425201065, 1425201111, 1425201364, 1425201623, 1425201857, 
1425202015, 1425202039, 1425202404, 1425202671, 1425202651, 1425202834, 
1425203105, 1425203198), class = c("POSIXct", "POSIXt"), tzone = "UTC"), 
    idle = c(35, 139, 144, 0, 0, 106, 0, 88, 108, 78, 6, 0, 0, 
    20, 0, 4, 0, 14, 11, 91, 0, 0, 216, 111, 137, 54, 38, 141, 
    93, 299, 190, 24, 197, 0, 180, 15, 33, 100, 177, 184, 164, 
    125, 126, 0, 110, 274, 34, 28, 0, 0, 0, 192, 0, 28, 1, 29, 
    75, 14, 1, 47, 122, 316, 303, 328, 175, 297, 283, 0, 3, 4, 
    0, 0, 5, 0, 0, 96, 27, 19, 27, 36, 282, 211, 3, 16, 112, 
    235, 215, 185, 202, 0, 114, 56, 105, 0, 158, 181, 96, 245, 
    57, 90)), class = "data.frame", .Names = c("id", "loc", "t.arrive", 
"t.leave", "idle"), row.names = c(NA, -100L))

以下是我想要获取的内容:获取每个ID在任何给定时间内贡献的空闲时间总和(必须按 loc 分组)。然后,取平均值(见示意图 schematic)。

...以及我在尝试的内容

## Expand each visit into 1-minute interval rows: one row per minute
## between t.arrive and t.leave (inclusive), carrying id/loc along.
## rownames_to_column() + group_by(rowname) makes each source row its
## own group so do() runs once per row.
## NOTE(review): do() is superseded in current dplyr; rowwise()/reframe()
## would be the modern equivalent.
df.min <- df.in %>%
              rownames_to_column() %>%
              group_by(rowname) %>%  
              do(data.frame(min = seq(.$t.arrive, .$t.leave, by = "1 min"),
                            id = first(.$id),
                            loc = first(.$loc),
                            # each group is a single source row, so
                            # mean(.$idle) is just that row's idle value
                            idle.mean = as.numeric(mean(.$idle))
              ))


## Snap every expanded timestamp to the nearest whole minute
## (round.POSIXt returns POSIXlt, hence the as.POSIXct coercion).
df.min$min <- as.POSIXct(round(df.min$min, "mins"))


## Within each minute and location: count the contributing ids and
## convert the summed idle seconds into minutes.
df.min <- group_by(df.min, min, loc) %>%
  summarise(
    units.count   = n(),
    cum.queue.min = sum(idle.mean) / 60
  )

## Average 1-minute idle time per id: total idle minutes in the minute
## divided by the number of ids present, ordered by minute then location.
## as.data.frame() also drops the grouping left over from summarise().
df.min <- as.data.frame(df.min) %>%
  mutate(queue.tmean = cum.queue.min / units.count) %>%
  select(-units.count, -cum.queue.min) %>%
  arrange(min, loc)

3 个答案:

答案 0 :(得分:2)

一种方法是创建一个数据帧,其中每一行由id-loc-time标识,其中每个时间是一分钟,其值是该时间的空闲秒数,例如

  id loc                time secs
1 46   A 2015-03-01 08:10:00   60
2 46   A 2015-03-01 08:11:00   60
3 46   A 2015-03-01 08:12:00   19

创建这样一个数据帧的简单函数,给定开始时间,空闲秒数和其他标识符:

library(lubridate)

## Build one row per minute a unit was idle, starting at `start` rounded
## to the whole minute: 60 idle seconds for every full minute, and the
## remainder (idle %% 60) in the final minute.
##
## start: POSIXct arrival time; idle: idle seconds; id, loc: identifiers
## carried through unchanged.  Returns a data.frame(id, loc, time, secs).
make_obs <- function(start, idle, id, loc) {
  anchor <- round_date(start, "min")
  n_full <- trunc(idle / 60)
  ## n_full == 0 yields just the anchor minute (the `if` returns NULL,
  ## which c() drops)
  stamps <- c(anchor, if (n_full > 0) anchor + minutes(seq_len(n_full)))
  data.frame(
    id   = id,
    loc  = loc,
    time = stamps,
    secs = c(rep(60, n_full), idle %% 60)
  )
}

然后使用mapply来浏览原始数据集,并使用dplyr函数进行汇总:

## Apply make_obs() row-wise over the original data set
## (SIMPLIFY = FALSE keeps the per-row data frames as a list),
## stack the pieces with rbind, then average idle seconds per
## location and minute.
library(dplyr)
out <- do.call(rbind, mapply(make_obs, 
  df.in$t.arrive, df.in$idle, df.in$id, df.in$loc,
  SIMPLIFY = FALSE))
group_by(out, loc, time) %>%
  summarise(idle = mean(secs))

输出:

Source: local data frame [13 x 3]
Groups: loc [?]

      loc                time     idle
   <fctr>              <dttm>    <dbl>
1       A 2015-03-01 08:10:00 51.66667
2       A 2015-03-01 08:11:00 60.00000
3       A 2015-03-01 08:12:00 21.50000
4       A 2015-03-01 08:23:00 30.00000
...

答案 1 :(得分:2)

data.table方法

非常有效的data.table方法,没有循环

library(lubridate)
library(data.table)
## FIX: the original answer called setDT(dt.in) on an object `dt.in`
## that was never defined -- the example data is `df.in`.  Build the
## data.table as a copy so the original tbl_df stays untouched.
dt.in <- as.data.table(df.in)
## Key each visit by its arrival time rounded to the whole minute.
dt.in[, arrive_min := round_date(t.arrive, "mins")]
## Expand every visit into one row per idle minute:
## arrive_min, arrive_min + 60s, ... for floor(idle/60) extra minutes.
dt2 <- dt.in[, .(mins = arrive_min + (0:floor(idle/60))*60) , by = .(id, loc, arrive_min)]

此摘要假定每个分组变量组合具有唯一的arrive_min,将arrive_min添加到分组变量中即可自动满足这一点。(请参阅下面的不稳定示例。由于arrive_min的非唯一性,早期的解决方案会引发 differing rows 错误。)一旦我们解决了这些问题,剩下的就非常简单了。

## Carry arrive_min over as the join key `mins` so the expanded minute
## grid (dt2) can be matched back to the original rows.
dt.in[, mins:=arrive_min, ]
## Right join: one output row per expanded minute, with the visit's
## columns attached (dt2's arrive_min becomes i.arrive_min).
dt_full <- dt.in[dt2, on = c("id", "loc", "mins")]
## Per visit (id, loc, i.arrive_min): spread the idle seconds over its
## minutes -- 60 s for every full minute, the remainder in the last one
## (idle[1] because the joined idle value repeats within the group) --
## then average the idle seconds per minute and location.
dt_full[, .(mins = mins, idle=c(rep(60, idle[1]/60), idle[1]%%60)), by = .(id, loc, i.arrive_min)
   ][, .(ave_idle=mean(idle)), by = .(mins, loc)]


#                   min1 loc ave_idle
# 1: 2015-03-01 08:10:00   A 51.66667
# 2: 2015-03-01 08:11:00   A 60.00000
# 3: 2015-03-01 08:12:00   A 21.50000
# 4: 2015-03-01 08:23:00   A 30.00000
# 5: 2015-03-01 08:24:00   A 46.00000
# 6: 2015-03-01 08:00:00   B 60.00000
# 7: 2015-03-01 08:01:00   B 60.00000
# 8: 2015-03-01 08:02:00   B 60.00000
# 9: 2015-03-01 08:03:00   B 12.00000
#10: 2015-03-01 08:17:00   B 28.00000
#11: 2015-03-01 08:20:00   B  1.00000
#12: 2015-03-01 08:12:00   C  5.00000
#13: 2015-03-01 08:20:00   C  0.00000

注意,在创建mins = arrive_min + (0:floor(idle/60))*60和idle=c(rep(60, idle[1]/60), idle[1]%%60)时,我们假设每个分组变量组合(id、loc、arrive_min)的idle是唯一的。前者将t.arrive=08:01:00, idle=159展开为mins=c(08:01:00, 08:02:00, 08:03:00),后者将idle=159转换为c(60, 60, 39)(若行不唯一,则会得到c(159, NA, NA)这样的错误结果)。因此,如果您有以下数据点,则应修改此方法:

   id loc            t.arrive             t.leave idle
1  78   A 2015-03-01 08:09:36 2015-03-01 08:09:58   22
2  78   A 2015-03-01 08:09:34 2015-03-01 08:10:09   35

dplyr

我们不妨使用do。与data.table对应物相比,do操作略显笨拙。

答案 2 :(得分:1)

我认为这就是你要找的东西:

# Create a sequence of datetimes, by second, from t.arrive to t.leave for
# each observation.
# FIX: the original grew `df` with bind_rows() inside the for-loop, which
# copies the accumulated result on every iteration (O(n^2)); build one
# data frame per source row and bind them all once at the end instead.
df <- bind_rows(lapply(seq_len(nrow(df.in)), function(i) {
    slice(data_frame(  # slice() cuts off the last second entry so
                       # t.leave itself is not counted as present
        t.present = seq(df.in[[i, 't.arrive']], df.in[[i, 't.leave']], by = 'sec'),
        id = df.in[[i, 'id']],
        loc = df.in[[i, 'loc']]), -n())
}))

# Calculate target metric:
# truncate each second-level timestamp to the start of its minute,
# count the seconds each id is present per loc/minute, then average
# those per-id counts over each loc/minute.
df[["t.present.min"]] <- as.POSIXct(trunc(df[["t.present"]], units = "mins"))
per_id <- df %>%
  group_by(id, loc, t.present.min) %>%
  summarise(secs.present = n())
result <- per_id %>%
  group_by(loc, t.present.min) %>%
  summarise(avg.secs = mean(secs.present))
result

结果:

      loc       t.present.min avg.secs
   <fctr>              <dttm>    <dbl>
1       A 2015-03-01 08:09:00 17.00000
2       A 2015-03-01 08:10:00 39.33333
3       A 2015-03-01 08:11:00 60.00000
4       A 2015-03-01 08:12:00 23.00000
5       A 2015-03-01 08:23:00 50.00000
6       A 2015-03-01 08:24:00 56.00000
7       B 2015-03-01 08:00:00 60.00000
8       B 2015-03-01 08:01:00 60.00000
9       B 2015-03-01 08:02:00 60.00000
10      B 2015-03-01 08:03:00 12.00000
11      B 2015-03-01 08:17:00 28.00000
12      B 2015-03-01 08:20:00  1.00000
13      C 2015-03-01 08:11:00  5.00000