我正在尝试在R中使用foreach包进行并行处理,但遇到了几个问题:
在CRAN for Windows上不存在使foreach工作所需的doMC包。一些博客建议doSNOW应该做同样的工作。但是,当我使用doSNOW运行foreach命令时,%dopar%
似乎不比%do%
工作得快。实际上它要慢得多。我的CPU是Intel i7 860 @ 2.80GHz,内存为8 GB。以下是我的代码:
## Run the bootstrap-GLM example on a single core (%do%).
library(foreach)  # library() errors on failure; require() only warns and returns FALSE
library(doSNOW)
# 100 non-setosa iris rows: column 1 = Sepal.Length (numeric predictor),
# column 2 here = Species (two-level factor response)
x <- iris[which(iris[, 5] != "setosa"), c(1, 5)]
trials <- 10000
system.time({
  r <- foreach(icount(trials), .combine = cbind) %do% {
    ind <- sample(100, 100, replace = TRUE)  # bootstrap row indices
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)  # value of the loop body, cbind-ed into r
  }
})[3]
# elapsed
# 37.28
# Same example on 2 cores (%dopar%). Keep the cluster handle: the original
# passed makeCluster() straight into registerDoSNOW(), so the worker
# processes could never be shut down (resource leak).
cl <- makeCluster(2, type = "SOCK")
registerDoSNOW(cl)
getDoParWorkers()  # confirm 2 workers are registered
trials <- 10000
system.time({
  r <- foreach(icount(trials), .combine = cbind) %dopar% {
    ind <- sample(100, 100, replace = TRUE)
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)
  }
})[3]
stopCluster(cl)  # release the worker processes
# elapsed
# 108.14
我重新安装了所有需要的软件包,但仍然存在同样的问题。这是输出:
sessionInfo()
#R version 2.15.1 (2012-06-22)
#Platform: i386-pc-mingw32/i386 (32-bit)
#locale:
#[1] LC_COLLATE=English_United States.1252
#[2] LC_CTYPE=English_United States.1252
#[3] LC_MONETARY=English_United States.1252
#[4] LC_NUMERIC=C
#[5] LC_TIME=English_United States.1252
#attached base packages:
#[1] parallel stats graphics grDevices datasets utils methods
#[8] base
#other attached packages:
#[1] doParallel_1.0.1 codetools_0.2-8 doSNOW_1.0.6 snow_0.3-10
#[5] iterators_1.0.6 foreach_1.4.0 rcom_2.2-5 rscproxy_2.0-5
#loaded via a namespace (and not attached):
#[1] compiler_2.15.1 tools_2.15.1
答案 0(得分:4):
最好在Windows中使用doParallel()
:
# Register a doParallel backend; library() fails loudly, unlike require().
library(foreach)
library(doParallel)
cl <- makeCluster(6)  # use 6 cores, e.g. for an 8-core machine
registerDoParallel(cl)
然后运行您的foreach() %dopar% {}
编辑:OP提到仍然看到问题,所以包括我的确切代码。在4核Windows7 VM上运行,R 2.15.1 32位,只允许doParallel
使用我的3个内核:
library(foreach)
library(doParallel)
cl <- makeCluster(3)
registerDoParallel(cl)
x <- iris[which(iris[, 5] != "setosa"), c(1, 5)]
trials <- 1000
# Sequential baseline. The glm() is deliberately fitted four times per
# iteration so each task is heavy enough that parallel overhead does not
# dominate the measurement.
system.time(
  foreach(icount(trials), .combine = cbind) %do% {
    ind <- sample(100, 100, replace = TRUE)
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)
  })[3]
# Parallel version of the same work.
system.time(
  foreach(icount(trials), .combine = cbind) %dopar% {
    ind <- sample(100, 100, replace = TRUE)
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)
  })[3]
stopCluster(cl)  # shut the workers down once the benchmark is done
在我的情况下,我%do%
获得17.6秒,%dopar%
获得14.8秒。观察任务执行时,似乎大部分执行时间是cbind
,这是并行运行的常见问题。在我自己的模拟中,我已经完成了自定义工作,将我的详细结果保存为并行任务的一部分,而不是通过foreach
返回它们,以消除这部分开销。 YMMV。
答案 1(得分:3):
我知道这是一个较旧的问题,但我在搜索其他内容时遇到了它,并认为我会添加我的解决方案。我发现将试验总数分成不同的试验组(组数等于处理器核心数)更有效,而不是试图同时并行化所有试验并处理所有开销。以下是使用OP示例的比较:
library(doParallel)  # also attaches foreach, iterators and parallel
x <- iris[which(iris[, 5] != "setosa"), c(1, 5)]
trials <- 10000
# Example using a single core.
t1 <- system.time({
  r1 <- foreach(icount(trials), .combine = cbind) %do% {
    ind <- sample(100, 100, replace = TRUE)
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)
  }
})[3]
# Example using 4 cores, parallelizing each individual model trial
# (one foreach task per trial — maximal scheduling/combine overhead).
nCores <- 4
cl <- makeCluster(nCores)
registerDoParallel(cl)
t2 <- system.time({
  r2 <- foreach(icount(trials), .combine = cbind) %dopar% {
    ind <- sample(100, 100, replace = TRUE)
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)
  }
})[3]
# Example using 4 cores, parallelizing one group of trial runs per core.
trialsPerCore <- as.integer(ceiling(trials / nCores)) # number of trials
# to run on each core
# Fit one bootstrap logistic regression on `x` (col 1 = numeric predictor,
# col 2 = binary-factor response, 100 rows) and return its coefficients.
model <- function(x) {
  boot_rows <- sample(100, 100, replace = TRUE)
  fit <- glm(x[boot_rows, 2] ~ x[boot_rows, 1], family = binomial(logit))
  coefficients(fit)
}
# Run model() `trials` times on the same data; replicate()'s default
# simplification column-binds the coefficient vectors into a matrix.
modelRun <- function(trials, x) {
  runs <- replicate(trials, model(x))
  runs
}
# Dispatch one batch of trials to each core and time the whole run.
# NOTE(review): with ceiling() above, nCores * trialsPerCore can exceed
# `trials`, so r3 may hold slightly more columns than r1/r2.
t3 <- system.time(
  r3 <- foreach(icount(nCores), .combine = cbind) %dopar%
    modelRun(trialsPerCore, x)
)[3]
stopCluster(cl)
运行Ubuntu 12.04的3.4 GHz四核i7的执行时间:
> t1
elapsed
34.5
> t2
elapsed
26.5
> t3
elapsed
8.295
答案 2(得分:2):
library(foreach)  # library() errors on failure; require() only warns
library(doSNOW)
x <- iris[which(iris[, 5] != "setosa"), c(1, 5)]
trials <- 1000
# Sequential baseline; glm() fitted four times per iteration on purpose,
# to fatten each task so parallel overhead does not dominate.
system.time(
  foreach(icount(trials), .combine = cbind) %do% {
    ind <- sample(100, 100, replace = TRUE)
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)
  })[3]
# Keep the cluster handle so the workers can be stopped afterwards;
# the original leaked them by nesting makeCluster() inside registerDoSNOW().
cl <- makeCluster(2, type = "SOCK")
registerDoSNOW(cl)
getDoParWorkers()  # confirm 2 workers are registered
trials <- 1000
system.time(
  foreach(icount(trials), .combine = cbind) %dopar% {
    ind <- sample(100, 100, replace = TRUE)
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    results1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
    coefficients(results1)
  })[3]
stopCluster(cl)  # release the worker processes
通过在foreach中模拟繁重的计算任务,我让两种方式的耗时基本打平——这就是并行开销的代价。我最近遇到过一个类似的情况,直接用MPI来处理,它的开销要低得多,但使用起来要复杂得多(我猜Dirk会不同意——或者应该说,"不那么优雅")。