Poor performance from mxnet LinearRegressionOutput

Time: 2016-12-19 07:25:21

Tags: r regression mxnet

I am unable to get reasonable performance from the mxnet LinearRegressionOutput layer.

The self-contained example below attempts to fit a regression to a simple polynomial function (y = x1 + x2^2 + x3^3) with a small amount of random noise added.

It uses the mxnet regression example given here, as well as a slightly more complex network that includes a hidden layer.

The example below also trains regression networks with the neuralnet and nnet packages, which, as the plots show, perform much better.

I realize that the stock answer for a poorly performing network is to do some hyperparameter tuning, but I have tried a range of values without any improvement in performance. So I have the following questions:

  1. Is there an error in my mxnet regression implementation?
  2. Does anyone have experience that could help me get reasonable performance out of mxnet on a simple regression problem like the one considered here?
  3. Does anyone else have a well-performing mxnet regression example?
  4. My setup is as follows:

    MXNet version: 0.7
    R `sessionInfo()`: R version 3.3.2 (2016-10-31)
    Platform: x86_64-w64-mingw32/x64 (64-bit)
    Running under: Windows 7 x64 (build 7601) Service Pack 1
    

The poor regression results from mxnet:

[figure: mxnet regression performance]

As can be seen from this reproducible example:

    ## SIMPLE REGRESSION PROBLEM
    # Check mxnet out-of-the-box performance VS neuralnet, and caret/nnet
    
    library(mxnet)
    library(neuralnet)
    library(nnet)
    library(caret)
    library(tictoc)
    library(reshape)
    
    # Data definitions
    nObservations <- 1000
    noiseLvl <- 0.1
    
    # Network config
    nHidden <- 3
    learnRate <- 2e-6
    momentum <- 0.9
    batchSize <- 20
    nRound <- 1000
    verbose <- FALSE
    array.layout = "rowmajor"
    
    # GENERATE DATA:
    df <- data.frame(x1=runif(nObservations),
                     x2=runif(nObservations),
                     x3=runif(nObservations))
    
    df$y <- df$x1 + df$x2^2 + df$x3^3 + noiseLvl*runif(nObservations)
    # normalize data columns
    # df <- scale(df)
    
    # Separate data into train/test
    test.ind = seq(1, nObservations, 10)    # 1 in 10 samples for testing
    train.x = data.matrix(df[-test.ind, -which(colnames(df) %in% c("y"))])
    train.y = df[-test.ind, "y"]
    test.x = data.matrix(df[test.ind, -which(colnames(df) %in% c("y"))])
    test.y = df[test.ind, "y"]
    
    # Define mxnet network, following 5-minute regression example from here:
    # http://mxnet-tqchen.readthedocs.io/en/latest//packages/r/fiveMinutesNeuralNetwork.html#regression
    data <- mx.symbol.Variable("data")
    label <- mx.symbol.Variable("label")
    fc1 <- mx.symbol.FullyConnected(data, num_hidden=1, name="fc1")
    lro1 <- mx.symbol.LinearRegressionOutput(data=fc1, label=label, name="lro")
    
    # Train MXNET model
    mx.set.seed(0)
    tic("mxnet training 1")
    mxModel1 <- mx.model.FeedForward.create(lro1, X=train.x, y=train.y,
                                            eval.data=list(data=test.x, label=test.y),
                                            ctx=mx.cpu(), num.round=nRound,
                                            array.batch.size=batchSize,
                                            learning.rate=learnRate, momentum=momentum,
                                            eval.metric=mx.metric.rmse,
                                            verbose=FALSE, array.layout=array.layout)
    toc()
    
    # Train network with a hidden layer
    fc1 <- mx.symbol.FullyConnected(data, num_hidden=nHidden, name="fc1")
    tanh1 <- mx.symbol.Activation(fc1, act_type="tanh", name="tanh1")
    fc2 <- mx.symbol.FullyConnected(tanh1, num_hidden=1, name="fc2")
    lro2 <- mx.symbol.LinearRegressionOutput(data=fc2, label=label, name="lro")
    tic("mxnet training 2")
    mxModel2 <- mx.model.FeedForward.create(lro2, X=train.x, y=train.y,
                                            eval.data=list(data=test.x, label=test.y),
                                            ctx=mx.cpu(), num.round=nRound,
                                            array.batch.size=batchSize,
                                            learning.rate=learnRate, momentum=momentum,
                                            eval.metric=mx.metric.rmse,
                                            verbose=FALSE, array.layout=array.layout)
    toc()
    
    # Train neuralnet model
    mx.set.seed(0)
    tic("neuralnet training")
    nnModel <- neuralnet(y~x1+x2+x3, data=df[-test.ind, ], hidden=c(nHidden),
                         linear.output=TRUE, stepmax=1e6)
    toc()
    
    # Train nnet model
    mx.set.seed(0)
    tic("nnet training")
    nnetModel <- nnet(y~x1+x2+x3, data=df[-test.ind, ], size=nHidden, trace=F,
                       linout=TRUE)
    toc()
    
    # Check response VS targets on training data:
    par(mfrow=c(2,2))
    plot(train.y, compute(nnModel, train.x)$net.result, 
         main="neuralnet Train Fitting Fake Data", xlab="Target", ylab="Response")
    abline(0,1, col="red")
    
    plot(train.y, predict(nnetModel, train.x), 
         main="nnet Train Fitting Fake Data", xlab="Target", ylab="Response")
    abline(0,1, col="red")
    
    plot(train.y, predict(mxModel1, train.x, array.layout=array.layout), 
         main="MXNET (no hidden) Train Fitting Fake Data", xlab="Target",
         ylab="Response")
    abline(0,1, col="red")
    
    plot(train.y, predict(mxModel2, train.x, array.layout=array.layout),
         main="MXNET (with hidden) Train Fitting Fake Data", xlab="Target",
         ylab="Response")
    abline(0,1, col="red")
    

1 Answer:

Answer 0 (score: 3)

I raised the same question on the mxnet GitHub (link), and uzhao was kind enough to suggest using a different optimization approach, so credit goes to them.

Using the "rmsprop" optimizer, together with an increased batch size, enabled mxnet to deliver performance comparable to the neuralnet and nnet tools on this simple regression task. I have also included the performance of a linear lm regression for reference.
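
To make the key change explicit, here is a minimal sketch of just the delta relative to the training calls in the question (the full listing follows below): the explicit optimizer argument, a larger batch size, and fewer rounds; learning.rate and momentum are dropped.

# Sketch of the changed hyperparameters only; see the full listing below.
batchSize <- 100
nRound <- 400
optimizer <- "rmsprop"

mxModel1 <- mx.model.FeedForward.create(lro1, X=train.x, y=train.y,
                                        ctx=mx.cpu(), num.round=nRound,
                                        array.batch.size=batchSize,
                                        eval.metric=mx.metric.rmse,
                                        array.layout="rowmajor",
                                        optimizer=optimizer)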

The results and self-contained example code are included below. I hope this is helpful to someone else (or to my future self).

Root mean square error (RMSE) of the 5 models:

$mxModel1
[1] 0.1404579862

$mxModel2
[1] 0.03263213499

$nnet
[1] 0.03222651138

$neuralnet
[1] 0.03054112057

$linearModel
[1] 0.1404421006

The plot shows good/reasonable performance from the mxnet regressions (linear regression results shown in green):

[figure: mxnet regression results using rmsprop optimization]

And finally, the code for this self-contained example:

## SIMPLE REGRESSION PROBLEM
# Check mxnet out-of-the-box performance VS neuralnet, and caret/nnet

library(mxnet)
library(neuralnet)
library(nnet)
library(caret)
library(tictoc)
library(reshape)

# Data definitions
nObservations <- 1000
noiseLvl <- 0.1

# Network config
nHidden <- 3
batchSize <- 100
nRound <- 400
verbose <- FALSE
array.layout = "rowmajor"
optimizer <- "rmsprop"

# GENERATE DATA:
set.seed(0)
df <- data.frame(x1=runif(nObservations),
                 x2=runif(nObservations),
                 x3=runif(nObservations))

df$y <- df$x1 + df$x2^2 + df$x3^3 + noiseLvl*runif(nObservations)
# normalize data columns
# df <- scale(df)

# Separate data into train/test
test.ind = seq(1, nObservations, 10)    # 1 in 10 samples for testing
train.x = data.matrix(df[-test.ind, -which(colnames(df) %in% c("y"))])
train.y = df[-test.ind, "y"]
test.x = data.matrix(df[test.ind, -which(colnames(df) %in% c("y"))])
test.y = df[test.ind, "y"]

# Define mxnet network, following 5-minute regression example from here:
# http://mxnet-tqchen.readthedocs.io/en/latest//packages/r/fiveMinutesNeuralNetwork.html#regression
data <- mx.symbol.Variable("data")
label <- mx.symbol.Variable("label")
fc1 <- mx.symbol.FullyConnected(data, num_hidden=1, name="fc1")
lro1 <- mx.symbol.LinearRegressionOutput(data=fc1, label=label, name="lro")

# Train MXNET model
mx.set.seed(0)
tic("mxnet training 1")
mxModel1 <- mx.model.FeedForward.create(lro1, X=train.x, y=train.y,
                                        eval.data=list(data=test.x, label=test.y),
                                        ctx=mx.cpu(), num.round=nRound,
                                        array.batch.size=batchSize,
                                        eval.metric=mx.metric.rmse,
                                        verbose=verbose,
                                        array.layout=array.layout,
                                        optimizer=optimizer
                                        )
toc()

# Train network with a hidden layer
fc1 <- mx.symbol.FullyConnected(data, num_hidden=nHidden, name="fc1")
tanh1 <- mx.symbol.Activation(fc1, act_type="tanh", name="tanh1")
fc2 <- mx.symbol.FullyConnected(tanh1, num_hidden=1, name="fc2")
lro2 <- mx.symbol.LinearRegressionOutput(data=fc2, label=label, name="lro2")
tic("mxnet training 2")
mx.set.seed(0)
mxModel2 <- mx.model.FeedForward.create(lro2, X=train.x, y=train.y,
                                        eval.data=list(data=test.x, label=test.y),
                                        ctx=mx.cpu(), num.round=nRound,
                                        array.batch.size=batchSize,
                                        eval.metric=mx.metric.rmse,
                                        verbose=verbose,
                                        array.layout=array.layout,
                                        optimizer=optimizer
                                        )
toc()

# Train neuralnet model
set.seed(0)
tic("neuralnet training")
nnModel <- neuralnet(y~x1+x2+x3, data=df[-test.ind, ], hidden=c(nHidden),
                     linear.output=TRUE, stepmax=1e6)
toc()
# Train nnet model
set.seed(0)
tic("nnet training")
nnetModel <- nnet(y~x1+x2+x3, data=df[-test.ind, ], size=nHidden, trace=F,
                   linout=TRUE)
toc()

# Check response VS targets on training data:
par(mfrow=c(2,2))
plot(train.y, compute(nnModel, train.x)$net.result, 
     main="neuralnet Train Fitting Fake Data", xlab="Target", ylab="Response")
abline(0,1, col="red")

# Plot linear model performance for reference
linearModel <- lm(y~., df[-test.ind, ])
points(train.y, predict(linearModel, data.frame(train.x)), col="green")

plot(train.y, predict(nnetModel, train.x), 
     main="nnet Train Fitting Fake Data", xlab="Target", ylab="Response")
abline(0,1, col="red")

plot(train.y, predict(mxModel1, train.x, array.layout=array.layout), 
     main="MXNET (no hidden) Train Fitting Fake Data", xlab="Target",
     ylab="Response")
abline(0,1, col="red")

plot(train.y, predict(mxModel2, train.x, array.layout=array.layout),
     main="MXNET (with hidden) Train Fitting Fake Data", xlab="Target",
     ylab="Response")
abline(0,1, col="red")

# Create and print table of results:
results <- list()
rmse <- function(target, response) {
  return(sqrt(mean((target - response)^2)))
}
results$mxModel1 <- rmse(train.y, predict(mxModel1, train.x,
                                          array.layout=array.layout))
results$mxModel2 <- rmse(train.y, predict(mxModel2, train.x,
                                          array.layout=array.layout))
results$nnet <- rmse(train.y, predict(nnetModel, train.x))
results$neuralnet <- rmse(train.y, compute(nnModel, train.x)$net.result)
results$linearModel <- rmse(train.y, predict(linearModel, data.frame(train.x)))

print(results)
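
As an extra sanity check (my addition, reusing only the objects defined above), the same rmse helper can be applied to the held-out test split, to confirm the models generalize rather than merely fitting the training data:

# Held-out RMSE on the 1-in-10 test split (illustrative addition)
testResults <- list()
testResults$mxModel1 <- rmse(test.y, predict(mxModel1, test.x,
                                             array.layout=array.layout))
testResults$mxModel2 <- rmse(test.y, predict(mxModel2, test.x,
                                             array.layout=array.layout))
testResults$nnet <- rmse(test.y, predict(nnetModel, test.x))
testResults$neuralnet <- rmse(test.y, compute(nnModel, test.x)$net.result)
testResults$linearModel <- rmse(test.y, predict(linearModel, data.frame(test.x)))
print(testResults)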