Image Recognition with Scalar Output using CNN MXnet in R

Date: 2017-07-29 00:02:26

Tags: r image-processing conv-neural-network image-recognition mxnet

So I am trying to use a CNN via the mxnet package in R to predict a scalar output (in my case, waiting time) from an image.

However, when I do this, I get the same output every time (it predicts the same number, which is probably just the average of all the outcomes). How can I get it to predict the scalar output correctly?

Also, my images have already been preprocessed by grayscaling them and converting them into the pixel format shown below. I am essentially using the images to predict waiting times, which is why my train_y is the current waiting time in seconds, and hence why I did not convert it to the [0,1] range. I would prefer a regression-type output, or some scalar output that gives the predicted waiting time based on the image.

What other approaches would you suggest for this problem? I am not sure whether my approach is correct.

Here is my reproducible code:

library(caret)   # needed for createDataPartition below

set.seed(0)

df <- data.frame(replicate(784,runif(7538)))
df$waittime <- 1000*runif(7538)


training_index <- createDataPartition(df$waittime, p = .9, times = 1)
training_index <- unlist(training_index)

train_set <- df[training_index,]
dim(train_set)
test_set <- df[-training_index,]
dim(test_set)


## Fix train and test datasets
train_data <- data.matrix(train_set)
train_x <- t(train_data[, -785])
train_y <- train_data[,785]
train_array <- train_x
dim(train_array) <- c(28, 28, 1, ncol(train_array))


test_data <- data.matrix(test_set)
test_x <- t(test_set[,-785])
test_y <- test_set[,785]
test_array <- test_x
dim(test_array) <- c(28, 28, 1, ncol(test_x))




library(mxnet)
## Model
mx_data <- mx.symbol.Variable('data')
## 1st convolutional layer 5x5 kernel and 20 filters.
conv_1 <- mx.symbol.Convolution(data = mx_data, kernel = c(5, 5), num_filter = 20)
tanh_1 <- mx.symbol.Activation(data = conv_1, act_type = "tanh")
pool_1 <- mx.symbol.Pooling(data = tanh_1, pool_type = "max", kernel = c(2, 2), stride = c(2,2 ))
## 2nd convolutional layer 5x5 kernel and 50 filters.
conv_2 <- mx.symbol.Convolution(data = pool_1, kernel = c(5,5), num_filter = 50)
tanh_2 <- mx.symbol.Activation(data = conv_2, act_type = "tanh")
pool_2 <- mx.symbol.Pooling(data = tanh_2, pool_type = "max", kernel = c(2, 2), stride = c(2, 2))
## 1st fully connected layer
flat <- mx.symbol.Flatten(data = pool_2)
fcl_1 <- mx.symbol.FullyConnected(data = flat, num_hidden = 500)
tanh_3 <- mx.symbol.Activation(data = fcl_1, act_type = "tanh")
## 2nd fully connected layer
fcl_2 <- mx.symbol.FullyConnected(data = tanh_3, num_hidden = 1)
## Output
#NN_model <- mx.symbol.SoftmaxOutput(data = fcl_2)
label <- mx.symbol.Variable("label")
#NN_model <- mx.symbol.MakeLoss(mx.symbol.square(mx.symbol.Reshape(fcl_2, shape = 0) - label))
NN_model <- mx.symbol.LinearRegressionOutput(fcl_2)


## Device used. Sadly not the GPU :-(
#device <- mx.gpu
#Didn't work well, predicted same number continuously regardless of image
## Train the model
model <- mx.model.FeedForward.create(NN_model, X = train_array, y = train_y,
                                     #                                     ctx = device,
                                     num.round = 30,
                                     array.batch.size = 100,
                                     initializer=mx.init.uniform(0.002), 
                                     learning.rate = 0.00001,
                                     momentum = 0.9,
                                     wd = 0.00001,
                                     eval.metric = mx.metric.rmse,
                                     epoch.end.callback = mx.callback.log.train.metric(100))



pred <- predict(model, test_array)
#gives the same numeric output 

2 Answers:

Answer 0 (score: 1)

It appears your network is collapsing, due to a number of potential issues. I would try the following modifications:

  • Use ReLU activation instead of tanh. ReLU has proven to be far more robust in conv nets than sigmoid or tanh (a sketch combining this and the bucketing idea follows this list).
  • Use batch normalization between the inputs of your convolutional layers (see the article here).
  • Divide your range into buckets and use a softmax. If regression is required, consider building a separate regression network for each bucket and selecting the correct regression network based on the output of the softmax. Cross-entropy loss has shown more success in learning highly non-linear functions.
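
A minimal sketch of how the ReLU and bucketed-softmax suggestions might look with the mxnet symbol API; the number of buckets (10), the layer sizes, and the variable names are illustrative assumptions, not values from the question:

## Conv blocks with batch norm and ReLU instead of tanh (illustrative sketch)
mx_data <- mx.symbol.Variable('data')
conv_1  <- mx.symbol.Convolution(data = mx_data, kernel = c(5, 5), num_filter = 20)
bn_1    <- mx.symbol.BatchNorm(data = conv_1)
relu_1  <- mx.symbol.Activation(data = bn_1, act_type = "relu")
pool_1  <- mx.symbol.Pooling(data = relu_1, pool_type = "max", kernel = c(2, 2), stride = c(2, 2))
conv_2  <- mx.symbol.Convolution(data = pool_1, kernel = c(5, 5), num_filter = 50)
bn_2    <- mx.symbol.BatchNorm(data = conv_2)
relu_2  <- mx.symbol.Activation(data = bn_2, act_type = "relu")
pool_2  <- mx.symbol.Pooling(data = relu_2, pool_type = "max", kernel = c(2, 2), stride = c(2, 2))
flat    <- mx.symbol.Flatten(data = pool_2)
fcl_1   <- mx.symbol.FullyConnected(data = flat, num_hidden = 500)
relu_3  <- mx.symbol.Activation(data = fcl_1, act_type = "relu")
## Classify the waiting time into one of 10 assumed buckets instead of regressing directly
fcl_2   <- mx.symbol.FullyConnected(data = relu_3, num_hidden = 10)
NN_model <- mx.symbol.SoftmaxOutput(data = fcl_2)

With SoftmaxOutput, train_y would have to be the 0-based bucket index of each waiting time (e.g. which of 10 equal-width bins the time falls into) rather than the raw number of seconds.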

Answer 1 (score: 0)

Just a couple of modifications to your code: put train_y into [0,1] as well, and use initializer = mx.init.Xavier(factor_type = "in", magnitude = 2.34).

library(caret)

set.seed(0)

df <- data.frame(replicate(784, runif(7538)))
df$waittime <- runif(7538)

training_index <- createDataPartition(df$waittime, p = .9, times = 1)
training_index <- unlist(training_index)

train_set <- df[training_index, ]
dim(train_set)
test_set <- df[-training_index, ]
dim(test_set)

## Fix train and test datasets
train_data <- data.matrix(train_set)
train_x <- t(train_data[,-785])
train_y <- train_data[, 785]
train_array <- train_x
dim(train_array) <- c(28, 28, 1, ncol(train_array))

test_data <- data.matrix(test_set)
test_x <- t(test_set[, -785])
test_y <- test_set[, 785]
test_array <- test_x
dim(test_array) <- c(28, 28, 1, ncol(test_x))

library(mxnet)
## Model
mx_data <- mx.symbol.Variable('data')
## 1st convolutional layer 5x5 kernel and 20 filters.
conv_1 <- mx.symbol.Convolution(data = mx_data, kernel = c(5, 5), num_filter = 20)
tanh_1 <- mx.symbol.Activation(data = conv_1, act_type = "tanh")
pool_1 <- mx.symbol.Pooling(data = tanh_1, pool_type = "max", kernel = c(2, 2), stride = c(2, 2))
## 2nd convolutional layer 5x5 kernel and 50 filters.
conv_2 <- mx.symbol.Convolution(data = pool_1, kernel = c(5, 5), num_filter = 50)
tanh_2 <- mx.symbol.Activation(data = conv_2, act_type = "tanh")
pool_2 <- mx.symbol.Pooling(data = tanh_2, pool_type = "max", kernel = c(2, 2), stride = c(2, 2))
## 1st fully connected layer
flat <- mx.symbol.Flatten(data = pool_2)
fcl_1 <- mx.symbol.FullyConnected(data = flat, num_hidden = 500)
tanh_3 <- mx.symbol.Activation(data = fcl_1, act_type = "tanh")
## 2nd fully connected layer
fcl_2 <- mx.symbol.FullyConnected(data = tanh_3, num_hidden = 1)
## Output
#NN_model <- mx.symbol.SoftmaxOutput(data = fcl_2)
label <- mx.symbol.Variable("label")
#NN_model <- mx.symbol.MakeLoss(mx.symbol.square(mx.symbol.Reshape(fcl_2, shape = 0) - label))
NN_model <- mx.symbol.LinearRegressionOutput(fcl_2)

mx.set.seed(0)
model <- mx.model.FeedForward.create(NN_model,
                                     X = train_array,
                                     y = train_y,
                                     num.round = 4,
                                     array.batch.size = 64,
                                     initializer = mx.init.Xavier(factor_type = "in", magnitude = 2.34),
                                     learning.rate = 0.00001,
                                     momentum = 0.9,
                                     wd = 0.00001,
                                     eval.metric = mx.metric.rmse)

pred <- predict(model, test_array)

pred[1,1:10]
# [1] 0.4859098 0.4865469 0.5671642 0.5729486 0.5008956 0.4962234 0.4327411 0.5478653 0.5446281 0.5707113
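
If the real waiting times are in seconds, one way to follow this answer's suggestion is to rescale the target into [0,1] before training and map the predictions back afterwards. A minimal sketch, assuming max_wait is the largest waiting time in the training set (a name introduced here for illustration):

## Rescale the target into [0,1] before training, then undo the scaling on predictions
max_wait <- max(train_y)                               # largest training waiting time, in seconds
train_y_scaled <- train_y / max_wait                   # pass this as y to mx.model.FeedForward.create
pred_seconds <- predict(model, test_array) * max_wait  # predictions back on the seconds scale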