I am trying to adapt some code recently posted on R-Bloggers (https://www.r-bloggers.com/how-to-build-your-own-neural-network-from-scratch-in-r/) that builds a basic neural network. I can run the example as given, but when I modify it to use my own data for the predictor and outcome variables (which have more rows and columns), it falls over.
The error I get is:
Error in nn$layer1 %*% nn$weights2 : non-conformable arguments
I believe it comes from the feed.forward function:
feed.forward <- function(nn){
  nn$layer1 <- sigmoid(nn$input %*% nn$weights1)
  nn$output <- sigmoid(nn$layer1 %*% nn$weights2)
  nn
}
From my searching around the web, I think this is because nn$weights1 and nn$input are not sized correctly.
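To illustrate what I mean about the sizes (just my own debugging, using the objects defined in the full script below; %*% needs the column count of the left operand to match the row count of the right one):

dim(my.nn$input)     ## 4 x 3 with the example data
dim(my.nn$weights1)  ## 3 x 4, so layer1 comes out 4 x 4
dim(my.nn$weights2)  ## 4 x 1 -- conformable here only because nrow(x) is 4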
So I suppose my question is: can anyone show me how to modify the code below so that it adapts dynamically to the number of rows and columns in x and the number of rows in y?
If you need any more detail, please ask. Thanks in advance!
Jay
Full script:
## Build a Simple Neural Network
## Source: https://www.r-bloggers.com/how-to-build-your-own-neural-network-from-scratch-in-r/
## Libraries
library("ggplot2")
## Create the training data
## Predictor vars
x <- matrix(c(0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1), ncol = 3, byrow = TRUE)
## Outcomes
y <- c(0, 1, 1, 0)
## Check it
cbind(x, y)
## Create an object to store the state of the NN
## Random value for initial weights for layer 1
rand.vector <- runif(ncol(x) * nrow(x))
## Convert the vector into a matrix
rand.matrix <- matrix(rand.vector, nrow = ncol(x), ncol = nrow(x), byrow = TRUE)
## List to store the state of NN as trained
my.nn <- list(input = x,
              weights1 = rand.matrix,
              weights2 = matrix(runif(4), ncol = 1),
              y = y,
              output = matrix(rep(0, times = 4), ncol = 1))
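For my own bookkeeping, the shapes this list ends up with for the example data; the hard-wired 4s (really nrow(x) of the example) are what I suspect bite me on other data:

## input:    4 x 3  (nrow(x) x ncol(x))
## weights1: 3 x 4  (ncol(x) x nrow(x))
## weights2: 4 x 1  (literal 4)
## y:        length 4
## output:   4 x 1  (literal 4 again)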
## Activation Function
sigmoid <- function(x){
  1.0 / (1.0 + exp(-x))
}
## Derivative of the activation function
sigmoid.derivative <- function(x){
  x * (1.0 - x)
}
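One thing that confused me at first, so noting it here: sigmoid.derivative expects a value that has already been passed through sigmoid, since sigmoid'(z) = s * (1 - s) with s = sigmoid(z). A quick numerical sanity check (my own, not from the original post):

z <- 0.7
h <- 1e-6
(sigmoid(z + h) - sigmoid(z)) / h  ## ~ 0.2217
sigmoid.derivative(sigmoid(z))     ## ~ 0.2217, the same slope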
## Loss function
loss.function <- function(nn){
  sum((nn$y - nn$output) ^ 2)
}
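A tiny worked example of the loss, to make sure I read it right (my own numbers, not from the post):

## (0 - 0.2)^2 + (1 - 0.7)^2 = 0.04 + 0.09 = 0.13
loss.function(list(y = c(0, 1), output = c(0.2, 0.7)))  ## 0.13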
## Feedforward and Backpropagation
## In order to minimize the loss function we perform feedforward
## and backpropagation. Feedforward applies the activation function
## to the layers and produces a predicted outcome.
feed.forward <- function(nn){
  nn$layer1 <- sigmoid(nn$input %*% nn$weights1)
  nn$output <- sigmoid(nn$layer1 %*% nn$weights2)
  nn
}
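A quick shape trace after one pass with the example data (again just my own debugging, run after the definitions above):

tmp <- feed.forward(my.nn)
dim(tmp$layer1)  ## 4 x 4 with the example data
dim(tmp$output)  ## 4 x 1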
## Backpropagation takes the predicted outcome, resulting from the
## feedforward step, and adjusts the layer weights to reduce the loss function
backprop <- function(nn){
  ## Apply the chain rule to find the derivative of the loss function
  ## with respect to weights2 and weights1
  d.weights2 <- (
    t(nn$layer1) %*%
      ## 2 * (nn$y - nn$output) comes from differentiating the squared-error
      ## loss; the sign convention means the updates below are added
      (2 * (nn$y - nn$output) *
         sigmoid.derivative(nn$output))
  )
  d.weights1 <- (2 * (nn$y - nn$output) * sigmoid.derivative(nn$output)) %*%
    t(nn$weights2)
  d.weights1 <- d.weights1 * sigmoid.derivative(nn$layer1)
  d.weights1 <- t(nn$input) %*% d.weights1
  ## Update the weights using the derivative (slope) of the loss function
  nn$weights1 <- nn$weights1 + d.weights1
  nn$weights2 <- nn$weights2 + d.weights2
  nn
}
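To keep the chain rule straight for myself, here is how I read the gradients above, with the shapes for the 4 x 3 example data (delta is my own shorthand, not a name from the original post):

## delta      = 2 * (y - output) * sigmoid.derivative(output)   # 4 x 1
## d.weights2 = t(layer1) %*% delta                             # 4 x 1, matches weights2
## d.weights1 = t(input) %*% ((delta %*% t(weights2))
##                            * sigmoid.derivative(layer1))     # 3 x 4, matches weights1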
## Train the model
## number of times to perform feedforward and backpropagation
n <- 1500
## Data frame to store the results of the loss function. This data
## frame is used to produce the plot in the next code chunk
loss.df <- data.frame(iteration = 1:n, loss = vector("numeric", length = n))
for (i in seq_len(n)) {
  my.nn <- feed.forward(my.nn)
  my.nn <- backprop(my.nn)
  ## Store the result of the loss function; we will plot this later
  loss.df$loss[i] <- loss.function(my.nn)
}
## Print the predicted outcome next to the actual outcome
data.frame("Predicted" = round(my.nn$output, 3), "Actual" = y)
## Plot the cost
ggplot(data = loss.df, aes(x = iteration, y = loss)) +
  geom_line()
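In case it helps to see where I am headed, this is my current guess at a shape-agnostic initialisation (hidden.size is a hidden-layer width I picked myself; I am not at all sure the dimensions are right, which is exactly what I am asking about):

## My attempt at deriving the shapes from x and y instead of literal 4s
hidden.size <- 4  ## arbitrary width I chose for the hidden layer
my.nn <- list(input = x,
              weights1 = matrix(runif(ncol(x) * hidden.size),
                                nrow = ncol(x), ncol = hidden.size),
              weights2 = matrix(runif(hidden.size), ncol = 1),
              y = matrix(y, ncol = 1),
              output = matrix(0, nrow = nrow(x), ncol = 1))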