I am having some trouble implementing a custom layer in a word embedding model using the Keras interface in R. Below is a toy version of my code that reproduces the error:
rm(list = ls())
library(keras)
library(tensorflow)
# ================================
# inputs
# ================================
input_target <- layer_input(shape = 1)
input_context <- layer_input(shape = 1)
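# each input is a single integer word index per example (hence shape = 1)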
# embedding matrix for mean vectors
embedding_mu <- layer_embedding(
  input_dim = 1000,
  output_dim = 50,
  embeddings_initializer = initializer_random_uniform(),
  input_length = 1,
  name = "embedding_mu"
)
# embedding matrix for sigma vectors
embedding_sigma <- layer_embedding(
  input_dim = 1000,
  output_dim = 50,
  embeddings_initializer = initializer_random_uniform(),
  input_length = 1,
  name = "embedding_sigma"
)
# select target mu from the mu embedding matrix
target_vector_mu <- input_target %>%
  embedding_mu() %>%
  layer_flatten()
# select context mu from the mu embedding matrix
context_vector_mu <- input_context %>%
  embedding_mu() %>%
  layer_flatten()
# select target sigma from the sigma embedding matrix
target_vector_sigma <- input_target %>%
  embedding_sigma() %>%
  layer_flatten()
# select context sigma from the sigma embedding matrix
context_vector_sigma <- input_context %>%
  embedding_sigma() %>%
  layer_flatten()
# ================================
# custom layer
# ================================
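# Up to an additive constant (k/2 for embedding dimension k), the energy below
# is the KL divergence KL( N(mu_c, Sigma_c) || N(mu_w, Sigma_w) ) between the
# diagonal-Gaussian context and target embeddings, assuming the sigma
# embeddings store per-dimension variances.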
KLenergy <- function(args){ # args <- list(mu_w, mu_c, sigma_w, sigma_c)
  comp1 <- tf$reduce_sum(tf$div(args[[4]], args[[3]]), axis = integer(1))
  comp2 <- tf$reduce_sum(tf$div(tf$square(tf$subtract(args[[1]], args[[2]])), args[[3]]), axis = integer(1))
  comp3 <- tf$subtract(tf$log(tf$reduce_prod(args[[4]], axis = integer(1))),
                       tf$log(tf$reduce_prod(args[[3]], axis = integer(1))))
  energy <- 0.5*(comp1 + comp2 - comp3)
  return(energy)
}
kl_energy <- layer_lambda(list(target_vector_mu,
                               context_vector_mu,
                               target_vector_sigma,
                               context_vector_sigma),
                          KLenergy)
output <- layer_dense(kl_energy, units = 1, activation = "relu")
# ================================
# model compile
# ================================
model <- keras_model(list(input_target, input_context), output)
model %>% compile(
  loss = "binary_crossentropy",
  optimizer = "Adagrad")
summary(model)
I get the following error after executing the "output" layer:
Error in py_call_impl(callable, dots$args, dots$keywords) :
ValueError: Input 0 is incompatible with layer dense_2: expected min_ndim=2, found ndim=1
Detailed traceback:
File "/anaconda3/envs/r-tensorflow/lib/python3.6/site-packages/keras/engine/base_layer.py", line 414, in __call__
self.assert_input_compatibility(inputs)
File "/anaconda3/envs/r-tensorflow/lib/python3.6/site-packages/keras/engine/base_layer.py", line 327, in assert_input_compatibility
str(K.ndim(x)))
I expected the kl_energy layer to have shape (None, 1), but instead I get (None,):
kl_energy
Tensor("lambda_5/Mul:0", shape=(?,), dtype=float32)
Am I missing an argument in the definition of the custom layer? I tried setting keepdims = TRUE, but that gives me a kl_energy layer with shape (1, None):

kl_energy
Tensor("lambda_7/Mul:0", shape=(1, ?), dtype=float32)

which is not what I want. Ultimately, the output of this layer should have the same shape as in the original word2vec model (which uses layer_dot - see here), but produced by the custom layer above.

Any guidance would be appreciated.

Working code (credit to Daniel below):
KLenergy <- function(args){ # args <- list(mu_w, mu_c, sigma_w, sigma_c)
  comp1 <- tf$reduce_sum(tf$div(args[[4]], args[[3]]), axis = as.integer(1), keepdims = TRUE)
  comp2 <- tf$reduce_sum(tf$div(tf$square(tf$subtract(args[[1]], args[[2]])), args[[3]]),
                         axis = as.integer(1), keepdims = TRUE)
  comp3 <- tf$subtract(tf$log(tf$reduce_prod(args[[4]], axis = as.integer(1), keepdims = TRUE)),
                       tf$log(tf$reduce_prod(args[[3]], axis = as.integer(1), keepdims = TRUE)))
  energy <- 0.5*(comp1 + comp2 - comp3)
  return(energy)
}
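Rebuilding the lambda layer with this function gives the expected shape. A quick way to confirm (a sketch reusing the object names from the code above):

kl_energy <- layer_lambda(list(target_vector_mu,
                               context_vector_mu,
                               target_vector_sigma,
                               context_vector_sigma),
                          KLenergy)
kl_energy  # printing the tensor should now report shape=(?, 1), i.e. (None, 1),
           # which layer_dense accepts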
Answer 0 (score: 0)
Don't forget to use keepdims = TRUE so that the reduction returns shape (None, 1). And, as mentioned above, you should use as.integer(1): in R, integer(1) creates a length-one vector of zeros (i.e. 0L), so it reduces over axis 0 instead of axis 1.
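To see why as.integer(1) matters, note what integer(1) actually evaluates to in R; a minimal check, runnable in any R session without Keras:

integer(1)     # [1] 0 -- integer(n) allocates a length-n vector of zeros, so this passes 0L
as.integer(1)  # [1] 1 -- coerces the number 1 to the integer value 1L
1L             # [1] 1 -- equivalent integer-literal shorthand

So axis = integer(1) told TensorFlow to reduce over axis 0, the batch dimension (hence the (1, None) shape once keepdims = TRUE was set), whereas axis = as.integer(1) reduces over axis 1, the embedding dimension, and with keepdims = TRUE returns the desired (None, 1).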