I am trying to build a Bayesian neural network with variational inference using keras.layers, but when I import the class I wrote for variational inference, I get a ValueError. I can use tf.layers.dense without any error, but I want to use variational inference. Here is the code with tf.layers, which runs without any errors:
h_1 = tf.layers.dense(inputs=inputs, units=128, activation=tf.nn.leaky_relu, kernel_regularizer=regularizer)
h_1_a = tf.layers.dense(inputs=action, units=128, activation=tf.nn.leaky_relu, kernel_regularizer=regularizer)
h_1_concat = tf.concat(axis=1, values=[h_1, h_1_a])
h_2 = tf.layers.dense(inputs=h_1_concat, units=64, activation=tf.nn.leaky_relu, kernel_regularizer=regularizer)
h_3 = tf.layers.dense(inputs=h_2, units=16, activation=tf.nn.leaky_relu, kernel_regularizer=regularizer)
out = tf.layers.dense(inputs=h_3, units=1, kernel_regularizer=regularizer)
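(regularizer, inputs and action are defined elsewhere in my script. Purely for illustration, stand-ins along these lines, with made-up shapes, would make the block above runnable on its own:)

import tensorflow as tf

# hypothetical placeholders, not my real definitions
inputs = tf.placeholder(tf.float32, shape=(None, 7))
action = tf.placeholder(tf.float32, shape=(None, 3))
regularizer = tf.contrib.layers.l2_regularizer(scale=0.01)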
Here is the class I wrote; importing it is what causes the error:
import sys
import numpy as np
import tensorflow as tf
from keras import activations, initializers
from keras import backend as K
from keras.layers import Input, Layer
from keras.models import Model

def mixture_prior_params(sigma_1, sigma_2, pi, return_sigma=False):
    params = K.variable([sigma_1, sigma_2, pi], name='mixture_prior_params')
    sigma = np.sqrt(pi * sigma_1 ** 2 + (1 - pi) * sigma_2 ** 2)  # effective std of the scale-mixture prior (VI)
    return params, sigma
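# For clarity: with the values used below (sigma_1=1.5, sigma_2=0.5, pi=0.3), this gives
# sigma = sqrt(0.3 * 1.5**2 + 0.7 * 0.5**2) = sqrt(0.85) ≈ 0.922, the stddev used to
# initialize kernel_mu and bias_mu in the layer.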
def log_mixture_prior_prob(w):
    comp_1_dist = tf.distributions.Normal(0.0, prior_params[0])
    comp_2_dist = tf.distributions.Normal(0.0, prior_params[1])
    comp_1_weight = prior_params[2]
    return K.log(comp_1_weight * comp_1_dist.prob(w) + (1 - comp_1_weight) * comp_2_dist.prob(w))

prior_params, prior_sigma = mixture_prior_params(sigma_1=1.5, sigma_2=0.5, pi=0.3)
class DenseVariational(Layer):
    def __init__(self, output_dim, kl_loss_weight, activation=None, **kwargs):
        self.output_dim = output_dim
        self.kl_loss_weight = kl_loss_weight
        self.activation = activations.get(activation)
        super().__init__(**kwargs)

    def build(self, input_shape):
        self._trainable_weights.append(prior_params)
        # note: input_shape[0][1] assumes input_shape is a list of shapes (i.e. multiple inputs)
        self.kernel_mu = self.add_weight(name='kernel_mu',
                                         shape=(input_shape[0][1], self.output_dim),
                                         initializer=initializers.normal(stddev=prior_sigma),
                                         trainable=True)
        self.bias_mu = self.add_weight(name='bias_mu',
                                       shape=(self.output_dim,),
                                       initializer=initializers.normal(stddev=prior_sigma),
                                       trainable=True)
        self.kernel_rho = self.add_weight(name='kernel_rho',
                                          shape=(input_shape[0][1], self.output_dim),
                                          initializer=initializers.constant(0.0),
                                          trainable=True)
        self.bias_rho = self.add_weight(name='bias_rho',
                                        shape=(self.output_dim,),
                                        initializer=initializers.constant(0.0),
                                        trainable=True)
        super().build(input_shape)
    def call(self, x):
        # sys.exit()  # debugging exit, left over from troubleshooting
        kernel_sigma = tf.math.softplus(self.kernel_rho)
        # reparameterization trick: sample weights as mu + sigma * eps
        kernel = self.kernel_mu + kernel_sigma * tf.random.normal(self.kernel_mu.shape)
        bias_sigma = tf.math.softplus(self.bias_rho)
        bias = self.bias_mu + bias_sigma * tf.random.normal(self.bias_mu.shape)
        self.add_loss(self.kl_loss(kernel, self.kernel_mu, kernel_sigma) +
                      self.kl_loss(bias, self.bias_mu, bias_sigma))
        return self.activation(K.dot(x, kernel) + bias)

    def compute_output_shape(self, input_shape):
        return (input_shape[0][1], self.output_dim)

    def kl_loss(self, w, mu, sigma):
        variational_dist = tf.distributions.Normal(mu, sigma)
        return self.kl_loss_weight * K.sum(variational_dist.log_prob(w) - log_mixture_prior_prob(w))
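To isolate the problem, a minimal snippet along these lines (a sketch; the kl_loss_weight value is arbitrary) goes through the same build path with a single-input layer:

x_in = Input(shape=(7,))
x = DenseVariational(128, kl_loss_weight=0.01)(x_in)  # build() is invoked here
model = Model(x_in, x)
model.summary()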
Here is the part where I use this class. My input has shape [7,128], but I cannot make sense of the error:
_inputs = tf.concat(axis=1, values=[inputs, action])
x_in = Input(shape=(7,))
x = DenseVariational(128, kl_loss_weight=kl_loss_weight, activation='relu')(x_in)
x = DenseVariational(64, kl_loss_weight=kl_loss_weight, activation='relu')(x)
x = DenseVariational(64, kl_loss_weight=kl_loss_weight, activation='relu')(x)
x = DenseVariational(16, kl_loss_weight=kl_loss_weight, activation='relu')(x)
x = DenseVariational(3, kl_loss_weight=kl_loss_weight)(x)
model = Model(x_in, x)
out = model(x)
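(kl_loss_weight is defined elsewhere in my script. For reference, in similar Bayes-by-backprop setups the KL term is usually weighted by one over the number of mini-batches per epoch, along these lines, with made-up values:)

# hypothetical values, not my real ones
train_size = 1000
batch_size = 32
kl_loss_weight = 1.0 / (train_size / batch_size)  # = 0.032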