# Graph input placeholders.
# X: flattened feature vectors, one row per example (batch_size x feature_dim*frames*2)
# Y: one scalar regression target per example
X = tf.placeholder(dtype=tf.float32, shape=[batch_size, feature_dim * frames * 2])
Y = tf.placeholder(dtype=tf.float32, shape=[batch_size, 1])
# Hidden layer 1: affine transform -> ReLU -> dropout.
W21 = tf.get_variable("W21", shape=[frames * 2 * feature_dim, layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
# NOTE(review): xavier init on a rank-1 bias is unusual; tf.zeros_initializer()
# is the conventional choice — confirm this was intended.
b21 = tf.get_variable("b21", shape=[layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
L1 = tf.nn.dropout(tf.nn.relu(tf.matmul(X, W21) + b21), keep_prob=keep_prob)
# Hidden layer 2: affine transform -> ReLU -> dropout.
W22 = tf.get_variable("W22", shape=[layer_Width, layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
b22 = tf.get_variable("b22", shape=[layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
L2 = tf.nn.dropout(tf.nn.relu(tf.matmul(L1, W22) + b22), keep_prob=keep_prob)
# Hidden layer 3: affine transform -> ReLU -> dropout.
W23 = tf.get_variable("W23", shape=[layer_Width, layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
b23 = tf.get_variable("b23", shape=[layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
L3 = tf.nn.dropout(tf.nn.relu(tf.matmul(L2, W23) + b23), keep_prob=keep_prob)
# Hidden layer 4: affine transform -> ReLU -> dropout.
W24 = tf.get_variable("W24", shape=[layer_Width, layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
b24 = tf.get_variable("b24", shape=[layer_Width],
                      initializer=tf.contrib.layers.xavier_initializer())
L4 = tf.nn.dropout(tf.nn.relu(tf.matmul(L3, W24) + b24), keep_prob=keep_prob)
# Output layer: a single linear unit squashed to the target range.
W25 = tf.get_variable("W25", shape=[layer_Width, 1],
                      initializer=tf.contrib.layers.xavier_initializer())
# NOTE(review): xavier init on a rank-1 bias is unusual; tf.zeros_initializer()
# is the conventional choice — confirm this was intended.
b25 = tf.get_variable("b25", shape=[1],
                      initializer=tf.contrib.layers.xavier_initializer())
# BUG FIX: tf.nn.softmax over a dimension of size 1 always returns exactly 1.0,
# so the original hypothesis was the constant 5*1 - 0.5 = 4.5 for every input,
# and no gradient flowed back through the network. Sigmoid squashes the single
# logit to (0, 1), so the prediction covers the intended range (-0.5, 4.5).
hypothesis = 5 * tf.nn.sigmoid(tf.matmul(L4, W25) + b25) - 0.5
# Mean-squared-error regression loss against the scalar targets Y.
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Adam training op minimizing the MSE.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
我想让这个模型计算模型参数的边际概率。假设神经网络模型为 M,则此模型的参数 theta 服从分布 p(theta | M)。所以我想做的就是把上面代码中的确定性参数 theta 改写成服从分布 p(theta | M) 的随机变量(即贝叶斯神经网络的形式),但我在网上找不到任何解决方案。
此外,一旦我知道如何表示上面的分布,这些参数就会通过似然 p(X | theta, M) 确定数据 X 的分布,而这才是我最终要对其应用优化程序的目标。