I have a model with two stages, and I would like to know how to print the cost value of each stage. Here is my code.
## SE_1st Hidden layer
W1 = tf.get_variable("W1", shape=[(2*spl+1)*feature_dim,layer_width_SE], initializer=tf.constant_initializer(value=W1_SE))
Variable = tf.get_variable("b1", shape=[layer_width_SE], initializer=tf.constant_initializer(value=b1_SE))
L11 = tf.nn.relu(tf.matmul(X, W1) + Variable)
L11 = tf.nn.dropout(L11, keep_prob=keep_prob)
## SE_2nd Hidden layer
W2 = tf.get_variable("W2", shape=[layer_width_SE,layer_width_SE], initializer=tf.constant_initializer(value=W2_SE))
Variable_1 = tf.get_variable("b2", shape=[layer_width_SE], initializer=tf.constant_initializer(value=b2_SE))
L12 = tf.nn.relu(tf.matmul(L11, W2)+ Variable_1)
L12 = tf.nn.dropout(L12, keep_prob=keep_prob)
## SE_3rd Hidden layer
W3 = tf.get_variable("W3", shape=[layer_width_SE, layer_width_SE], initializer=tf.constant_initializer(value=W3_SE))
Variable_2 = tf.get_variable("b3", shape=[layer_width_SE], initializer=tf.constant_initializer(value=b3_SE))
L13 = tf.nn.relu(tf.matmul(L12, W3) + Variable_2)
L13 = tf.nn.dropout(L13, keep_prob=keep_prob)
## SE_4th Hidden layer
W4 = tf.get_variable("W4", shape=[layer_width_SE,layer_width_SE], initializer=tf.constant_initializer(value=W4_SE))
Variable_3 = tf.get_variable("b4", shape=[layer_width_SE], initializer=tf.constant_initializer(value=b4_SE))
L14 = tf.nn.relu(tf.matmul(L13, W4)+ Variable_3)
L14 = tf.nn.dropout(L14, keep_prob=keep_prob)
## enhanced_speech_output layer
W5 = tf.get_variable("W5", shape=[layer_width_SE,feature_dim], initializer=tf.constant_initializer(value=W5_SE))
Variable_4 = tf.get_variable("b5", shape=[feature_dim], initializer=tf.constant_initializer(value=b5_SE))
SE_hypothesis = tf.matmul(L14, W5) + Variable_4
########################STOI DNN#########################
SE_hypothesis_append = tf.reshape(SE_hypothesis, [(batch_size_SE // frames), (feature_dim*frames)])
Y_append = tf.reshape(Y, [(batch_size_SE // frames), (feature_dim*frames)])
feature = tf.concat([SE_hypothesis_append, Y_append],axis=1)
## STOI_1st Hidden layer
W21 = tf.get_variable("W21", shape=[feature_dim*frames*2,layer_width_STOI], initializer=tf.constant_initializer(value=W1_STOI))
b21 = tf.get_variable("b21", shape=[layer_width_STOI], initializer=tf.constant_initializer(value=b1_STOI))
L21 = tf.nn.relu(tf.matmul(feature, W21) + b21)
L21 = tf.nn.dropout(L21, keep_prob=keep_prob)
## STOI_2nd Hidden layer
W22 = tf.get_variable("W22", shape=[layer_width_STOI, layer_width_STOI // 2], initializer=tf.constant_initializer(value=W2_STOI))
b22 = tf.get_variable("b22", shape=[layer_width_STOI // 2], initializer=tf.constant_initializer(value=b2_STOI))
L22 = tf.nn.relu(tf.matmul(L21, W22)+ b22)
L22 = tf.nn.dropout(L22, keep_prob=keep_prob)
## STOI_3rd Hidden layer
W23 = tf.get_variable("W23", shape=[layer_width_STOI // 2, layer_width_STOI // 4], initializer=tf.constant_initializer(value=W3_STOI))
b23 = tf.get_variable("b23", shape=[layer_width_STOI // 4], initializer=tf.constant_initializer(value=b3_STOI))
L23 = tf.nn.relu(tf.matmul(L22, W23) + b23)
L23 = tf.nn.dropout(L23, keep_prob=keep_prob)
## STOI_4th Hidden layer
W24 = tf.get_variable("W24", shape=[layer_width_STOI // 4, layer_width_STOI // 8], initializer=tf.constant_initializer(value=W4_STOI))
b24 = tf.get_variable("b24", shape=[layer_width_STOI // 8], initializer=tf.constant_initializer(value=b4_STOI))
L24 = tf.nn.relu(tf.matmul(L23, W24)+ b24)
L24 = tf.nn.dropout(L24, keep_prob=keep_prob)
## enhanced_speech_output layer
W25 = tf.get_variable("W25", shape=[layer_width_STOI // 8, 1], initializer=tf.constant_initializer(value=W5_STOI))
b25 = tf.get_variable("b25", shape=[1], initializer=tf.constant_initializer(value=b5_STOI))
STOI_hypothesis = tf.matmul(L24, W25) + b25
########################Cost function and optimizer#########################
SE_var_list = [W1, W2, W3, W4, W5, Variable, Variable_1, Variable_2, Variable_3, Variable_4]
cost_SE = tf.reduce_mean(tf.square(Y - SE_hypothesis))
cost_STOI = tf.reduce_mean(tf.square(STOI_target - STOI_hypothesis))
cost = (1-lamda)*cost_SE + lamda*cost_STOI
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost, var_list = SE_var_list)
saver = tf.train.Saver()
What I want to see are the values of cost_SE and cost_STOI, so that I can tune lamda to get the most out of the model. I have tried several approaches, but none of them worked.
feed_dict = {X: batch_con_x, Y: batch_con_y, STOI_target: STOI_maximum, keep_prob: 0.5}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
The code above only gives me the sum of the two cost terms, but what I need is each cost value separately. Is there a way to do this?
Answer 0 (score: 0)
You can list whatever tensors you need in the first argument of sess.run:
c, cost_SE_eval, cost_STOI_eval, _ = sess.run(
[cost, cost_SE, cost_STOI, optimizer], feed_dict=feed_dict)
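Fetching cost_SE and cost_STOI alongside cost adds no extra computation: TensorFlow executes the graph once per sess.run call and simply returns the additional tensor values. As a minimal sketch of how you might log both terms during training (num_steps and the print format are placeholders; the other names are taken from your code above):

for step in range(num_steps):  # num_steps is a placeholder for your own loop length
    feed_dict = {X: batch_con_x, Y: batch_con_y, STOI_target: STOI_maximum, keep_prob: 0.5}
    # run one optimizer step and fetch the total cost plus its two components
    c, c_se, c_stoi, _ = sess.run([cost, cost_SE, cost_STOI, optimizer], feed_dict=feed_dict)
    print('step %d: cost=%.4f, cost_SE=%.4f, cost_STOI=%.4f' % (step, c, c_se, c_stoi))

Printing both components lets you see which term dominates the total cost and adjust lamda accordingly.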