I've recently been learning machine learning, so I built some CNN models in both Keras and TensorFlow (without Keras) to practice. But the CNN results from Keras and TensorFlow are not the same, which is confusing.
The Keras result is 68%, while TensorFlow gives 66.6%.
Here is the TensorFlow code:
def convolution(self, inputs_fw):
    # Apply dropout to the embedded inputs
    inputs_fw = tf.nn.dropout(inputs_fw, keep_prob=self.dropout_keep_prob)
    with tf.name_scope('forward_conv'):
        fil1 = 2
        fil2 = 3
        # Branch 1: conv1d with kernel size 2 and tanh activation
        conv1_fw = tf.layers.conv1d(inputs_fw, filters=100, kernel_size=fil1,
                                    padding="VALID", activation=tf.nn.tanh)
        print "print conv1"
        print conv1_fw
        conv1_fw = conv1_fw + self.cbiases['convolution1']
        # Max-pool over the entire sequence (pool size equals the conv output length)
        pool_fw = tf.layers.max_pooling1d(inputs=conv1_fw,
                                          pool_size=self.max_sentence_len - fil1 + 1,
                                          strides=self.max_sentence_len - fil1 + 1)
        print pool_fw
        pool_fw_flat = tf.reshape(pool_fw, [-1, 1 * 100])
        # Branch 2: conv1d with kernel size 3 and tanh activation
        conv1_fw2 = tf.layers.conv1d(inputs_fw, filters=100, kernel_size=fil2,
                                     padding="VALID", activation=tf.nn.tanh)
        print "print conv12"
        conv1_fw2 = conv1_fw2 + self.cbiases['convolution2']
        print conv1_fw2
        pool_fw2 = tf.layers.max_pooling1d(inputs=conv1_fw2,
                                           pool_size=self.max_sentence_len - fil2 + 1,
                                           strides=self.max_sentence_len - fil2 + 1)
        print pool_fw2
        pool_fw_flat2 = tf.reshape(pool_fw2, [-1, 1 * 100])
        # Concatenate both pooled branches and project to class logits
        output = tf.concat([pool_fw_flat, pool_fw_flat2], 1)
        predict = tf.matmul(output, self.weights['softmax_conv']) + self.biases['softmax_conv']
        print predict.get_shape()
        return predict
The loss and accuracy are computed by:
with tf.name_scope('loss'):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prob, labels=self.y))
with tf.name_scope('train'):
    global_step = tf.Variable(0, name="tr_global_step", trainable=False)
    optimizer = tf.train.AdadeltaOptimizer(learning_rate=self.learning_rate).minimize(cost, global_step=global_step)
with tf.name_scope('predict'):
    correct_pred = tf.equal(tf.argmax(prob, 1), tf.argmax(self.y, 1))
    # accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Note: this sums the number of correct predictions rather than averaging them
    accuracy = tf.reduce_sum(tf.cast(correct_pred, tf.int32))
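For reference, since the accuracy op returns a count rather than a mean, the percentage is computed outside the graph; a minimal sketch (the feed dict and num_test_examples are hypothetical names):

    # accuracy returns an integer count of correct predictions
    correct_count = sess.run(accuracy, feed_dict=test_feed)
    accuracy_pct = 100.0 * correct_count / num_test_examples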
Did I make a mistake somewhere? Many thanks.
Answer 0 (score: 0)
The cause is probably the dropout layer in your code, which randomly samples the layer's outputs. Try re-running your TensorFlow code twice and you will get different results as well:
inputs_fw = tf.nn.dropout(inputs_fw, keep_prob=self.dropout_keep_prob)
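If the two runs are to be compared at all, it may help to fix the random seeds before the graph is built, so that weight initialization and dropout sampling are repeatable. This is a sketch under the assumption of the TF 1.x API, not part of the original answer:

    import numpy as np
    import tensorflow as tf

    # Seed NumPy (data shuffling) and the TF graph (weight init, dropout masks)
    np.random.seed(42)
    tf.set_random_seed(42)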
Answer 1 (score: 0)
It looks like you should apply softmax to get the probabilities:
logits = tf.matmul(output, self.weights['softmax_conv']) + self.biases['softmax_conv']
predict = tf.nn.softmax(logits)
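One caveat: tf.nn.softmax_cross_entropy_with_logits expects raw logits and applies softmax internally, so only the prediction path should go through tf.nn.softmax. A minimal sketch reusing the names from the question:

    # Loss is computed on the raw logits; the op applies softmax internally
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.y))
    # Probabilities are only needed for reporting predictions; argmax is unchanged by softmax
    predict = tf.nn.softmax(logits)
    correct_pred = tf.equal(tf.argmax(predict, 1), tf.argmax(self.y, 1))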
# Apply dropout only while training: tf.nn.dropout has no training flag,
# but tf.layers.dropout takes a boolean `training` argument (note it expects
# a drop *rate*, i.e. 1 - keep_prob)
inputs_fw = tf.layers.dropout(inputs_fw, rate=1.0 - self.dropout_keep_prob,
                              training=is_training)
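A short usage sketch, assuming is_training is a boolean placeholder (the placeholder and feed names are hypothetical):

    is_training = tf.placeholder(tf.bool, name="is_training")

    # Training step: dropout active
    sess.run(optimizer, feed_dict={self.x: batch_x, self.y: batch_y, is_training: True})
    # Evaluation: dropout disabled, so repeated runs give the same accuracy
    sess.run(accuracy, feed_dict={self.x: test_x, self.y: test_y, is_training: False})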