我正在尝试使用 tensorflow.metrics.recall
和 tensorflow.metrics.precision
，但 TensorFlow 会引发 FailedPreconditionError
。请注意，我已经调用了 sess.run(tf.global_variables_initializer())
代码如下:
import numpy as np
import tensorflow as tf

y_true = np.array([[1, 1, 0], [0, 1, 1], [0, 0, 1]])
y_pred = np.array([[1, 0, 0], [1, 0, 1], [0, 0, 1]])

predictions = tf.placeholder(tf.int64, shape=[3, 3])
labels = tf.placeholder(tf.int64, shape=[3, 3])

# tf.metrics.* return a (value, update_op) pair; unpack both so we can
# explicitly run the update op before reading the metric value.
prec, prec_update = tf.metrics.precision(labels, predictions)
rec, rec_update = tf.metrics.recall(labels, predictions)

with tf.Session() as sess:
    # The running counters behind tf.metrics.* live in the LOCAL_VARIABLES
    # collection, so tf.global_variables_initializer() does not touch them —
    # that is what raised the FailedPreconditionError. Initialize locals too.
    sess.run(tf.local_variables_initializer())
    # First accumulate the batch into the metric state via the update ops,
    # then read the metric values in a separate run to avoid ordering races.
    sess.run([prec_update, rec_update],
             feed_dict={predictions: y_pred, labels: y_true})
    p, r = sess.run([prec, rec])
    print("precision: {}, recall: {}".format(p, r))
答案 0（得分：1）：
def recall_at_k(y_true, y_pred, k=90.0):
    """Recall metric with a percentile-based decision threshold.

    Computes the recall over the whole batch, binarizing ``y_pred`` at the
    value of its ``k``-th percentile instead of the usual fixed 0.5 cutoff.

    Args:
        y_true: Tensor of ground-truth binary labels (0/1).
        y_pred: Tensor of raw predicted scores.
        k: Percentile in [0, 100] of ``y_pred`` used as the threshold
           (default 90.0, i.e. the top 10% of scores count as positive).

    Returns:
        Scalar tensor holding the batch recall ratio.
    """
    # Threshold at the k-th percentile of the raw predictions.
    # NOTE(review): tf.contrib is TF 1.x only — consistent with the rest of
    # this page (tf.placeholder / tf.Session); confirm the TF version in use.
    threshold_value = tf.contrib.distributions.percentile(y_pred, q=k)
    # Binarize: clip raw scores into [0, 1], then mark everything strictly
    # above the percentile threshold as a positive prediction.
    y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())
    # True positives; round() guards against float fuzz so the count is integral.
    true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
    # Number of actual positives in the batch.
    possible_positives = K.sum(K.clip(y_true, 0, 1))
    # epsilon() avoids division by zero when the batch contains no positives.
    recall_ratio = true_positives / (possible_positives + K.epsilon())
    return recall_ratio
tf.metrics.precision 和 tf.metrics.recall 在内部创建的是局部变量
（即使用 collections=[tf.GraphKeys.LOCAL_VARIABLES] 创建的变量）。
所以你需要执行 sess.run(tf.local_variables_initializer())。