Below I've provided both the code and the traceback. I'm working with a very imbalanced dataset and thought that using the kappa score as a loss function would be a good idea. I searched all over the web and found that there is no implementation available in TensorFlow 2.0. I'm now getting the error shown below and don't know how to fix it.
Any ideas?
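For reference, the metric I'm trying to turn into a loss is the standard Cohen's kappa. A quick offline sanity check with scikit-learn (just to illustrate the target value on hard 0/1 labels; this is not part of the failing code below) would look roughly like this:

import numpy as np
from sklearn.metrics import cohen_kappa_score

# Toy example: observed agreement is 6/8 = 0.75, chance agreement is 0.5,
# so kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5
y_true = np.array([1, 0, 1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0, 1, 1, 0])
print(cohen_kappa_score(y_true, y_pred))  # prints 0.5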
import sys
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense

METRICS = [
    keras.metrics.BinaryAccuracy(name='accuracy'),
]

# Config
input_shape = X_train.shape
num_classes = 1
epochs = 50
batch_size = 64

# Class weights for the imbalanced labels
pos = labels.sum()
neg = len(labels) - labels.sum()
weight_for_0 = (1 / neg) * (pos + neg) / 2.0
weight_for_1 = (1 / pos) * (pos + neg) / 2.0
class_weight = {0: weight_for_0, 1: weight_for_1}

def get_cohen_kappa_score(weights=None):
    def cohen_kappa_score(y_true, y_pred):
        """
        Define your code here. You can now use `weights` directly
        in this function.
        """
        size = tf.size(y_true)

        def tf_count(t, val):
            # Count how many elements of `t` are equal to `val`
            elements_equal_to_value = tf.equal(t, val)
            as_ints = tf.cast(elements_equal_to_value, tf.int32)
            count = tf.reduce_sum(as_ints)
            return count

        # Observed agreement p_o: fraction of positions where y_true == y_pred
        summed = tf.math.add(y_true, y_pred)
        ones = tf_count(summed, 2)
        zeros = tf_count(summed, 0)
        po = tf.math.divide((zeros + ones), size)

        # Expected agreement by chance p_e
        true_1 = tf_count(y_true, 1)
        true_0 = tf_count(y_true, 0)
        pred_1 = tf_count(y_pred, 1)
        pred_0 = tf_count(y_pred, 0)
        p_t_1 = tf.math.divide(true_1, size)
        p_p_1 = tf.math.divide(pred_1, size)
        p_t_0 = tf.math.divide(true_0, size)
        p_p_0 = tf.math.divide(pred_0, size)
        p_1 = tf.math.multiply(p_t_1, p_p_1)
        p_0 = tf.math.multiply(p_t_0, p_p_0)
        p_e = tf.math.add(p_1, p_0)

        # kappa = (p_o - p_e) / (1 - p_e)
        num = tf.math.subtract(po, p_e)
        denom = tf.math.subtract(np.float64(1), p_e)
        kappa = tf.cast(tf.math.divide(num, denom), tf.float32)
        print("kappa: ", kappa)
        return kappa

    return cohen_kappa_score

model = Sequential()
# Input layer
model.add(Dense(12, input_dim=8, activation='relu'))
# Hidden layers
model.add(Dense(12, kernel_initializer='glorot_uniform', activation='relu'))
model.add(Dense(4, kernel_initializer='glorot_uniform', activation='relu'))
# Output layer
model.add(Dense(num_classes, kernel_initializer='glorot_uniform', activation='sigmoid'))

# Compile model
model.compile(loss=get_cohen_kappa_score(weights=class_weight), optimizer='adam', metrics=METRICS)

# Fit model
model.fit(
    X_train, y_train, validation_data=(X_test, y_test),
    epochs=epochs, batch_size=batch_size, verbose=0,
    class_weight=class_weight
)
kappa: Tensor("loss_1/dense_8_loss/cohen_kappa_score/Cast_6:0", shape=(), dtype=float32)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-4d38a7aefed1> in <module>()
87 X_train, y_train, validation_data = (X_test, y_test),
88 epochs = epochs, batch_size = batch_size, verbose = 0,
---> 89 class_weight=class_weight
90 )
91 # Final evaluation of the model
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
1211 else:
1212 fit_inputs = x + y + sample_weights
-> 1213 self._make_train_function()
1214 fit_function = self.train_function
1215
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/training.py in _make_train_function(self)
314 training_updates = self.optimizer.get_updates(
315 params=self._collected_trainable_weights,
--> 316 loss=self.total_loss)
317 updates = self.updates + training_updates
318
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in symbolic_fn_wrapper(*args, **kwargs)
73 if _SYMBOLIC_SCOPE.value:
74 with get_graph().as_default():
---> 75 return func(*args, **kwargs)
76 else:
77 return func(*args, **kwargs)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/optimizers.py in get_updates(self, loss, params)
502 @K.symbolic
503 def get_updates(self, loss, params):
--> 504 grads = self.get_gradients(loss, params)
505 self.updates = [K.update_add(self.iterations, 1)]
506
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/optimizers.py in get_gradients(self, loss, params)
91 grads = K.gradients(loss, params)
92 if any(x is None for x in grads):
---> 93 raise ValueError('An operation has `None` for gradient. '
94 'Please make sure that all of your ops have a '
95 'gradient defined (i.e. are differentiable). '
ValueError: An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.
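If it helps: my reading of the last line is that the hard-counting ops in my loss (`tf.equal` plus the integer `tf.cast`) have no gradient, so the optimizer gets `None` when it tries to differentiate the kappa value. Below is a minimal sketch of the kind of differentiable surrogate I assume would be needed; it replaces the hard counts with expectations over the sigmoid probabilities and returns `1 - kappa` so the value can be minimised. The name `soft_kappa_loss` is my own, not an existing TensorFlow API, and I'm not sure this is the right fix.

import tensorflow as tf

def soft_kappa_loss(y_true, y_pred):
    # Differentiable surrogate for binary Cohen's kappa: hard counts are
    # replaced with means over the predicted probabilities, so every op
    # has a gradient. Returns 1 - kappa because the loss is minimised.
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)

    # Soft observed agreement: expected fraction of matching predictions
    po = tf.reduce_mean(y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred))

    # Soft expected agreement by chance
    p_t_1 = tf.reduce_mean(y_true)
    p_p_1 = tf.reduce_mean(y_pred)
    p_e = p_t_1 * p_p_1 + (1.0 - p_t_1) * (1.0 - p_p_1)

    kappa = (po - p_e) / (1.0 - p_e + tf.keras.backend.epsilon())
    return 1.0 - kappa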