I would like to define MDA (mean directional accuracy) as a loss function in Keras.
I have an example in TensorFlow that seems to work:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def mda(y, y_hat):
    c = tf.equal(tf.sign(y[1:] - y[:-1]), tf.sign(y_hat[1:] - y_hat[:-1]))
    return tf.reduce_mean(tf.cast(c, tf.float32))
y = np.array([0, 1, 2, 1, 0, 1])
y_hat = np.array([0, 1, 0, 1, 0, 1])
plt.plot(y)
plt.plot(y_hat, alpha=.6)
plt.show()
sess = tf.Session()
print(sess.run(mda(tf.constant(y), tf.constant(y_hat))))
The corresponding plot:
[mda plot: y and y_hat from the snippet above]
Result: 0.6, which is 3/5 and makes sense in this case, because 3 of the 5 directions are predicted correctly.
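For reference, the same 3-of-5 count can be reproduced directly in NumPy (a small illustrative sketch; np.diff just computes the step-to-step changes used above):
import numpy as np

y = np.array([0, 1, 2, 1, 0, 1])
y_hat = np.array([0, 1, 0, 1, 0, 1])

# Directions of the true and predicted series: [1, 1, -1, -1, 1] vs. [1, -1, 1, -1, 1]
dir_true = np.sign(np.diff(y))
dir_pred = np.sign(np.diff(y_hat))

# 3 of the 5 directions agree, hence 0.6
print((dir_true == dir_pred).mean())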
However, if I try to implement this in Keras:
def mda(y_true, y_pred):
    """Compute Mean Directional Accuracy.

    https://en.wikipedia.org/wiki/Mean_Directional_Accuracy_(MDA)

    Parameters
    ----------
    y_true : tensor
    y_pred : tensor

    Returns
    -------
    mda : tensor
    """
    s = K.equal(K.sign(y_true[1:] - y_true[:-1]),
                K.sign(y_pred[1:] - y_pred[:-1]))
    return K.mean(K.cast(s, K.floatx()))
I get ValueError: None values not supported. and I cannot work around it.
Log:
ValueError Traceback (most recent call last)
<ipython-input-101-b8bdd1fcaea1> in <module>()
67 callbacks=[reduce_lr],
68 validation_data=(teX, teY),
---> 69 verbose=1)
70 print("Runtime:", time.time() - start)
71
/anaconda/lib/python3.6/site-packages/keras/models.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
861 class_weight=class_weight,
862 sample_weight=sample_weight,
--> 863 initial_epoch=initial_epoch)
864
865 def evaluate(self, x, y, batch_size=32, verbose=1,
/anaconda/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
1411 else:
1412 ins = x + y + sample_weights
-> 1413 self._make_train_function()
1414 f = self.train_function
1415
/anaconda/lib/python3.6/site-packages/keras/engine/training.py in _make_train_function(self)
935 self._collected_trainable_weights,
936 self.constraints,
--> 937 self.total_loss)
938 updates = self.updates + training_updates
939 # Gets loss and metrics. Updates weights at each call.
/anaconda/lib/python3.6/site-packages/keras/optimizers.py in get_updates(self, params, constraints, loss)
231 for p, g, a in zip(params, grads, accumulators):
232 # update accumulator
--> 233 new_a = self.rho * a + (1. - self.rho) * K.square(g)
234 self.updates.append(K.update(a, new_a))
235 new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
/anaconda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in square(x)
1369 A tensor.
1370 """
-> 1371 return tf.square(x)
1372
1373
/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in square(x, name)
472 indices=x.indices, values=x_square, dense_shape=x.dense_shape)
473 else:
--> 474 return gen_math_ops.square(x, name=name)
475
476
/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py in square(x, name)
2731 A `Tensor`. Has the same type as `x`.
2732 """
-> 2733 result = _op_def_lib.apply_op("Square", x=x, name=name)
2734 return result
2735
/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py in apply_op(self, op_type_name, name, **keywords)
502 # What type does convert_to_tensor think it has?
503 observed = ops.internal_convert_to_tensor(
--> 504 values, as_ref=input_arg.is_ref).dtype.name
505 prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
506 (input_name, op_type_name, observed))
/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
714
715 if ret is None:
--> 716 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
717
718 if ret is NotImplemented:
/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
174 as_ref=False):
175 _ = as_ref
--> 176 return constant(v, dtype=dtype, name=name)
177
178
/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name, verify_shape)
163 tensor_value = attr_value_pb2.AttrValue()
164 tensor_value.tensor.CopyFrom(
--> 165 tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape, verify_shape=verify_shape))
166 dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
167 const_tensor = g.create_op(
/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape)
358 else:
359 if values is None:
--> 360 raise ValueError("None values not supported.")
361 # if dtype is provided, forces numpy array to be the type
362 # provided if possible.
ValueError: None values not supported.
I also cannot reproduce this error in plain TensorFlow; for example, I tried replacing one of the values in y_hat with None, but that raises a different error.
I am not sure whether the ValueError means that some None value is produced at runtime, or whether I messed up the Keras implementation. I would really appreciate any help.
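For what it's worth, this is how I would check whether the loss even has a gradient with respect to the prediction (a quick diagnostic sketch, assuming TF 1.x graph mode as in the traceback; y_pred here stands in for the model output):
import tensorflow as tf

def mda(y, y_hat):
    c = tf.equal(tf.sign(y[1:] - y[:-1]), tf.sign(y_hat[1:] - y_hat[:-1]))
    return tf.reduce_mean(tf.cast(c, tf.float32))

y_true = tf.constant([0., 1., 2., 1., 0., 1.])
y_pred = tf.placeholder(tf.float32, shape=[6])

# If this prints [None], the loss has no gradient w.r.t. y_pred (equal and the
# bool-to-float cast are not differentiable), which would explain the None
# that the optimizer later hits in K.square(g).
print(tf.gradients(mda(y_true, y_pred), y_pred))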
Answer 0 (score: 0)
I tried the code you provided and it works fine on my machine.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras import backend as K

def mda(y_true, y_pred):
    s = K.equal(K.sign(y_true[1:] - y_true[:-1]),
                K.sign(y_pred[1:] - y_pred[:-1]))
    return K.mean(K.cast(s, K.floatx()))
y = np.array([0, 1, 2, 1, 0, 1])
y_hat = np.array([0, 1, 0, 1, 0, 1])
plt.plot(y)
plt.plot(y_hat, alpha=.6)
plt.show()
sess = tf.Session()
print(sess.run(mda(tf.constant(y), tf.constant(y_hat))))
What is your TensorFlow/Keras setup?
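The versions can be printed like this (a minimal sketch; __version__ is the standard version attribute on both packages):
import tensorflow as tf
import keras

# Report the exact versions so the two setups can be compared
print("TensorFlow:", tf.__version__)
print("Keras:", keras.__version__)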
Answer 1 (score: 0)
MDA is not differentiable, so your gradient is None.
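If that is the case, a common workaround (a sketch under that assumption, not something given in this answer) is to keep mda as a metric and train on a smooth surrogate, for example replacing sign with tanh:
from keras import backend as K

def mda(y_true, y_pred):
    # Non-differentiable: suitable as a metric, not as a loss.
    s = K.equal(K.sign(y_true[1:] - y_true[:-1]),
                K.sign(y_pred[1:] - y_pred[:-1]))
    return K.mean(K.cast(s, K.floatx()))

def soft_mda_loss(y_true, y_pred, scale=10.0):
    # tanh(scale * diff) is a smooth stand-in for sign(diff); the product is
    # close to +1 when the directions agree and close to -1 when they disagree.
    d_true = K.tanh(scale * (y_true[1:] - y_true[:-1]))
    d_pred = K.tanh(scale * (y_pred[1:] - y_pred[:-1]))
    return 1.0 - K.mean(d_true * d_pred)

# Hypothetical usage: minimise the surrogate, monitor the real MDA.
# model.compile(optimizer='rmsprop', loss=soft_mda_loss, metrics=[mda])
The scale factor controls how closely tanh approximates sign; a sensible value depends on the typical magnitude of the step-to-step changes in the data.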