"0-th value returned by pyfunc_0 is double, but expects float"

Asked: 2019-01-20 17:12:28

Tags: python python-3.x tensorflow keras

The full code and the error can be seen here: https://www.kaggle.com/pradhyo/keras-style-transfer-different-losses/data?scriptVersionId=9726696 (the error is about 60% of the way down the page, in the longer section with a black background).

Even though wd_float() returns a float, the problem below seems to be that _loss ends up as a double.

I tried returning an int from wd_float(), and also tried casting to int inside style_loss_wasserstein() to see what would happen; the error then became "0-th value returned by pyfunc_0 is int, but expects float". So my best guess is that tf.py_func is somehow converting the output of wd_float to a double.

from scipy.stats import wasserstein_distance
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.wasserstein_distance.html
# wasserstein_distance is supposed to return float based on docs
def wd_float(x, y):
    _wd = wasserstein_distance(x, y)
    return float(_wd)


def style_loss_wasserstein():
    ...
    _loss = tf.py_func(wd_float, [style_features, combination_features], tf.float32)
    _loss = K.cast(_loss, dtype='float32')
    print(f"first loss: {_loss}")
    return tf.convert_to_tensor(_loss, tf.float32)
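
For what it's worth, a quick check outside the kernel hints at why py_func reports a double: Python's built-in float is a C double, so NumPy (and hence py_func, which wraps return values as ndarrays) treats it as float64:

import numpy as np

print(np.asarray(float(1.23)).dtype)  # float64 -- the "double" in the error
print(np.float32(1.23).dtype)         # float32 -- what Tout=tf.float32 expects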


...



from keras import backend as K
from scipy.optimize import fmin_l_bfgs_b


class Evaluator(object):
    ...
    def eval_loss_and_grads(self, x):
        ...
        outs = self.f_outputs([x]) # line causing the error
        loss_value = outs[0]
        ...

    def loss(self, x):
        assert self.loss_value is None
        loss_value, grad_values = self.eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value          



outputs = [style_loss_wasserstein]
f_outputs = K.function([combination_image], outputs)
evaluator = Evaluator(f_outputs, img_nrows, img_ncols)



x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                 fprime=evaluator.grads, maxfun=20)

Here is the stack trace:

---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-24-fcc21e1fc999> in <module>()
     13                loss_fn,
     14                iterations,
---> 15                save_every)
     16 
     17 # Display each output iteration for a style

<ipython-input-16-2397e37b5e4f> in style_transfer(base_image_path, style_reference_image_path, result_prefix, loss_fn, iterations, save_every)
     47     for i in range(iterations):
     48         x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
---> 49                                          fprime=evaluator.grads, maxfun=20)
     50         print('Iteration ' + str(i) + ' loss value:', min_val)
     51         # save current generated image

/opt/conda/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py in fmin_l_bfgs_b(func, x0, fprime, args, approx_grad, bounds, m, factr, pgtol, epsilon, iprint, maxfun, maxiter, disp, callback, maxls)
    197 
    198     res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
--> 199                            **opts)
    200     d = {'grad': res['jac'],
    201          'task': res['message'],

/opt/conda/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py in _minimize_lbfgsb(fun, x0, args, jac, bounds, disp, maxcor, ftol, gtol, eps, maxfun, maxiter, iprint, callback, maxls, **unknown_options)
    333             # until the completion of the current minimization iteration.
    334             # Overwrite f and g:
--> 335             f, g = func_and_grad(x)
    336         elif task_str.startswith(b'NEW_X'):
    337             # new iteration

/opt/conda/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py in func_and_grad(x)
    283     else:
    284         def func_and_grad(x):
--> 285             f = fun(x, *args)
    286             g = jac(x, *args)
    287             return f, g

/opt/conda/lib/python3.6/site-packages/scipy/optimize/optimize.py in function_wrapper(*wrapper_args)
    291     def function_wrapper(*wrapper_args):
    292         ncalls[0] += 1
--> 293         return function(*(wrapper_args + args))
    294 
    295     return ncalls, function_wrapper

<ipython-input-14-687064fde378> in loss(self, x)
     29     def loss(self, x):
     30         assert self.loss_value is None
---> 31         loss_value, grad_values = self.eval_loss_and_grads(x)
     32         self.loss_value = loss_value
     33         self.grad_values = grad_values

<ipython-input-14-687064fde378> in eval_loss_and_grads(self, x)
     19         else:
     20             x = x.reshape((1, self.img_nrows, self.img_ncols, 3))
---> 21         outs = self.f_outputs([x])
     22         loss_value = outs[0]
     23         if len(outs[1:]) == 1:

/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2919                     return self._legacy_call(inputs)
   2920 
-> 2921             return self._call(inputs)
   2922         else:
   2923             if py_any(is_tensor(x) for x in inputs):

/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/backend/tensorflow_backend.py in _call(self, inputs)
   2877             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2878         else:
-> 2879             fetched = self._callable_fn(*array_vals)
   2880         return fetched[:len(self.outputs)]
   2881 

/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
   1437           ret = tf_session.TF_SessionRunCallable(
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:
   1441           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
    526             None, None,
    527             compat.as_text(c_api.TF_Message(self.status.status)),
--> 528             c_api.TF_GetCode(self.status.status))
    529     # Delete the underlying status object from memory otherwise it stays alive
    530     # as there is a reference to status from this from the traceback due to

InvalidArgumentError: 0-th value returned by pyfunc_0 is double, but expects float
     [[{{node PyFunc}} = PyFunc[Tin=[DT_FLOAT, DT_FLOAT], Tout=[DT_FLOAT], token="pyfunc_0", _device="/job:localhost/replica:0/task:0/device:CPU:0"](Reshape_20/_609, Reshape_21/_611)]]
     [[{{node PyFunc/_613}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device_incarnation=1, tensor_name="edge_103_PyFunc", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]                                     

3 Answers:

Answer 0 (score: 0)

I think the problem here is that the TensorFlow graph keeps the values as int, but you are trying to return a float to the graph. Changing the parameter in tf.py_func from tf.float32 to tf.int (or tf.int32, however that works in Python) could solve the issue.
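
A sketch of that dtype-matching idea applied to the question's code (reusing wd_float, style_features, and combination_features from above; note that wd_float returns a Python float, i.e. a double, so the dtype that actually matches is tf.float64 rather than an int type):

import tensorflow as tf
from keras import backend as K

# Declare the dtype py_func actually produces (a Python float is a float64),
# then cast down to float32 for the rest of the graph.
_loss = tf.py_func(wd_float, [style_features, combination_features], tf.float64)
_loss = K.cast(_loss, dtype='float32')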

Answer 1 (score: 0)

I ran into a similar problem today as well, but in a custom loss function in TF2.

For me, the solution was to convert the data to np.float32 with np.ndarray.astype before returning it from the custom numpy function, e.g.:

import numpy as np
import tensorflow as tf


def custom_loss_function_loss2(y_true, y_pred):
    loss = np.random.random(50)
    # Cast to np.float32 before handing the result back to TensorFlow
    return np.ndarray.astype(loss, np.float32)


@tf.function
def custom_loss_function(y_true, y_pred):
    loss1 = tf.keras.losses.binary_crossentropy(y_true, y_pred)
    loss2 = tf.numpy_function(custom_loss_function_loss2, [y_true, y_pred], np.float32)
    return loss1 + loss2

Hope this helps someone else who runs into the same error later.

Answer 2 (score: 0)

I also ran into this problem when using the py_func function in TensorFlow.

I solved it by changing np.float to np.float32.
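
A minimal sketch of what that change looks like around the question's py_func call (wd_float32 is a hypothetical name; the point is only the np.float32 cast on the return value):

import numpy as np
import tensorflow as tf
from scipy.stats import wasserstein_distance

def wd_float32(x, y):
    # np.float32(...) instead of float(...) / np.float, both of which are doubles
    return np.float32(wasserstein_distance(x, y))

# Tout=tf.float32 now matches what the wrapped function returns
loss = tf.py_func(wd_float32,
                  [tf.constant([0.0, 1.0]), tf.constant([0.5, 1.5])],
                  tf.float32)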