Compatibility issue between TensorFlow 2.4 (Apple ML Compute) and NumPy on an M1 MacBook

Time: 2021-04-01 16:48:56

Tags: python-3.x numpy tensorflow keras

I am running the new Apple-native TensorFlow 2.4 package and have hit a problem I had not seen before. This Jupyter notebook code works fine in my old Intel-based environment with an older TensorFlow version, but it is not compatible with TensorFlow 2.4 on the M1 Apple ML Compute build, whether I use NumPy 1.20 or 1.18 (I tried downgrading NumPy). Error log:

NotImplementedError: Cannot convert a symbolic Tensor (lstm_1/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
    ---------------------------------------------------------------------------
    NotImplementedError                       Traceback (most recent call last)
    <ipython-input-20-73358e637fe3> in <module>
          4 model = Sequential()
          5 model.add(Embedding(vocab_size+1, W2V_SIZE, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False))
    ----> 6 model.add(LSTM(500, dropout=0.2, recurrent_dropout=0.2))
          7 model.add(Dense(units = 10000, kernel_initializer = 'glorot_uniform', activation = 'relu'))
          8 model.add(Dropout(0.35))
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
        515     self._self_setattr_tracking = False  # pylint: disable=protected-access
        516     try:
    --> 517       result = method(self, *args, **kwargs)
        518     finally:
        519       self._self_setattr_tracking = previous_value  # pylint: disable=protected-access
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/sequential.py in add(self, layer)
        221       # If the model is being built continuously on top of an input layer:
        222       # refresh its output.
    --> 223       output_tensor = layer(self.outputs[0])
        224       if len(nest.flatten(output_tensor)) != 1:
        225         raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in __call__(self, inputs, initial_state, constants, **kwargs)
        658 
        659     if initial_state is None and constants is None:
    --> 660       return super(RNN, self).__call__(inputs, **kwargs)
        661 
        662     # If any of `initial_state` or `constants` are specified and are Keras
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
        944     # >> model = tf.keras.Model(inputs, outputs)
        945     if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
    --> 946       return self._functional_construction_call(inputs, args, kwargs,
        947                                                 input_list)
        948 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
       1083           layer=self, inputs=inputs, build_graph=True, training=training_value):
       1084         # Check input assumptions set after layer building, e.g. input shape.
    -> 1085         outputs = self._keras_tensor_symbolic_call(
       1086             inputs, input_masks, args, kwargs)
       1087 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
        815       return nest.map_structure(keras_tensor.KerasTensor, output_signature)
        816     else:
    --> 817       return self._infer_output_signature(inputs, args, kwargs, input_masks)
        818 
        819   def _infer_output_signature(self, inputs, args, kwargs, input_masks):
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
        856           # TODO(kaftan): do we maybe_build here, or have we already done it?
        857           self._maybe_build(inputs)
    --> 858           outputs = call_fn(inputs, *args, **kwargs)
        859 
        860         self._handle_activity_regularization(inputs, outputs)
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent_v2.py in call(self, inputs, mask, training, initial_state)
       1161     # LSTM does not support constants. Ignore it during process.
       1162     orig_initial_state = initial_state
    -> 1163     inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
       1164 
       1165     if isinstance(mask, list):
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _process_inputs(self, inputs, initial_state, constants)
        857         initial_state = self.states
        858     elif initial_state is None:
    --> 859       initial_state = self.get_initial_state(inputs)
        860 
        861     if len(initial_state) != len(self.states):
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs)
        640     dtype = inputs.dtype
        641     if get_initial_state_fn:
    --> 642       init_state = get_initial_state_fn(
        643           inputs=None, batch_size=batch_size, dtype=dtype)
        644     else:
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs, batch_size, dtype)
       2504 
       2505   def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    -> 2506     return list(_generate_zero_filled_state_for_cell(
       2507         self, inputs, batch_size, dtype))
       2508 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype)
       2985     batch_size = array_ops.shape(inputs)[0]
       2986     dtype = inputs.dtype
    -> 2987   return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
       2988 
       2989 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state(batch_size_tensor, state_size, dtype)
       3001 
       3002   if nest.is_nested(state_size):
    -> 3003     return nest.map_structure(create_zeros, state_size)
       3004   else:
       3005     return create_zeros(state_size)
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
        657 
        658   return pack_sequence_as(
    --> 659       structure[0], [func(*x) for x in entries],
        660       expand_composites=expand_composites)
        661 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
        657 
        658   return pack_sequence_as(
    --> 659       structure[0], [func(*x) for x in entries],
        660       expand_composites=expand_composites)
        661 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in create_zeros(unnested_state_size)
       2998     flat_dims = tensor_shape.TensorShape(unnested_state_size).as_list()
       2999     init_state_size = [batch_size_tensor] + flat_dims
    -> 3000     return array_ops.zeros(init_state_size, dtype=dtype)
       3001 
       3002   if nest.is_nested(state_size):
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
        199     """Call target, and fall back on dispatchers if there is a TypeError."""
        200     try:
    --> 201       return target(*args, **kwargs)
        202     except (TypeError, ValueError):
        203       # Note: convert_to_eager_tensor currently raises a ValueError, not a
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in wrapped(*args, **kwargs)
       2817 
       2818   def wrapped(*args, **kwargs):
    -> 2819     tensor = fun(*args, **kwargs)
       2820     tensor._is_zeros_tensor = True
       2821     return tensor
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in zeros(shape, dtype, name)
       2866           # Create a constant if it won't be very big. Otherwise create a fill
       2867           # op to prevent serialized GraphDefs from becoming too large.
    -> 2868           output = _constant_if_small(zero, shape, dtype, name)
       2869           if output is not None:
       2870             return output
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in _constant_if_small(value, shape, dtype, name)
       2802 def _constant_if_small(value, shape, dtype, name):
       2803   try:
    -> 2804     if np.prod(shape) < 1000:
       2805       return constant(value, shape=shape, dtype=dtype, name=name)
       2806   except TypeError:
    <__array_function__ internals> in prod(*args, **kwargs)
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/numpy/core/fromnumeric.py in prod(a, axis, dtype, out, keepdims, initial, where)
       3028     10
       3029     """
    -> 3030     return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
       3031                           keepdims=keepdims, initial=initial, where=where)
       3032 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/numpy/core/fromnumeric.py in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
         85                 return reduction(axis=axis, out=out, **passkwargs)
         86 
    ---> 87     return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
         88 
         89 
    ~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in __array__(self)
        850 
        851   def __array__(self):
    --> 852     raise NotImplementedError(
        853         "Cannot convert a symbolic Tensor ({}) to a numpy array."
        854         " This error may indicate that you're trying to pass a Tensor to"
    NotImplementedError: Cannot convert a symbolic Tensor (lstm_1/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
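For reference, a minimal, self-contained cell along the following lines should reproduce the traceback above. Only the model-building lines appear in the traceback, so vocab_size, W2V_SIZE, MAX_SEQUENCE_LENGTH, and embedding_matrix below are assumed placeholder values, not the notebook's real ones:

    # Minimal sketch reproducing the error; the constants below are assumed placeholders.
    import numpy as np
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout

    vocab_size = 10000             # assumed vocabulary size
    W2V_SIZE = 300                 # assumed word-embedding dimension
    MAX_SEQUENCE_LENGTH = 100      # assumed padded sequence length
    embedding_matrix = np.random.rand(vocab_size + 1, W2V_SIZE).astype("float32")

    model = Sequential()
    model.add(Embedding(vocab_size + 1, W2V_SIZE,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=False))
    # On TF 2.4 with NumPy 1.20 this line raises the NotImplementedError shown above:
    # building the LSTM's zero initial state ends up calling np.prod() on a shape
    # that contains a symbolic tensor, which NumPy refuses to convert.
    model.add(LSTM(500, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(units=10000, kernel_initializer="glorot_uniform", activation="relu"))
    model.add(Dropout(0.35))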

0 Answers:

No answers