“如何在keras + tensorflow中修复'InvalidArgumentError'?

时间:2019-05-15 12:09:10

标签: python tensorflow keras

当我在“ samples / shapes / shapes.py”中运行train_shapes.py时,出现如下错误:

  

回溯(最近一次通话最近):文件“ MaskRCNN-Unet / samples / shapes / train_shapes.py”,第274行,   在layers =“ heads”中)文件“ MaskRCNN-Unet / mrcnn / modelxmask.py”,第2844行,   在火车use_multiprocessing = True中,文件“ /usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py”,第91行,   在包装器中返回func(* args,** kwargs)文件“ /usr/local/lib/python3.5/dist-packages/keras/engine/training.py”,第1413行,   在fit_generator initial_epoch = initial_epoch中)文件“ /usr/local/lib/python3.5/dist-packages/keras/engine/training_generator.py”,第214行,   在fit_generator class_weight = class_weight中)文件“ /usr/local/lib/python3.5/dist-packages/keras/engine/training.py”,第1213行,   在train_on_batch输出中= self.train_function(ins)文件“ /usr/local/lib/python3.5/dist-packages/keras/backend/tensorflow_backend.py”,行2715,在调用返回self._call(inputs)文件“ /usr/local/lib/python3.5/dist-packages/keras/backend/tensorflow_backend.py“,第2675行,   在_call fetched = self._callable_fn(* array_vals)中,文件“ /usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py”,行1439,在调用run_metadata_ptr中)文件“ / usr /local/lib/python3.5/dist-packages/tensorflow/python/framework/errors_impl.py“,第528行,   在出口c_api.TF_GetCode(self.status.status))tensorflow.python.framework.errors_impl.InvalidArgumentError:预期开始,结束和步幅为一维相等大小的张量,但形状为[2,1],[2,1 ]和[2]代替。   [[{{node DetectSmaskBbox / map / while / strided_slice_1}}]

我认为提示不明确,我无法理解并找到此错误,有人可以帮助我吗?预先感谢。

我试图注释掉代码中的DetectSmaskBbox层,并且它起作用了。因此,我认为DetectSmaskBbox层中一定有问题。

class DetectSmaskBbox(KE.Layer):
    def __init__(self, config, mode, **kwargs):
        super(DetectSmaskBbox, self).__init__(**kwargs)
        self.config = config
        self.mode = mode

    def call(self, inputs):
        input_image_meta, mrcnn_detections, smask = inputs
        xmask_smasks = self.detect_smask_bbox(input_image_meta, mrcnn_detections, smask)
        return xmask_smasks

    def detect_smask_bbox(self, image_metas, detections, smasks):
        """Map the bbox generated by mask-rcnn to smasks.

        images: List of image tensors.

        Returns a list of tensors. [masks1, mask2, mask3, ...]
        masks: [Batch, H, W, N] instance logits masks, different image has different numbers of masks.
        """

        instance_count = self.config.DETECTION_MAX_INSTANCES
        # Process detections
        results = []
        m = parse_image_meta_graph(image_metas)
        image_shape = m['image_shape'][0]
        print("the shape of image_shape is ", image_shape.get_shape().as_list())
        original_image_shape = m['original_image_shape'][0]
        print("the shape of original_image_shape is ", original_image_shape.get_shape().as_list())
        window = m['window'][0]
        for i in range(self.config.BATCH_SIZE):
            detections_per_img = detections[i]
            xmask_smasks = self.unmold_detections_to_smask(detections_per_img, smasks[i],  image_shape,
                                                           original_image_shape, window)
            # xmask_smasks: [num_instances, Height, Width]
            # padding
            padding = tf.maximum(instance_count - tf.shape(xmask_smasks)[0], 0)
            xmask_smasks = tf.pad(xmask_smasks, [(0, padding), (0, 0), (0, 0)])   # xmask_smasks: [instance_count, Height, Width]
            t = tf.split(xmask_smasks, instance_count, axis=0)    # t is a list of tensors with shape [1, Height, Width]
            xmask_smasks = tf.stack(t, axis=-1)                   # [1, Height, Width, instance_count]
            xmask_smasks = tf.squeeze(xmask_smasks, axis=0)       # xmask_smasks: [Height, Width, instance_count]
            results.append(xmask_smasks)
        results = tf.stack(results, axis=0)
        return results   # results: [Batch, Height, Width, instance_count]

    def unmold_detections_to_smask(self, detections, smask, original_image_shape,
                          image_shape, window):
        """Map the detections of one image that generated by mask-rcnn to smasks, set
        the values outside the bbox to be zeros.

        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
        smask: [height, width]
        original_image_shape: [H, W, C] Original image shape before resizing
        image_shape: [H, W, C] Shape of the image after resizing and padding
        window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
                image is excluding the padding.

        Returns: full_masks of an image
        full_masks: [num_instances, Height, Width] Instance masks of smask. Height
        and Width equals to the height and width of original images.
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = tf.where(tf.equal(detections[:, 4],0))
        # is_not_empty = tf.cond(tf.greater(tf.size(zero_ix), 0), lambda: 1, lambda: 0)
        # N = zero_ix[0][0] if is_not_empty else detections.get_shape[0]
        N = tf.cond(tf.greater(tf.size(zero_ix), 0), lambda: tf.cast(zero_ix[0][0], tf.int32), lambda: detections.get_shape()[0])

        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        # class_ids = tf.cast(detections[:N, 4], tf.int32)
        # scores = detections[:N, 5]

        # Translate normalized coordinates in the resized image to pixel
        # coordinates in the original image before resizing
        print("the shape of window is ", window.get_shape().as_list())
        print("the shape of image_shape is ", image_shape.get_shape().as_list())
        window = norm_boxes_graph(window, image_shape[:2])
        wy1, wx1, wy2, wx2 = tf.split(window, 4)
        shift = tf.concat([wy1, wx1, wy1, wx1], axis=-1)
        wh = wy2 - wy1  # window height
        ww = wx2 - wx1  # window width
        scale = tf.concat([wh, ww, wh, ww], axis=-1)
        # Convert boxes to normalized coordinates on the window
        boxes = tf.divide(boxes - shift, scale)
        # Convert boxes to pixel coordinates on the original image
        boxes = denorm_boxes_graph(boxes, original_image_shape[:2])

        # Filter out detections with zero area. Happens in early training when
        # network weights are still random
        include_ix = tf.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) > 0)
        include_ix = tf.squeeze(include_ix)
        # is_empty = tf.cond(tf.greater(tf.size(include_ix), 0), lambda: 1, lambda: 0)
        # if is_empty is not None:
        boxes = tf.gather(boxes, include_ix, axis=0)
        # class_ids = tf.gather(class_ids, include_ix, axis=0)
        # scores = tf.gather(scores, include_ix, axis=0)
        #N = class_ids.shape[0]

        # Resize masks to original image size and set boundary threshold.
        # full_masks = []
        fn = lambda box: utils.map_bbox_to_smask(smask, box, original_image_shape[:2])
        full_masks = tf.map_fn(fn, boxes, dtype=tf.float32)
        return full_masks

很抱歉,由于我不知道问题出在哪里,所以无法明确问题。但是可以在“ https://github.com/zlyx525/MaskRCNN-UNet”中找到完整的项目。非常感谢!

0 个答案:

没有答案