尝试在 pet 数据集上训练时,训练脚本报出配置解析错误

时间:2017-08-18 09:15:06

标签: python parsing tensorflow object-detection

我最初在tensorflow github上发布了这个,但我在这里复制它是因为它更合适。

我需要你们的宝贵帮助,因为我被一个奇怪的配置文件解析错误困住了。我正在尝试在 pet 数据集上运行本地的 train.py 脚本。我已经按照教程的说明完成了数据准备和配置文件的所有步骤,以下是我的目录结构:

+pets_example
    +data
       -pet_label_map.pbtxt
       -pet_train.record
       -pet_val.record
    +models
       +ssd_inception_v2_coco_11_06_2017 
          -frozen_inference_graph.pb
          -graph.pbtxt
          -model.ckpt.data-00000-of-00001
          -model.ckpt.index
          -model.ckpt.meta
          -pipeline.proto

以下是我的管道配置文件的内容:

syntax = "proto2";

package object_detection.protos;

import "object_detection/protos/eval.proto";
import "object_detection/protos/input_reader.proto";
import "object_detection/protos/model.proto";
import "object_detection/protos/train.proto";

// Convenience message for configuring a training and eval pipeline. Allows all
// of the pipeline parameters to be configured from one file.
message TrainEvalPipelineConfig {
  optional DetectionModel model = 1;
  optional TrainConfig train_config = 2;
  optional InputReader train_input_reader = 3;
  optional EvalConfig eval_config = 4;
  optional InputReader eval_input_reader = 5;
}

model {
  ssd {
    num_classes: 37
    box_coder {
      faster_rcnn_box_coder {
        y_scale: 10.0
        x_scale: 10.0
        height_scale: 5.0
        width_scale: 5.0
      }
    }
    matcher {
      argmax_matcher {
        matched_threshold: 0.5
        unmatched_threshold: 0.5
        ignore_thresholds: false
        negatives_lower_than_unmatched: true
        force_match_for_each_row: true
      }
    }
    similarity_calculator {
      iou_similarity {
      }
    }
    anchor_generator {
      ssd_anchor_generator {
        num_layers: 6
        min_scale: 0.2
        max_scale: 0.95
        aspect_ratios: 1.0
        aspect_ratios: 2.0
        aspect_ratios: 0.5
        aspect_ratios: 3.0
        aspect_ratios: 0.3333
        reduce_boxes_in_lowest_layer: true
      }
    }
    image_resizer {
      fixed_shape_resizer {
        height: 300
        width: 300
      }
    }
    box_predictor {
      convolutional_box_predictor {
        min_depth: 0
        max_depth: 0
        num_layers_before_predictor: 0
        use_dropout: false
        dropout_keep_probability: 0.8
        kernel_size: 3
        box_code_size: 4
        apply_sigmoid_to_scores: false
        conv_hyperparams {
          activation: RELU_6,
          regularizer {
            l2_regularizer {
              weight: 0.00004
            }
          }
          initializer {
            truncated_normal_initializer {
              stddev: 0.03
              mean: 0.0
            }
          }
        }
      }
    }
    feature_extractor {
      type: 'ssd_inception_v2'
      min_depth: 16
      depth_multiplier: 1.0
      conv_hyperparams {
        activation: RELU_6,
        regularizer {
          l2_regularizer {
            weight: 0.00004
          }
        }
        initializer {
          truncated_normal_initializer {
            stddev: 0.03
            mean: 0.0
          }
        }
        batch_norm {
          train: true,
          scale: true,
          center: true,
          decay: 0.9997,
          epsilon: 0.001,
        }
      }
    }
    loss {
      classification_loss {
        weighted_sigmoid {
          anchorwise_output: true
        }
      }
      localization_loss {
        weighted_smooth_l1 {
          anchorwise_output: true
        }
      }
      hard_example_miner {
        num_hard_examples: 3000
        iou_threshold: 0.99
        loss_type: CLASSIFICATION
        max_negatives_per_positive: 3
        min_negatives_per_image: 0
      }
      classification_weight: 1.0
      localization_weight: 1.0
    }
    normalize_loss_by_num_matches: true
    post_processing {
      batch_non_max_suppression {
        score_threshold: 1e-8
        iou_threshold: 0.6
        max_detections_per_class: 100
        max_total_detections: 100
      }
      score_converter: SIGMOID
    }
  }
}

train_config: {
  batch_size: 24
  optimizer {
    rms_prop_optimizer: {
      learning_rate: {
        exponential_decay_learning_rate {
          initial_learning_rate: 0.004
          decay_steps: 800720
          decay_factor: 0.95
        }
      }
      momentum_optimizer_value: 0.9
      decay: 0.9
      epsilon: 1.0
    }
  }
  fine_tune_checkpoint: "pets_example/models/ssd_inception_v2_coco_11_06_2017/model.ckpt"
  from_detection_checkpoint: true
  data_augmentation_options {
    random_horizontal_flip {
    }
  }
  data_augmentation_options {
    ssd_random_crop {
    }
  }
}

train_input_reader: {
  tf_record_input_reader {
    input_path: "pets_example/data/pet_train.record"
  }
  label_map_path: "pets_example/data/pet_label_map.pbtxt"
}

eval_config: {
  num_examples: 2000
}

eval_input_reader: {
  tf_record_input_reader {
    input_path: "pets_example/data/pet_val.record"
  }
  label_map_path: "pets_example/data/pet_label_map.pbtxt"
}

我使用的是 Linux Ubuntu 16.04,通过 pip 安装了 tensorflow 1.1.0(没有 GPU 支持,所有内容都在 CPU 上运行)。

所以当我尝试命令时: python3 object_detection/train.py --logtostderr --pipeline_config_path='pets_example/models/ssd_inception_v2_coco_11_06_2017/pipeline.proto' --train_dir='pets_example/models/train_events/'

我收到以下错误:

Traceback (most recent call last):
  File "object_detection/train.py", line 201, in <module>
    tf.app.run()
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/platform/app.py", line 48, in run
    _sys.exit(main(_sys.argv[:1] + flags_passthrough))
  File "object_detection/train.py", line 146, in main
    model_config, train_config, input_config = get_configs_from_pipeline_file()
  File "object_detection/train.py", line 106, in get_configs_from_pipeline_file
    text_format.Merge(a, pipeline_config)
  File "/usr/local/lib/python3.5/dist-packages/google/protobuf/text_format.py", line 481, in Merge
    descriptor_pool=descriptor_pool)
  File "/usr/local/lib/python3.5/dist-packages/google/protobuf/text_format.py", line 535, in MergeLines
    return parser.MergeLines(lines, message)
  File "/usr/local/lib/python3.5/dist-packages/google/protobuf/text_format.py", line 568, in MergeLines
    self._ParseOrMerge(lines, message)
  File "/usr/local/lib/python3.5/dist-packages/google/protobuf/text_format.py", line 583, in _ParseOrMerge
    self._MergeField(tokenizer, message)
  File "/usr/local/lib/python3.5/dist-packages/google/protobuf/text_format.py", line 657, in _MergeField
    (message_descriptor.full_name, name))
google.protobuf.text_format.ParseError: 1:1 : Message type "object_detection.protos.TrainEvalPipelineConfig" has no field named "syntax".

我通过不同的文件追踪错误,并在查看描述符时快速耗尽技能......如果有人可以提供任何帮助,那就太棒了! :) 在此先感谢您的帮助!

1 个答案:

答案 0 :(得分:0)

您的配置文件开头不应该包含这些内容——请尝试删除以下所有行之后重试:


    syntax = "proto2";

    package object_detection.protos;

    import "object_detection/protos/eval.proto";
    import "object_detection/protos/input_reader.proto";
    import "object_detection/protos/model.proto";
    import "object_detection/protos/train.proto";

    // Convenience message for configuring a training and eval pipeline. Allows all
    // of the pipeline parameters to be configured from one file.
    message TrainEvalPipelineConfig {
      optional DetectionModel model = 1;
      optional TrainConfig train_config = 2;
      optional InputReader train_input_reader = 3;
      optional EvalConfig eval_config = 4;
      optional InputReader eval_input_reader = 5;
    }

此外,您的 pipeline_config_path 不应该指向 .proto 文件(.proto 文件定义的是 proto 的模式/架构)——它应该指向 samples/configs 目录中提供的那种 .config 文件。