Beam 写入 Avro 文件时的序列化错误(NotSerializableException)

时间:2019-04-25 14:16:33

标签: google-cloud-dataflow avro apache-beam beam

我按照 Beam documentation 中的示例来写入 AVRO 文件。但在 p.run().waitUntilFinish() 这一步报错:Caused by: java.io.NotSerializableException: org.apache.avro.Schema$RecordSchema。然而,如果我从一个 AVRO 文件读取并将其写入另一个 AVRO 输出,则可以正常工作。我的目标是从任意输入源写入 AVRO 文件。有没有人遇到过类似的问题?你们是如何解决的?

public class WriteAvro {

// Pipeline options: the path of the input file to read from.
public interface CsvToAvroOptions extends PipelineOptions {

    @Description("Path of the file to read from")
    @Default.String("test.avro")
    String getInputFile();

    void setInputFile(String value);
}

/**
 * Builds and runs the pipeline. Reads text lines, converts each line to a
 * {@code GenericRecord}, and writes the records out as Avro.
 *
 * <p>NOTE(review): this version fails at {@code p.run().waitUntilFinish()} with
 * {@code java.io.NotSerializableException: org.apache.avro.Schema$RecordSchema}
 * — see the comment on the anonymous DoFn below for the cause.
 */
static void run(CsvToAvroOptions options) throws IOException {
    final Schema schema = new Schema.Parser().parse(Resources.getResource("person.avsc").openStream());
    Pipeline p = Pipeline.create(options);
    // This works fine: AvroIO manages the Schema itself, so no user DoFn ever
    // captures it and it never goes through Java serialization.
    // PCollection<GenericRecord> input = p.apply(AvroIO.readGenericRecords(schema).from(options.getInputFile()));

    // This doesn't work: the anonymous DoFn below closes over the local
    // variable `schema`. Beam must Java-serialize every DoFn to ship it to
    // workers, and org.apache.avro.Schema does not implement Serializable,
    // hence the NotSerializableException. Fix: pass the schema as a JSON
    // string and re-parse it in a @Setup method (see the accepted answer).
    PCollection<GenericRecord> input =
            p.apply("ReadLines", TextIO.read().from(options.getInputFile()))
                    .apply(ParDo.of(new DoFn<String, GenericRecord>() {
                        @ProcessElement
                        public void processElement(ProcessContext c) {
                            GenericRecord record = new GenericData.Record(schema);
                            record.put("name", "John Doe");
                            record.put("age", 42);
                            record.put("siblingnames", Lists.newArrayList("Jimmy", "Jane"));
                            c.output(record);
                        }
                    }))
                    .setCoder(AvroCoder.of(GenericRecord.class, schema));

    input.apply(AvroIO.writeGenericRecords(schema).to("prefix"));
    p.run().waitUntilFinish();
}


// Entry point: parse/validate command-line flags, then run the pipeline.
public static void main(String[] args) throws IOException {
    CsvToAvroOptions options =
            PipelineOptionsFactory.fromArgs(args).withValidation().as(CsvToAvroOptions.class);

    run(options);
}
}
  • Beam 版本:2.11.0
  • Runner:Direct Runner(直接运行器)

1 个答案:

答案 0(得分:1)

org.apache.avro.Schema 没有实现 Serializable,而 Beam 需要对 DoFn 做 Java 序列化才能把它分发到各个 worker,因此产生了该错误。解决办法是:把 schema 以 JSON 字符串的形式保存在 DoFn 字段中,并在 DoFn 的 @Setup 方法里重新解析出 Schema 对象。

这是您的操作方式。


// Pipeline options: the path of the input file to read from.
// (PipelineOptions getter/setter pairs are populated from command-line flags
// by PipelineOptionsFactory; @Default.String supplies the fallback value.)
public interface CsvToAvroOptions extends PipelineOptions {

    @Description("Path of the file to read from")
    @Default.String("test.avro")
    String getInputFile();

    void setInputFile(String value);
}



  /**
   * Emits one hard-coded person record per input line.
   *
   * <p>Avro's {@code Schema} is not {@code Serializable}, so this DoFn keeps
   * only its JSON representation (a plain String, which serializes fine) and
   * rebuilds the {@code Schema} object once per worker in {@link Setup}.
   */
  private static class ConstructAvroRecordsFn extends DoFn<String, GenericRecord> {

    // JSON form of the schema — the only state that crosses serialization.
    private final String schemaAsJson;
    // Rebuilt on the worker; never serialized with the DoFn.
    private Schema avroSchema;

    ConstructAvroRecordsFn(Schema schema){
      schemaAsJson = schema.toString();
    }

    // Runs once per DoFn instance before processing starts.
    @Setup
    public void setup(){
      avroSchema = new Schema.Parser().parse(schemaAsJson);
    }

    @ProcessElement
    public void processElement(ProcessContext c) {
      GenericRecord person = new GenericData.Record(avroSchema);
      person.put("name", "John Doe");
      person.put("age", 42);
      person.put("siblingnames", Lists.newArrayList("Jimmy", "Jane"));
      c.output(person);
    }
  }

/**
 * Builds and runs the pipeline: read text lines from {@code inputFile},
 * turn each line into a {@code GenericRecord} via {@link ConstructAvroRecordsFn},
 * and write the records as Avro files under the prefix {@code "prefix"}.
 *
 * @param options pipeline options carrying the input file path
 * @throws IOException if the bundled schema resource cannot be read
 */
static void run(CsvToAvroOptions options) throws IOException {
    // Parse the Avro schema bundled on the classpath.
    final Schema schema =
            new Schema.Parser().parse(Resources.getResource("person.avsc").openStream());

    Pipeline p = Pipeline.create(options);

    PCollection<String> lines =
            p.apply("ReadLines", TextIO.read().from(options.getInputFile()));

    // ConstructAvroRecordsFn carries the schema as JSON, so the DoFn itself
    // stays Java-serializable; the coder still needs the parsed schema here.
    PCollection<GenericRecord> records =
            lines.apply(ParDo.of(new ConstructAvroRecordsFn(schema)))
                    .setCoder(AvroCoder.of(GenericRecord.class, schema));

    records.apply(AvroIO.writeGenericRecords(schema).to("prefix"));
    p.run().waitUntilFinish();
}


// Entry point: parse and validate command-line flags, then run the pipeline.
public static void main(String[] args) throws IOException {
    CsvToAvroOptions opts = PipelineOptionsFactory.fromArgs(args)
            .withValidation()
            .as(CsvToAvroOptions.class);
    run(opts);
}
}