Google Cloud Dataflow problem writing out data (TextIO or DatastoreIO)

Date: 2017-06-05 02:29:18

Tags: java google-cloud-datastore google-cloud-dataflow

OK, everyone. Another Dataflow question from a Dataflow newbie. (I just started playing with it this week.)

I'm building a data pipeline that takes a list of product names and generates autocomplete data. The data-processing part all seems to work fine, but I must be missing something obvious, because when I add my final ".apply" to write out the data with either DatastoreIO or TextIO, I get a syntax error in my IDE that says the following:

"The method apply(DatastoreV1.Write) is undefined for the type ParDo.SingleOutput<KV<String, List<String>>, Entity>"

The IDE offers me the option of adding a cast to the method receiver, but that obviously isn't the answer. Do I need to do some other step before trying to write out the data? My last step before trying to write the data is a call to an Entity helper so that Dataflow changes my pipeline structure from <KV<String, List<String>>> to <Entity>, which seems to me like what I need in order to write to Datastore.

I've gotten so frustrated with this over the last few days that I even decided to write the data out to some AVRO files instead, so I could just load them into Datastore by hand. Imagine how thrilled I was when I got all of that working and then hit the exact same error, in the exact same place, on my call to TextIO. That's why I think I must be missing something really obvious here.

Here's my code. I've included all of it for reference, but you probably only need to look at main[] at the bottom. Any input would be greatly appreciated! Thanks!

MrSimmonsSr

package com.client.autocomplete;

import com.client.autocomplete.AutocompleteOptions;


import com.google.datastore.v1.Entity;
import com.google.datastore.v1.Key;
import com.google.datastore.v1.Value;

import static com.google.datastore.v1.client.DatastoreHelper.makeKey;
import static com.google.datastore.v1.client.DatastoreHelper.makeValue;
import org.apache.beam.sdk.coders.DefaultCoder;

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionList;
import com.google.api.services.bigquery.model.TableRow;
import com.google.common.base.MoreObjects;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.io.gcp.datastore.DatastoreIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.SimpleFunction;
import org.apache.beam.sdk.transforms.GroupByKey;
import org.apache.beam.sdk.transforms.DoFn.ProcessContext;
import org.apache.beam.sdk.transforms.DoFn.ProcessElement;
import org.apache.beam.sdk.extensions.jackson.ParseJsons;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.options.Default;
import org.apache.beam.sdk.options.Description;
import org.apache.beam.sdk.options.StreamingOptions;
import org.apache.beam.sdk.options.Validation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;

/*
 * A simple Dataflow pipeline to create autocomplete data from a list of
 * product names. It then loads that prefix data into Google Cloud Datastore for consumption by
 * a Google Cloud Function. That function will take in a prefix and return a list of 10 product names
 * 
 * Pseudo Code Steps
 * 1. Load a list of product names from Cloud Storage
 * 2. Generate prefixes for use with autocomplete, based on the product names
 * 3. Merge the prefix data together with 10 products per prefix
 * 4. Write that  prefix data to the Cloud Datastore as a KV with a <String>, List<String> structure
 * 
 */

public class ClientAutocompletePipeline {
    private static final Logger LOG = LoggerFactory.getLogger(ClientAutocompletePipeline.class);


    /**
     * A DoFn that keys each product name by all of its prefixes.
     * This creates one row in the PCollection for each prefix<->product_name pair
     */
    private static class AllPrefixes
    extends DoFn<String, KV<String, String>> {
        private final int minPrefix;
        private final int maxPrefix;

        public AllPrefixes(int minPrefix) {
            this(minPrefix, 10);
        }

        public AllPrefixes(int minPrefix, int maxPrefix) {
            this.minPrefix = minPrefix;
            this.maxPrefix = maxPrefix;
        }
        @ProcessElement
        public void processElement(ProcessContext c) {
            String productName= c.element().toString();
            for (int i = minPrefix; i <= Math.min(productName.length(), maxPrefix); i++) {
                c.output(KV.of(productName.substring(0, i), c.element()));
            }
        }
    }

    /**
     * Takes as input the top product names per prefix, and emits an entity
     * suitable for writing to Cloud Datastore.
     *
     */
    static class FormatForDatastore extends DoFn<KV<String, List<String>>, Entity> {
        private String kind;
        private String ancestorKey;

        public FormatForDatastore(String kind, String ancestorKey) {
            this.kind = kind;
            this.ancestorKey = ancestorKey;
        }

        @ProcessElement
        public void processElement(ProcessContext c) {
            // Initialize an EntityBuilder and get it a valid key
            Entity.Builder entityBuilder = Entity.newBuilder();
            Key key = makeKey(kind, ancestorKey).build();
            entityBuilder.setKey(key);

            // New HashMap to hold all the properties of the Entity
            Map<String, Value> properties = new HashMap<>();
            String prefix = c.element().getKey();
            String productsString = "Products[";

            // iterate through the product names and add each one to the productsString
            for (String productName : c.element().getValue()) {
                // products.add(productName);
                productsString += productName + ", ";
            }
            productsString += "]";

            properties.put("prefix", makeValue(prefix).build());            
            properties.put("products", makeValue(productsString).build());
            entityBuilder.putAllProperties(properties);
            c.output(entityBuilder.build());
        }
    }


    /**
     * Options supported by this class.
     *
     * <p>Inherits standard Beam example configuration options.
     */
    public interface Options
    extends AutocompleteOptions {
        @Description("Input text file")
        @Validation.Required
        String getInputFile();
        void setInputFile(String value);

        @Description("Cloud Datastore entity kind")
        @Default.String("prefix-product-map")
        String getKind();
        void setKind(String value);

        @Description("Whether output to Cloud Datastore")
        @Default.Boolean(true)
        Boolean getOutputToDatastore();
        void setOutputToDatastore(Boolean value);

        @Description("Cloud Datastore ancestor key")
        @Default.String("root")
        String getDatastoreAncestorKey();
        void setDatastoreAncestorKey(String value);

        @Description("Cloud Datastore output project ID, defaults to project ID")
        String getOutputProject();
        void setOutputProject(String value);
    }


    public static void main(String[] args)  throws IOException{

        Options options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class);

        //  create the pipeline  
        Pipeline p = Pipeline.create(options);

        PCollection<String> toWrite = p

            // A step to read in the product names from a text file on GCS
            .apply(TextIO.read().from("gs://sample-product-data/clean_product_names.txt"))

            // Next expand the product names into KV pairs with prefix as key (<KV<String, String>>)
            .apply("Explode Prefixes", ParDo.of(new AllPrefixes(2)))

            // Apply a GroupByKey transform to the PCollection "flatCollection" to create "productsGroupedByPrefix".
            .apply(GroupByKey.<String, String>create())

            // Now format the PCollection for writing into the Google Datastore
            .apply("FormatForDatastore", ParDo.of(new FormatForDatastore(options.getKind(),
                    options.getDatastoreAncestorKey())) 

            // Write the processed data to the Google Cloud Datastore
            // NOTE: This is the line that I'm getting the error on!!
            .apply(DatastoreIO.v1().write().withProjectId(MoreObjects.firstNonNull(
                    options.getOutputProject(), options.getOutputProject()))));

        // Run the pipeline.
        PipelineResult result = p.run();
    }
}

1 Answer:

Answer 0 (score: 2)

I think you need another closing parenthesis. I've elided the bits that don't affect the parentheses and re-indented according to them:

PCollection<String> toWrite = p
    .apply(TextIO.read().from("..."))
    .apply("Explode Prefixes", ...)
    .apply(GroupByKey.<String, String>create())
    .apply("FormatForDatastore", ParDo.of(new FormatForDatastore(
      options.getKind(), options.getDatastoreAncestorKey()))
        .apply(...);

Specifically, you need another parenthesis to close apply("FormatForDatastore", ...). Right now, the code is trying to call ParDo.of(...).apply(...), which doesn't work.
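
For illustration, here is a sketch of how the end of the question's pipeline might look with that fix, keeping the same transforms and options as the original code and changing only where the parentheses close:

p
    .apply(TextIO.read().from("gs://sample-product-data/clean_product_names.txt"))
    .apply("Explode Prefixes", ParDo.of(new AllPrefixes(2)))
    .apply(GroupByKey.<String, String>create())
    // Close the FormatForDatastore apply here, so it returns a PCollection of entities
    .apply("FormatForDatastore", ParDo.of(new FormatForDatastore(
        options.getKind(), options.getDatastoreAncestorKey())))
    // ...and the Datastore write is then applied to that PCollection, not to the ParDo transform
    .apply(DatastoreIO.v1().write().withProjectId(MoreObjects.firstNonNull(
        options.getOutputProject(), options.getOutputProject())));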