
java - Problems writing data (TextIO or DatastoreIO) with Google Cloud Dataflow


OK, folks. Another Dataflow question from a Dataflow newbie. (I only started playing with it this week...)

I'm building a data pipeline that takes in a list of product names and generates autocomplete data. The data-processing part all seems to be working fine, but I must be missing something obvious, because when I add the final ".apply" to write the data out with either DatastoreIO or TextIO, I get an error in my IDE that reads as follows:

"The method apply(DatastoreV1.Write) is undefined for the type ParDo.SingleOutput<KV<String, List<String>>, Entity>"

It offers me the option of adding a cast to the method receiver, but that is obviously not the answer. Is there some other step I need to do before trying to write the data out? My last step before attempting the write is a call to Dataflow's Entity helpers to change my pipeline's structure from KV<String, List<String>> to Entity, which seems to me like exactly what I need in order to write to Datastore.
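For context, DatastoreIO.v1().write() builds a DatastoreV1.Write transform, and that transform is meant to be applied to a PCollection<Entity>. A minimal sketch of that usage (the project ID string here is just a placeholder) looks like:

    PCollection<Entity> entities = ...;  // e.g. the output of a ParDo that emits Entity objects
    entities.apply(DatastoreIO.v1().write().withProjectId("my-gcp-project"));  // placeholder project ID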

I've gotten so frustrated with this over the past few days that I even decided to write the data to some AVRO files instead, so I could load it into Datastore by hand. Imagine my excitement when, after getting all of that working, I got the exact same error in the exact same place on my call to TextIO. That's why I think I must be missing something very obvious here.

Here is my code. I've included all of it for reference, but you probably only need to look at main[] at the bottom. Any input would be greatly appreciated! Thanks!

Mr. Simmons Sr.

package com.client.autocomplete;

import com.client.autocomplete.AutocompleteOptions;


import com.google.datastore.v1.Entity;
import com.google.datastore.v1.Key;
import com.google.datastore.v1.Value;

import static com.google.datastore.v1.client.DatastoreHelper.makeKey;
import static com.google.datastore.v1.client.DatastoreHelper.makeValue;
import org.apache.beam.sdk.coders.DefaultCoder;

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionList;
import com.google.api.services.bigquery.model.TableRow;
import com.google.common.base.MoreObjects;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.io.gcp.datastore.DatastoreIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.SimpleFunction;
import org.apache.beam.sdk.transforms.GroupByKey;
import org.apache.beam.sdk.transforms.DoFn.ProcessContext;
import org.apache.beam.sdk.transforms.DoFn.ProcessElement;
import org.apache.beam.sdk.extensions.jackson.ParseJsons;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.options.Default;
import org.apache.beam.sdk.options.Description;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.options.StreamingOptions;
import org.apache.beam.sdk.options.Validation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;

/*
* A simple Dataflow pipeline to create autocomplete data from a list of
* product names. It then loads that prefix data into Google Cloud Datastore for consumption by
* a Google Cloud Function. That function will take in a prefix and return a list of 10 product names
*
* Pseudo Code Steps
* 1. Load a list of product names from Cloud Storage
* 2. Generate prefixes for use with autocomplete, based on the product names
* 3. Merge the prefix data together with 10 products per prefix
* 4. Write that prefix data to the Cloud Datastore as a KV with a <String>, List<String> structure
*
*/

public class ClientAutocompletePipeline {
    private static final Logger LOG = LoggerFactory.getLogger(ClientAutocompletePipeline.class);

    /**
     * A DoFn that keys each product name by all of its prefixes.
     * This creates one row in the PCollection for each prefix<->product_name pair
     */
    private static class AllPrefixes
            extends DoFn<String, KV<String, String>> {
        private final int minPrefix;
        private final int maxPrefix;

        public AllPrefixes(int minPrefix) {
            this(minPrefix, 10);
        }

        public AllPrefixes(int minPrefix, int maxPrefix) {
            this.minPrefix = minPrefix;
            this.maxPrefix = maxPrefix;
        }

        @ProcessElement
        public void processElement(ProcessContext c) {
            String productName = c.element().toString();
            for (int i = minPrefix; i <= Math.min(productName.length(), maxPrefix); i++) {
                c.output(KV.of(productName.substring(0, i), c.element()));
            }
        }
    }

    /**
     * Takes as input the top product names per prefix, and emits an entity
     * suitable for writing to Cloud Datastore.
     */
    static class FormatForDatastore extends DoFn<KV<String, List<String>>, Entity> {
        private String kind;
        private String ancestorKey;

        public FormatForDatastore(String kind, String ancestorKey) {
            this.kind = kind;
            this.ancestorKey = ancestorKey;
        }

        @ProcessElement
        public void processElement(ProcessContext c) {
            // Initialize an EntityBuilder and get it a valid key
            Entity.Builder entityBuilder = Entity.newBuilder();
            Key key = makeKey(kind, ancestorKey).build();
            entityBuilder.setKey(key);

            // New HashMap to hold all the properties of the Entity
            Map<String, Value> properties = new HashMap<>();
            String prefix = c.element().getKey();
            String productsString = "Products[";

            // iterate through the product names and add each one to the productsString
            for (String productName : c.element().getValue()) {
                // products.add(productName);
                productsString += productName + ", ";
            }
            productsString += "]";

            properties.put("prefix", makeValue(prefix).build());
            properties.put("products", makeValue(productsString).build());
            entityBuilder.putAllProperties(properties);
            c.output(entityBuilder.build());
        }
    }


    /**
     * Options supported by this class.
     *
     * <p>Inherits standard Beam example configuration options.
     */
    public interface Options extends AutocompleteOptions {
        @Description("Input text file")
        @Validation.Required
        String getInputFile();
        void setInputFile(String value);

        @Description("Cloud Datastore entity kind")
        @Default.String("prefix-product-map")
        String getKind();
        void setKind(String value);

        @Description("Whether output to Cloud Datastore")
        @Default.Boolean(true)
        Boolean getOutputToDatastore();
        void setOutputToDatastore(Boolean value);

        @Description("Cloud Datastore ancestor key")
        @Default.String("root")
        String getDatastoreAncestorKey();
        void setDatastoreAncestorKey(String value);

        @Description("Cloud Datastore output project ID, defaults to project ID")
        String getOutputProject();
        void setOutputProject(String value);
    }


    public static void main(String[] args) throws IOException {

        Options options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class);

        // create the pipeline
        Pipeline p = Pipeline.create(options);

        PCollection<String> toWrite = p

                // A step to read in the product names from a text file on GCS
                .apply(TextIO.read().from("gs://sample-product-data/clean_product_names.txt"))

                // Next expand the product names into KV pairs with prefix as key (<KV<String, String>>)
                .apply("Explode Prefixes", ParDo.of(new AllPrefixes(2)))

                // Apply a GroupByKey transform to the PCollection "flatCollection" to create "productsGroupedByPrefix".
                .apply(GroupByKey.<String, String>create())

                // Now format the PCollection for writing into the Google Datastore
                .apply("FormatForDatastore", ParDo.of(new FormatForDatastore(options.getKind(),
                        options.getDatastoreAncestorKey()))

                // Write the processed data to the Google Cloud Datastore
                // NOTE: This is the line that I'm getting the error on!!
                .apply(DatastoreIO.v1().write().withProjectId(MoreObjects.firstNonNull(
                        options.getOutputProject(), options.getOutputProject()))));

        // Run the pipeline.
        PipelineResult result = p.run();
    }
}

Best Answer

I think you need another closing parenthesis. I removed some of the extraneous bits and re-indented according to the parentheses:

PCollection<String> toWrite = p
    .apply(TextIO.read().from("..."))
    .apply("Explode Prefixes", ...)
    .apply(GroupByKey.<String, String>create())
    .apply("FormatForDatastore", ParDo.of(new FormatForDatastore(
        options.getKind(), options.getDatastoreAncestorKey()))
    .apply(...);

Specifically, you need another parenthesis to close the apply("FormatForDatastore", ...). Right now the code is trying to call ParDo.of(...).apply(...), which doesn't work.
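Applied to the main() from the question, a minimal corrected sketch might look like the following. It splits the chain into separate statements so the parenthesis placement and the types are easier to see, and it reuses the question's option getters verbatim. Note that, independent of the parenthesis problem, FormatForDatastore would also need to accept KV<String, Iterable<String>>, because that is what GroupByKey emits:

    PCollection<KV<String, Iterable<String>>> grouped = p
        .apply(TextIO.read().from("gs://sample-product-data/clean_product_names.txt"))
        .apply("Explode Prefixes", ParDo.of(new AllPrefixes(2)))
        .apply(GroupByKey.<String, String>create());

    // The ParDo application is closed here, so the result is a PCollection of entities
    // (assumes FormatForDatastore is adjusted to DoFn<KV<String, Iterable<String>>, Entity>)
    PCollection<Entity> entities = grouped
        .apply("FormatForDatastore", ParDo.of(new FormatForDatastore(
            options.getKind(), options.getDatastoreAncestorKey())));

    // DatastoreIO.v1().write() is applied to the PCollection<Entity>, not chained onto the ParDo
    entities.apply(DatastoreIO.v1().write().withProjectId(MoreObjects.firstNonNull(
        options.getOutputProject(), options.getOutputProject())));  // project lookup kept as in the question

    p.run();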

Regarding "java - Problems writing data (TextIO or DatastoreIO) with Google Cloud Dataflow", the original question can be found on Stack Overflow: https://stackoverflow.com/questions/44360908/
