
Usage of the org.apache.crunch.types.writable.Writables.tableOf() method, with code examples

Reposted · Author: 知者 · Updated: 2024-03-27 06:29:05

This article collects a number of Java code examples for the org.apache.crunch.types.writable.Writables.tableOf() method and shows how Writables.tableOf() is used in practice. The examples are drawn from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as a useful reference. Details of the Writables.tableOf() method:
Package path: org.apache.crunch.types.writable.Writables
Class name: Writables
Method name: tableOf

Writables.tableOf overview

No description is available in the source this article was compiled from. Judging from the examples below, Writables.tableOf(PType<K> keyType, PType<V> valueType) returns a PTableType<K, V> describing a table of Writable-serialized key/value pairs; the returned type is typically passed to parallelDo so that the resulting PTable knows how to serialize its keys and values.
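
As a quick orientation before the collected examples, the following is a minimal, self-contained sketch (not taken from any of the projects below) of how a PTableType built by Writables.tableOf() is passed to parallelDo in a word-count-style pipeline; the class name WordCountSketch and the argument handling are illustrative assumptions only.

import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;

public class WordCountSketch {
 public static void main(String[] args) {
  Pipeline pipeline = new MRPipeline(WordCountSketch.class, new Configuration());
  PCollection<String> lines = pipeline.readTextFile(args[0]);
  // Writables.tableOf(keyType, valueType) builds the PTableType that tells
  // Crunch how to serialize the (word, 1L) pairs emitted by the DoFn below.
  PTable<String, Long> counts = lines.parallelDo(
    new DoFn<String, Pair<String, Long>>() {
     @Override
     public void process(String line, Emitter<Pair<String, Long>> emitter) {
      for (String word : line.split("\\s+")) {
       emitter.emit(Pair.of(word, 1L));
      }
     }
    },
    Writables.tableOf(Writables.strings(), Writables.longs()));
  // Sum the counts per word and write the result out as text.
  pipeline.writeTextFile(
    counts.groupByKey().combineValues(Aggregators.SUM_LONGS()), args[1]);
  pipeline.done();
 }
}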

Code examples

Code example source: cloudera/crunch

public <K, V> PTableType<K, V> tableOf(PType<K> key, PType<V> value) {
 return Writables.tableOf(key, value);
}

Code example source: org.apache.crunch/crunch-core

public <K, V> PTableType<K, V> tableOf(PType<K> key, PType<V> value) {
 return Writables.tableOf(key, value);
}

Code example source: org.apache.crunch/crunch

public <K, V> PTableType<K, V> tableOf(PType<K> key, PType<V> value) {
 return Writables.tableOf(key, value);
}

Code example source: org.apache.crunch/crunch-core

private static <K extends Writable, V extends Writable> PTableType<K, V> tableOf(
  Class<K> keyClass, Class<V> valueClass) {
 return Writables.tableOf(Writables.writables(keyClass), Writables.writables(valueClass));
}

Code example source: apache/crunch

/**
 * Extract information from hbase
 *
 * @param words the source from hbase
 * @return a {@code PTable} composed of the type of the input as key
 *         and its def as value
 */
public PTable<String, String> extractText(PTable<ImmutableBytesWritable, Result> words) {
 return words.parallelDo("Extract text", new DoFn<Pair<ImmutableBytesWritable, Result>, Pair<String, String>>() {
  @Override
  public void process(Pair<ImmutableBytesWritable, Result> row, Emitter<Pair<String, String>> emitter) {
   byte[] type = row.second().getValue(COLUMN_FAMILY_SOURCE, COLUMN_QUALIFIER_SOURCE_PLAY);
   byte[] def = row.second().getValue(COLUMN_FAMILY_SOURCE, COLUMN_QUALIFIER_SOURCE_QUOTE);
   if (type != null && def != null) {
    emitter.emit(new Pair<String, String>(Bytes.toString(type), Bytes.toString(def)));
   }
  }
 }, Writables.tableOf(Writables.strings(), Writables.strings()));
}

Code example source: apache/crunch

public int run(String[] args) throws Exception {
 if (args.length != 2) {
  System.err.println();
  System.err.println("Two and only two arguments are accepted.");
  System.err.println("Usage: " + this.getClass().getName() + " [generic options] input output");
  System.err.println();
  GenericOptionsParser.printGenericCommandUsage(System.err);
  return 1;
 }
 // Create an object to coordinate pipeline creation and execution.
 Pipeline pipeline = new MRPipeline(TotalBytesByIP.class, getConf());
 // Reference a given text file as a collection of Strings.
 PCollection<String> lines = pipeline.readTextFile(args[0]);
 // Aggregator used for summing up response size
 Aggregator<Long> agg = Aggregators.SUM_LONGS();
 // Table of (ip, sum(response size))
 PTable<String, Long> ipAddrResponseSize = lines
   .parallelDo(extractIPResponseSize, Writables.tableOf(Writables.strings(), Writables.longs())).groupByKey()
   .combineValues(agg);
 pipeline.writeTextFile(ipAddrResponseSize, args[1]);
 // Execute the pipeline as a MapReduce.
 PipelineResult result = pipeline.done();
 return result.succeeded() ? 0 : 1;
}
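
The extractIPResponseSize DoFn referenced above is not included in this excerpt. As a hedged sketch only, a DoFn consistent with the Writables.tableOf(Writables.strings(), Writables.longs()) type used here could look like the following; the log-line parsing (Common Log Format, response size in the last field) is an assumption, not the original implementation.

// Hypothetical sketch of the extractIPResponseSize DoFn referenced above;
// the field layout assumed here is Common Log Format.
DoFn<String, Pair<String, Long>> extractIPResponseSize =
  new DoFn<String, Pair<String, Long>>() {
   @Override
   public void process(String line, Emitter<Pair<String, Long>> emitter) {
    String[] fields = line.split(" ");
    if (fields.length > 0) {
     try {
      // Client IP is the first field; response size is the last field.
      emitter.emit(Pair.of(fields[0], Long.parseLong(fields[fields.length - 1])));
     } catch (NumberFormatException e) {
      // Skip lines whose size field is "-" or otherwise not a number.
     }
    }
   }
  };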

Code example source: apache/crunch

public int run(String[] args) throws Exception {
 if (args.length != 2) {
  System.err.println();
  System.err.println("Two and only two arguments are accepted.");
  System.err.println("Usage: " + this.getClass().getName() + " [generic options] input output");
  System.err.println();
  GenericOptionsParser.printGenericCommandUsage(System.err);
  return 1;
 }
 // Create an object to coordinate pipeline creation and execution.
 Pipeline pipeline = new MRPipeline(AverageBytesByIP.class, getConf());
 // Reference a given text file as a collection of Strings.
 PCollection<String> lines = pipeline.readTextFile(args[0]);
 // Aggregator used for summing up response size and count
 Aggregator<Pair<Long, Long>> agg = pairAggregator(SUM_LONGS(), SUM_LONGS());
 // Table of (ip, sum(response size), count)
 PTable<String, Pair<Long, Long>> remoteAddrResponseSize = lines
   .parallelDo(extractResponseSize,
     Writables.tableOf(Writables.strings(), Writables.pairs(Writables.longs(), Writables.longs()))).groupByKey()
   .combineValues(agg);
 // Calculate average response size by ip address
 PTable<String, Double> avgs = remoteAddrResponseSize.parallelDo(calulateAverage,
   Writables.tableOf(Writables.strings(), Writables.doubles()));
 // write the result to a text file
 pipeline.writeTextFile(avgs, args[1]);
 // Execute the pipeline as a MapReduce.
 PipelineResult result = pipeline.done();
 return result.succeeded() ? 0 : 1;
}
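
The extractResponseSize and calulateAverage functions are likewise not shown in this excerpt. A minimal sketch of a MapFn matching the Writables.tableOf(Writables.strings(), Writables.doubles()) output type used for calulateAverage might look as follows; this is an assumption about its shape, not the original code.

// Hypothetical sketch of the calulateAverage function referenced above:
// it turns (ip, (total bytes, request count)) into (ip, average bytes).
MapFn<Pair<String, Pair<Long, Long>>, Pair<String, Double>> calulateAverage =
  new MapFn<Pair<String, Pair<Long, Long>>, Pair<String, Double>>() {
   @Override
   public Pair<String, Double> map(Pair<String, Pair<Long, Long>> input) {
    Pair<Long, Long> sumAndCount = input.second();
    return Pair.of(input.first(),
      (double) sumAndCount.first() / sumAndCount.second());
   }
  };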

Code example source: apache/crunch

// Fragment: the tail of a parallelDo call that pairs each cell with a null
// value; a reconstruction sketch follows the last example below.
return Pair.of(input, (Void) null);
}, tableOf(cells.getPType(), nulls()));

Code example source: org.apache.crunch/crunch-hbase

// Fragment: the same truncated call as above, from the crunch-hbase artifact.
return Pair.of(input, (Void) null);
}, tableOf(cells.getPType(), nulls()));
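
Both fragments above are truncated in the source this article was compiled from. For context, a hedged reconstruction of the surrounding call (assuming a PCollection<C> named cells, as the variable name suggests, and static imports of Writables.tableOf and Writables.nulls) could look like:

// Hedged reconstruction of the truncated parallelDo call: each cell is
// paired with a null value so the collection can be handled as a
// PTable<C, Void> keyed by the cells themselves. The type parameter C
// and the MapFn wrapper are assumptions based on the visible fragment.
PTable<C, Void> keyedCells = cells.parallelDo(
  new MapFn<C, Pair<C, Void>>() {
   @Override
   public Pair<C, Void> map(C input) {
    return Pair.of(input, (Void) null);
   }
  },
  tableOf(cells.getPType(), nulls()));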
