gpt4 book ai didi

hadoop - 在 java map reduce 中选择不同的查询

转载 作者:可可西里 更新时间:2023-11-01 15:05:24 25 4
gpt4 key购买 nike

10001|76884|1995-06-24|1996-06-23
10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

我的目标是删除重复值并输出类似

10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

我写了一段代码如下

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.Path;

import org.apache.hadoop.conf.*;

import org.apache.hadoop.io.*;

import org.apache.hadoop.mapred.JobClient;

import org.apache.hadoop.mapreduce.*;

import org.apache.hadoop.mapreduce.Mapper.Context;

import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class charterSelDistRec {

public static class Map extends Mapper <LongWritable, Text, Text, Text> {
private String tableKey,tableValue;

public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {

String line = value.toString();
String splitarray[] = line.split("\\|",2);
tableKey = splitarray[0].trim();
tableValue = splitarray[1].trim();

context.write(new Text(tableKey), new Text(tableValue));
}
}

public static class Reduce extends Reducer <Text, Text, Text, Text> {
public void reduce(Text key, Iterator<Text> values, Context context)
throws IOException, InterruptedException {
String ColumnDelim="";
String tableOutValue=ColumnDelim+values;
context.write(new Text(key), new Text(tableOutValue));

}
}

public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = new Job(conf,"charterSelDistRec");
job.getConfiguration().set("mapreduce.job.queuename", "root.Dev");
job.getConfiguration().set("mapreduce.output.textoutputformat.separator","|");
job.setJobName("work_charter_stb.ext_chtr_vod_fyi_mapped");
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);

job.setMapperClass(Map.class);

job.setReducerClass(Reduce.class);

job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);


FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.setJarByClass(charterSelDistRec.class);
job.waitForCompletion(true);
}
}

但输出文件仍然有重复项。请让我知道我哪里错了。

最佳答案

没必要这么复杂。您所要做的就是:

  1. 在映射器中,将每一行作为键和任何值发出

  2. 在 reducer 中,只发出键而忽略值。

分享代码:

这里是输入:

10001|76884|1995-06-24|1996-06-23
10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

代码如下:

/**
 * Deduplicates input lines by using each full line as the map output key
 * with a NullWritable value; the shuffle groups identical lines, and the
 * reducer writes each unique line exactly once.
 */
public class StackRemoveDup {

    /** Emits the entire input line as the key; the byte offset is unused. */
    public static class MyMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

        @Override
        public void map(LongWritable offset, Text line, Context context)
                throws java.io.IOException, InterruptedException {
            context.write(line, NullWritable.get());
        }
    }

    /** Writes each grouped key once, discarding the (empty) values. */
    public static class MyReducer extends Reducer<Text, NullWritable, Text, NullWritable> {

        @Override
        public void reduce(Text line, Iterable<NullWritable> unused, Context context)
                throws IOException, InterruptedException {
            context.write(line, NullWritable.get());
        }
    }

    /**
     * Job driver.
     *
     * @param args args[0] = input path, args[1] = output path
     */
    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {

        Job dedupJob = new Job();
        dedupJob.setJobName("StackRemoveDup");
        dedupJob.setJarByClass(StackRemoveDup.class);

        dedupJob.setMapperClass(MyMapper.class);
        dedupJob.setReducerClass(MyReducer.class);

        dedupJob.setOutputKeyClass(Text.class);
        dedupJob.setOutputValueClass(NullWritable.class);

        FileInputFormat.addInputPath(dedupJob, new Path(args[0]));
        FileOutputFormat.setOutputPath(dedupJob, new Path(args[1]));

        dedupJob.waitForCompletion(true);
    }
}

这是输出:

10001|75286|1993-06-24|1994-06-24
10001|76884|1995-06-24|1996-06-23

关于hadoop - 在 java map reduce 中选择不同的查询,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/31008385/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com