Hadoop MapReduce distinct pattern with custom Writable produces duplicate keys


I am trying to implement the distinct pattern:

map(key, record):
    emit record, null
reduce(key, records):
    emit key

My key is a complex custom Writable. If, in the reducer, I emit the key together with its hash code:
context.write(key, new IntWritable(key.hashCode()));

I get the following output:
key1 -1808937256
key2 -768063202
key3 906064410
key2 -768063202
key3 906064410

In theory, the output should contain only key1, key2, and key3, because I am using the HashPartitioner: keys with the same hash code end up in the same partition. That is clearly not what is happening here.
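For reference, Hadoop's default HashPartitioner assigns a key to a reduce partition purely from its hash code, along these lines (a sketch of the stock implementation shipped with Hadoop):

import org.apache.hadoop.mapreduce.Partitioner;

// Sketch of Hadoop's default partitioner
// (org.apache.hadoop.mapreduce.lib.partition.HashPartitioner):
// keys with equal hashCode() always land in the same reduce partition.
public class HashPartitioner<K, V> extends Partitioner<K, V> {
    @Override
    public int getPartition(K key, V value, int numReduceTasks) {
        // mask the sign bit so the partition index is non-negative
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}

Note that partitioning only decides which reducer receives a key; within a partition, keys are grouped into reduce() calls by the key's sort order (compareTo), not by hashCode().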

If I convert my complex Writable into a Text object (adjusting the Mapper/Reducer classes accordingly) and emit this in the Mapper:
 context.write(new Text(key.toString()), NullWritable.get());

...the output is as expected:
key1 1013632023
key2 762485389
key3 -1193948769

OK, here is a minimal working example that illustrates the behavior.

Input:
A A A A A
B B B B B
C C C C C
A A A A A
B B B B B

The MapReduce job:
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;


public class DistinctPattern extends Configured implements Tool {
    public static class DistinctMapper extends Mapper<Object, Text, ComplexObject, NullWritable> {

        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            ComplexObject o = new ComplexObject(value.toString());
            context.write(o, NullWritable.get());
        }
    }

    public static class DistinctReducer extends Reducer<ComplexObject, NullWritable, ComplexObject, IntWritable> {

        public void reduce(ComplexObject key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            context.write(key, new IntWritable(key.hashCode()));
        }
    }

    public static class MyArrayWritable extends ArrayWritable {

        public MyArrayWritable(Writable[] values) {
            super(DatumObject.class, values);
        }

        public MyArrayWritable() {
            super(DatumObject.class);
        }

        @Override
        public String toString() {
            return Arrays.toString(get());
        }
    }

    public static class DatumObject implements Writable {
        private String datum;

        public DatumObject() {}

        public DatumObject(String d) {
            datum = d;
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            datum = in.readUTF();
        }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeUTF(datum);
        }

        @Override
        public String toString() {
            return datum;
        }

        @Override
        public int hashCode() {
            return 31 * datum.hashCode();
        }
    }

    public static class ComplexObject implements WritableComparable<ComplexObject> {
        private List<DatumObject> data = new ArrayList<>();

        public ComplexObject() {}

        public ComplexObject(String d) {
            String[] elements = d.split(" ");
            for (int i = 0; i < elements.length; i++)
                data.add(new DatumObject(elements[i]));
        }

        public int size() {
            return data.size();
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            data.clear();
            MyArrayWritable m = new MyArrayWritable();
            m.readFields(in);
            Writable[] w = m.get();
            for (int i = 0; i < w.length; i++)
                data.add((DatumObject) w[i]);
        }

        @Override
        public void write(DataOutput out) throws IOException {
            MyArrayWritable m = new MyArrayWritable(data.toArray(new DatumObject[data.size()]));
            m.write(out);
        }

        @Override
        public int compareTo(ComplexObject o) {
            if (this.equals(o))
                return 0;

            if (o.size() < this.size())
                return -1;

            return 1;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ComplexObject))
                return false;

            ComplexObject other = (ComplexObject) obj;
            return other.data.equals(data);
        }

        @Override
        public int hashCode() {
            return 31 * data.hashCode();
        }

        @Override
        public String toString() {
            StringBuilder s = new StringBuilder();
            data.forEach(entry -> {
                s.append(entry);
                s.append(" ");
            });
            return s.toString();
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance();
        job.setJar("distinct.jar");
        job.setJarByClass(DistinctPattern.class);
        job.setMapperClass(DistinctMapper.class);
        job.setReducerClass(DistinctReducer.class);
        job.setMapOutputKeyClass(ComplexObject.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(ComplexObject.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new DistinctPattern(), args);
        System.exit(exitCode);
    }
}

Expected output:
A A A A A   368623362
B B B B B   1285710467
C C C C C   -2092169724

Actual output:
A A A A A   368623362
B B B B B   1285710467
C C C C C   -2092169724
A A A A A   368623362
B B B B B   1285710467

What am I missing?

PS: Hadoop 2.7.3

Best Answer

OK, I found the errors in my code. First, the minimal working example was missing an implementation of the equals method in the DatumObject class:

@Override
public boolean equals(Object obj) {
    if (obj == null)
        return false;

    if (!(obj instanceof DatumObject))
        return false;

    DatumObject other = (DatumObject) obj;
    return other.datum.equals(datum);
}

Second, an aspect I could not reproduce in the minimal working example but that was present in my actual code is that not all of my key classes actually implemented the WritableComparable interface. As a result, I suspect the shuffle phase did not sort the keys as expected. Once the compareTo method was correctly implemented in all the classes that make up my key value (see class diagram here), the distinct pattern worked as expected.
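For illustration, a compareTo on ComplexObject that is consistent with equals would compare the keys element by element rather than only by size. A minimal sketch, assuming DatumObject is also given a compareTo that delegates to String.compareTo on its datum field (neither method is part of the code above):

// Sketch only: a compareTo consistent with equals() for ComplexObject.
// Assumes DatumObject implements Comparable<DatumObject> by comparing
// its datum strings (an addition, not shown in the original code).
@Override
public int compareTo(ComplexObject o) {
    int sizeDiff = Integer.compare(this.size(), o.size());
    if (sizeDiff != 0)
        return sizeDiff;
    for (int i = 0; i < data.size(); i++) {
        int c = data.get(i).compareTo(o.data.get(i));
        if (c != 0)
            return c;
    }
    return 0; // same size, same elements: equal keys sort together
}

With an ordering like this, equal keys compare as 0 during the shuffle sort, so all copies of a key are grouped into a single reduce() call.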

Regarding "Hadoop MapReduce distinct pattern with custom Writable produces duplicate keys", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/43615654/
