hadoop - Data Join in Hadoop MapReduce

Reposted. Author: 可可西里. Updated: 2023-11-01 15:18:45

I am trying to implement a use case from the book Hadoop in Action, but the code does not compile. I am new to Java, so I cannot work out the exact cause of the errors.

Interestingly, another piece of code that uses the same classes and methods compiles successfully.

hadoop@hadoopnode1:~/hadoop-0.20.2/playground/src$ javac -classpath /home/hadoop/hadoop-0.20.2/hadoop-0.20.2-core.jar:/home/hadoop/hadoop-0.20.2/lib/commons-cli-1.2.jar:/home/hadoop/hadoop-0.20.2/contrib/datajoin/hadoop-0.20.2-datajoin.jar -d ../classes DataJoin2.java 
DataJoin2.java:49: cannot find symbol
symbol : constructor TaggedWritable(org.apache.hadoop.io.Text)
location: class DataJoin2.TaggedWritable
TaggedWritable retv = new TaggedWritable((Text) value);
^
DataJoin2.java:69: cannot find symbol
symbol : constructor TaggedWritable(org.apache.hadoop.io.Text)
location: class DataJoin2.TaggedWritable
TaggedWritable retv = new TaggedWritable(new Text(joinedStr));
^
DataJoin2.java:113: setMapperClass(java.lang.Class<? extends org.apache.hadoop.mapreduce.Mapper>) in org.apache.hadoop.mapreduce.Job cannot be applied to (java.lang.Class<DataJoin2.MapClass>)
job.setMapperClass(MapClass.class);
^
DataJoin2.java:114: setReducerClass(java.lang.Class<? extends org.apache.hadoop.mapreduce.Reducer>) in org.apache.hadoop.mapreduce.Job cannot be applied to (java.lang.Class<DataJoin2.Reduce>)
job.setReducerClass(Reduce.class);
^
4 errors

---------------- Code ------------------------

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

// DataJoin Classes
import org.apache.hadoop.contrib.utils.join.DataJoinMapperBase;
import org.apache.hadoop.contrib.utils.join.TaggedMapOutput;
import org.apache.hadoop.contrib.utils.join.DataJoinReducerBase;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;


public class DataJoin2
{
    public static class MapClass extends DataJoinMapperBase
    {
        protected Text generateInputTag(String inputFile)
        {
            String datasource = inputFile.split("-")[0];
            return new Text(datasource);
        }

        protected Text generateGroupKey(TaggedMapOutput aRecord)
        {
            String line = ((Text) aRecord.getData()).toString();
            String[] tokens = line.split(",");
            String groupKey = tokens[0];
            return new Text(groupKey);
        }

        protected TaggedMapOutput generateTaggedMapOutput(Object value)
        {
            TaggedWritable retv = new TaggedWritable((Text) value);
            retv.setTag(this.inputTag);
            return retv;
        }
    } // End of class MapClass

    public static class Reduce extends DataJoinReducerBase
    {
        protected TaggedMapOutput combine(Object[] tags, Object[] values)
        {
            if (tags.length < 2) return null;
            String joinedStr = "";
            for (int i = 0; i < values.length; i++)
            {
                if (i > 0) joinedStr += ",";
                TaggedWritable tw = (TaggedWritable) values[i];
                String line = ((Text) tw.getData()).toString();
                String[] tokens = line.split(",", 2);
                joinedStr += tokens[1];
            }
            TaggedWritable retv = new TaggedWritable(new Text(joinedStr));
            retv.setTag((Text) tags[0]);
            return retv;
        }
    } // End of class Reduce

    public static class TaggedWritable extends TaggedMapOutput
    {
        private Writable data;

        public TaggedWritable()
        {
            this.tag = new Text("");
            this.data = data;
        }

        public Writable getData()
        {
            return data;
        }

        public void write(DataOutput out) throws IOException
        {
            this.tag.write(out);
            this.data.write(out);
        }

        public void readFields(DataInput in) throws IOException
        {
            this.tag.readFields(in);
            this.data.readFields(in);
        }
    } // End of class TaggedWritable

    public static void main(String[] args) throws Exception
    {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: DataJoin2 <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "DataJoin");
        job.setJarByClass(DataJoin2.class);
        job.setMapperClass(MapClass.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormatClass(TextInputFormat.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(TaggedWritable.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

Accepted Answer

There is no ambiguity in the error message. It is telling you that you have not provided a constructor for TaggedWritable that takes an argument of type Text; the posted code shows only a no-argument constructor.
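
As an illustration only, here is a minimal sketch of TaggedWritable with such a constructor added, assuming the rest of the class stays as posted. Note that the no-argument constructor must remain, because Hadoop instantiates Writable types reflectively during deserialization:

public static class TaggedWritable extends TaggedMapOutput
{
    private Writable data;

    // Hadoop creates Writables reflectively when deserializing,
    // so the no-argument constructor must stay.
    public TaggedWritable()
    {
        this.tag = new Text("");
        this.data = new Text("");  // initialize the field; the posted code self-assigns here
    }

    // The constructor the compiler reports as missing. Declaring the
    // parameter as Writable also covers both Text call sites.
    public TaggedWritable(Writable data)
    {
        this.tag = new Text("");
        this.data = data;
    }

    public Writable getData()
    {
        return data;
    }

    public void write(DataOutput out) throws IOException
    {
        this.tag.write(out);
        this.data.write(out);
    }

    public void readFields(DataInput in) throws IOException
    {
        this.tag.readFields(in);
        this.data.readFields(in);
    }
}

With that constructor in place, the two "cannot find symbol" errors at lines 49 and 69 should go away.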

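The remaining two errors (lines 113 and 114) are a separate issue: the datajoin contrib base classes (DataJoinMapperBase, DataJoinReducerBase) are written against the old org.apache.hadoop.mapred API, while org.apache.hadoop.mapreduce.Job.setMapperClass and setReducerClass expect subclasses of the new-API Mapper and Reducer. One way out is to drive the job with the old JobConf/JobClient API instead. Below is a minimal, untested sketch of such a driver; the class name DataJoin2Driver is hypothetical, and it assumes Hadoop 0.20.2 and the DataJoin2 classes above:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class DataJoin2Driver {
    public static void main(String[] args) throws Exception {
        // Old-API job configuration: JobConf takes the place of Job.
        JobConf job = new JobConf(DataJoin2.class);
        job.setJobName("DataJoin");

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // The old-API setters accept org.apache.hadoop.mapred.Mapper/Reducer,
        // which the datajoin contrib base classes implement via JobBase.
        job.setMapperClass(DataJoin2.MapClass.class);
        job.setReducerClass(DataJoin2.Reduce.class);

        job.setInputFormat(TextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DataJoin2.TaggedWritable.class);

        JobClient.runJob(job);
    }
}

The driver logic could equally live in DataJoin2's own main method; the essential point is that with the contrib datajoin classes the old JobConf/JobClient API has to be used throughout.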
Regarding hadoop - Data Join in Hadoop MapReduce, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/10778372/
