hadoop - Reading parameters in Hadoop MapReduce


I am new to Hadoop MapReduce. I am trying to implement search in MapReduce, so my input file looks like this:

key1 value1,value3
key2 value2,value6

I want to find the list of values for a key that the user will pass as a command-line argument. For this, my main (driver) class is:

public static void main(String[] args) {
    JobClient client = new JobClient();
    JobConf conf = new JobConf(NameSearchJava.class);

    // Right now I am trying with the search key written in code ("Joy");
    // later I'll try to pass it as an argument when running the job from Hadoop.
    conf.set("searcKey", "Joy");
    conf.setJobName("Search");

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    conf.setMapperClass(SearchMapper.class);
    conf.setReducerClass(SearchReducer.class);
    client.setConf(conf);

    try {
        JobClient.runJob(conf);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

And my configure function is:

String item;

public void configure(JobConf job) {
    item = job.get("test");
    System.out.println(item);
    System.err.println("search" + item);
}

Where should the configure function be written, in the Mapper or in the Reducer? How do I use this item parameter in the reducer to do the comparison? And is this the correct way to read a parameter in Hadoop?
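Note that configure(JobConf) belongs inside the Mapper (or Reducer) class itself, and the key it reads back must match the key the driver set: the code above sets "searcKey" but reads "test", so item will always be null. Below is a minimal sketch of a matching pair for the old org.apache.hadoop.mapred API; the key name "searchKey" and this SearchMapper body are illustrative, not the original code.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Illustrative old-API mapper: the driver would call conf.set("searchKey", "Joy"),
// and configure() reads back the SAME key.
public class SearchMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, Text, Text> {

    private String searchKey;

    @Override
    public void configure(JobConf job) {
        // Runs once per task, before any map() calls.
        searchKey = job.get("searchKey");
    }

    @Override
    public void map(LongWritable key, Text value,
            OutputCollector<Text, Text> output, Reporter reporter)
            throws IOException {
        // Input lines look like: key1 value1,value3
        String[] parts = value.toString().split("\\s+", 2);
        if (parts.length == 2 && parts[0].equals(searchKey)) {
            output.collect(new Text(parts[0]), new Text(parts[1]));
        }
    }
}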

Best Answer

Adding to Hadooper's answer.

Here is the complete code.

You can refer to Hadooper's answer for the explanation.

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * @author Unmesha Sreeveni
 * @date 23 Sep 2014
 */
public class StringSearchDriver extends Configured implements Tool {

    public static class Map extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Read the search word back from the job configuration.
            Configuration conf = context.getConfiguration();
            String searchString = conf.get("word");

            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                String token = tokenizer.nextToken();
                if (token.equals(searchString)) {
                    word.set(token);
                    context.write(word, one);
                }
            }
        }
    }

    public static class Reduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int res = ToolRunner.run(conf, new StringSearchDriver(), args);
        System.exit(res);
    }

    @Override
    public int run(String[] args) throws Exception {
        if (args.length != 3) {
            System.out.printf(
                    "Usage: StringSearchDriver <input dir> <output dir> <search word>\n");
            return -1;
        }

        String source = args[0];
        String dest = args[1];
        String searchword = args[2];

        // Pass the search word to the tasks through the configuration.
        // getConf() returns the configuration that ToolRunner prepared.
        Configuration conf = getConf();
        conf.set("word", searchword);

        Job job = new Job(conf, "Search String");
        job.setJarByClass(StringSearchDriver.class);

        // Delete the output directory if it already exists.
        FileSystem fs = FileSystem.get(conf);
        Path in = new Path(source);
        Path out = new Path(dest);
        if (fs.exists(out)) {
            fs.delete(out, true);
        }

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }
}
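If the comparison has to happen on the reduce side instead, the same pattern applies there: read the parameter once in setup(), the new-API counterpart of configure(). Here is a minimal sketch, assuming the same "word" key set in run() above and a mapper that emits the question's (key, value-list) pairs as Text rather than counts; SearchValuesReducer is a hypothetical name, not part of the code above.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical reducer that filters by the search key instead of counting.
public class SearchValuesReducer extends Reducer<Text, Text, Text, Text> {

    private String searchKey;

    @Override
    protected void setup(Context context)
            throws IOException, InterruptedException {
        // Runs once per task, before any reduce() calls.
        searchKey = context.getConfiguration().get("word");
    }

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Emit the value lists only for the key the user asked for.
        if (key.toString().equals(searchKey)) {
            for (Text value : values) {
                context.write(key, value);
            }
        }
    }
}

Because the driver goes through ToolRunner, the job is started as hadoop jar <your jar> StringSearchDriver <input dir> <output dir> <search word>, and ToolRunner also parses generic options such as -D word=Joy before run() is called.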

Regarding hadoop - reading parameters in Hadoop MapReduce, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/26009358/
