gpt4 book ai didi

hadoop - Reducer 没有被调用

转载 作者:可可西里 更新时间:2023-11-01 16:59:17 27 4
gpt4 key购买 nike

这是埃博拉数据集的代码。这里根本没有调用 reducer 。映射器输出仅被打印。

驱动类:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
/**
 * Driver for the Ebola MapReduce job.
 *
 * <p>Reads lines via {@link KeyValueTextInputFormat} split at the first space
 * (key = text before the space, value = the remainder), runs
 * {@code EbolaMapper} then {@code EbolReducer}, and writes Text/Text output.
 *
 * <p>Usage: {@code Ebola <input path> <output path>}
 */
public class Ebola {
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of declaring the unchecked
        // ArrayIndexOutOfBoundsException in the throws clause.
        if (args.length < 2) {
            System.err.println("Usage: Ebola <input path> <output path>");
            System.exit(2);
        }

        Configuration conf = new Configuration();
        // Tell KeyValueTextInputFormat to split each input line at the first space.
        conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", " ");

        // Job.getInstance replaces the deprecated new Job(Configuration, String).
        Job job = Job.getInstance(conf, "Ebola");
        job.setJarByClass(Ebola.class);
        job.setInputFormatClass(KeyValueTextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setMapperClass(EbolaMapper.class);
        job.setReducerClass(EbolReducer.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Propagate job success/failure to the shell as an exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

这是映射器:

import java.io.IOException;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Mapper;
public class EbolaMapper extends Mapper <Text, Text, Text, Text> {
public void map(Text key, Text value, Context con) throws IOException, InterruptedException {
Text cumValues = new Text();
String record = value.toString();

String p[] = record.split(" ",2);

String cases = p[0];
String death = p[1];

String cValues = death + "->" + cases;

cumValues.set(cValues);

con.write(key, cumValues);
}
}

最后,reducer:

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class EbolReducer extends Reducer<Text, Text, Text, Text> {
public void reduce(Text key, Text value, Context con) throws IOException{
Text cumulValues = new Text();
String cumVal = value.toString();
String[] p = cumVal.split("->",2);
String death = p[0];
String cases = p[1];
Float d = Float.parseFloat(death);
Float c = Float.parseFloat(cases);
Float perc = (d/c)*100;
String percent = String.valueOf(perc);
cumulValues.set(percent);
con.write(key,cumulValues);
}
}

输出只是映射器的输出，reducer 根本没有被调用。任何帮助都将不胜感激。

最佳答案

问题在于 reduce 方法的签名不对。代替 public void reduce(Text key, Text value, Context con)

你需要使用 Iterable：

public void reduce(Text key, Iterable<Text> values, Context con)

原来的签名与父类 Reducer.reduce(Text, Iterable<Text>, Context) 不匹配，所以它并没有重写（override）父类方法，框架于是调用了默认的恒等 reducer，输出也就只是映射器的输出。在方法上加上 @Override 注解可以让编译器在出现这类签名不匹配时直接报错。

关于hadoop - Reducer 没有被调用,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/26544279/

27 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com