
java - MapReduce ArrayList type mismatch


Hi everyone, I've been working with Hadoop for a week now and am experimenting with it.

I have the following CSV input values:

    PRAVEEN,400201399,Baby,026A1K,12/04/2010
    PRAVEEN,4002013410,TOY,02038L,1/04/2014
    PRAVEEN,2727272727272,abc,03383,03/14/2015
    PRAVEEN,2263637373,cde,7373737,12/24/2012

The map function should pick the second value in the CSV as the key (i.e. 400201399, etc.) and the third and last values as the VALUE (for example TOY and 12/04/2010), and I want to put those values inside an ArrayList instead of a Text.

But I'm getting the following error:

    Error: java.io.IOException: Type mismatch in value from map: expected org.apache.hadoop.io.Text, received java.util.ArrayList

The reduce function is also simple: I have to iterate over the list and produce the required result as the final value (in the reduce code below I only pick the date from the list).

Here is my code:

    package com.test.mapreduce;

    import java.io.IOException;
    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.ArrayList;
    import java.util.Date;
    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.ArrayWritable;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.KeyValueTextInputFormat;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.hadoop.mapred.TextOutputFormat;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class RetailCustomerProduct extends Configured implements Tool {

        public static class MapClass extends MapReduceBase
                implements Mapper<LongWritable, Text, Text, List<Text>> {

            private Text key1 = new Text();
            private List<Text> productList = new ArrayList<Text>();
            private Text value1 = new Text();
            private Text product = new Text();
            private int noofFields = 5;

            public void map(LongWritable key, Text value,
                    OutputCollector<Text, List<Text>> output,
                    Reporter reporter) throws IOException {

                String line = value.toString().replaceAll("\\s+", "");
                String[] split = line.split(",");

                if (split.length != noofFields) {
                    return;
                } else {
                    key1.set((split[1]));
                    value1.set(split[4].toString().trim());
                    product.set(split[2].toString().trim());
                    productList.add(value1);
                    productList.add(product);

                    System.out.println(split[4].toString().trim());
                    output.collect(key1, productList);
                }
            }
        }

        public static class Reduce extends MapReduceBase implements Reducer<Text, List<Text>, Text, Text> {

            public void reduce(Text key, Iterator<List<Text>> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {

                SimpleDateFormat formatter = new SimpleDateFormat("MM/dd/yyyy");
                Date date = new Date();

                List<String> dateList = new ArrayList<String>();
                List<String> productList = new ArrayList<String>();

                for (Iterator<List<Text>> it = values; it.hasNext();) {
                    // add the values in the arrayList
                    dateList.add(((Text) it.next().get(0)).toString());
                    productList.add(((Text) it.next().get(1)).toString());
                }

                if (dateList.size() == 1) {
                    try {
                        date = formatter.parse(dateList.get(0).toString());
                    } catch (ParseException e) {
                        e.printStackTrace();
                    }
                } else {
                    String str = dateList.get(0).toString();
                    try {
                        date = formatter.parse(dateList.get(0).toString());
                    } catch (ParseException e1) {
                        e1.printStackTrace();
                    }

                    for (int i = 0; i < dateList.size(); ++i) {
                        try {
                            if ((formatter.parse(dateList.get(i).toString())).compareTo(date) > 0) {
                                date = formatter.parse(dateList.get(i).toString());
                                // getting the max date from the list
                            }
                        } catch (ParseException e) {
                            e.printStackTrace();
                        }
                    }
                }

                Text value = new Text(date.toString());
                output.collect(key, value);
            }
        }

        public int run(String[] args) throws Exception {
            Configuration conf = getConf();

            JobConf job = new JobConf(conf, RetailCustomerProduct.class);

            Path in = new Path(args[0]);
            Path out = new Path(args[1]);
            FileInputFormat.setInputPaths(job, in);
            FileOutputFormat.setOutputPath(job, out);

            job.setJobName("RetailCustomerProduct");
            job.setMapperClass(MapClass.class);
            job.setReducerClass(Reduce.class);

            job.setInputFormat(TextInputFormat.class);
            job.setOutputFormat(TextOutputFormat.class);

            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.set("key.value.separator.in.input.line", ",");

            JobClient.runJob(job);

            return 0;
        }

        public static void main(String[] args) throws Exception {
            int res = ToolRunner.run(new Configuration(), new RetailCustomerProduct(), args);

            System.exit(res);
        }
    }

Is there a different ArrayList implementation in Hadoop?

My map function should take a LongWritable as the KEY and Text as the VALUE, and should output Text as the KEY and an ArrayList as the VALUE.

My reduce function should accept Text as the KEY and an ArrayList as the VALUE, and then output Text as the KEY and Text as the VALUE.
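
For what it's worth, as far as I know Hadoop does not ship a Writable ArrayList; the closest built-in container is ArrayWritable (already imported in the code above), which is normally subclassed so the framework knows the element type. A minimal, illustrative sketch, assuming a hypothetical wrapper class named TextArrayWritable:

    import org.apache.hadoop.io.ArrayWritable;
    import org.apache.hadoop.io.Text;

    // Hypothetical wrapper (not from the question): the no-arg constructor is what
    // lets Hadoop instantiate and deserialize the value on the reduce side.
    public class TextArrayWritable extends ArrayWritable {
        public TextArrayWritable() {
            super(Text.class);
        }

        public TextArrayWritable(Text[] values) {
            super(Text.class, values);
        }
    }

With a value type like this, the mapper would be declared Mapper<LongWritable, Text, Text, TextArrayWritable> and the driver would need job.setMapOutputValueClass(TextArrayWritable.class); the answer below sidesteps all of this by packing the fields into a single Text.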

So in the driver class, which classes need to be set? Currently it looks like this:

    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

Could anyone help me correct the code?

Best Answer

I'm new to Hadoop too, but I think this line is the problem:

    job.setOutputValueClass(Text.class);

This sets the output value type to Text, not List<Text>. I haven't tried outputting a list; instead, I build a tab-delimited string from the list and output it as an instance of Text:

    new Text(split[4].toString().trim() + "\t" + split[2].toString().trim());
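
A minimal sketch of that approach, adapted from the question's code (untested; the class name RetailCustomerProductFixed and the exact field handling are my own), might look like this:

    package com.test.mapreduce;

    import java.io.IOException;
    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.Iterator;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;

    public class RetailCustomerProductFixed {

        public static class MapClass extends MapReduceBase
                implements Mapper<LongWritable, Text, Text, Text> {

            public void map(LongWritable key, Text value,
                    OutputCollector<Text, Text> output, Reporter reporter)
                    throws IOException {
                String[] split = value.toString().replaceAll("\\s+", "").split(",");
                if (split.length != 5) {
                    return;
                }
                // Pack date and product into one tab-delimited Text value, so the
                // declared map output value class (Text) matches what is emitted.
                output.collect(new Text(split[1]),
                        new Text(split[4].trim() + "\t" + split[2].trim()));
            }
        }

        public static class Reduce extends MapReduceBase
                implements Reducer<Text, Text, Text, Text> {

            public void reduce(Text key, Iterator<Text> values,
                    OutputCollector<Text, Text> output, Reporter reporter)
                    throws IOException {
                SimpleDateFormat formatter = new SimpleDateFormat("MM/dd/yyyy");
                Date latest = null;
                while (values.hasNext()) {
                    // Unpack the tab-delimited value: fields[0] is the date, fields[1] the product.
                    String[] fields = values.next().toString().split("\t");
                    try {
                        Date d = formatter.parse(fields[0]);
                        if (latest == null || d.compareTo(latest) > 0) {
                            latest = d; // keep the most recent date for this key
                        }
                    } catch (ParseException e) {
                        // skip unparsable dates
                    }
                }
                if (latest != null) {
                    output.collect(key, new Text(latest.toString()));
                }
            }
        }
    }

With both the map and reduce output value types declared as Text, the driver's existing job.setOutputKeyClass(Text.class) and job.setOutputValueClass(Text.class) settings should already match, and no separate setMapOutputValueClass call should be needed.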

Regarding java - MapReduce ArrayList type mismatch, a similar question was found on Stack Overflow: https://stackoverflow.com/questions/24808681/
