
java - MapReduce program not reading text beyond the limit

I am new to Hadoop and am working through some MapReduce programs. I am trying to read a CSV file using the Mapper class. The CSV contains a header row and values in up to 20 columns. Strangely, while reading the file the program runs fine up to the 17th index, but then I get an ArrayIndexOutOfBoundsException. I cannot understand it: the 18th index exists in the file, yet the exception is still thrown.

Here is my code:

package org.apress.prohadoop.c3;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.log4j.Logger;

public class CSVFileProcessorNewAPI {

    protected static Logger logger = Logger.getLogger(CSVFileProcessorNewAPI.class);

    // Column indexes into the CSV record.
    public static class LastFmConstants {

        public static final int match_id = 0;
        public static final int inning_id = 1;
        public static final int batting_team = 2;
        public static final int bowling_team = 3;
        public static final int over = 4;
        public static final int ball = 5;
        public static final int batsman = 6;
        public static final int non_striker = 7;
        public static final int bowler = 8;
        public static final int is_super_over = 9;
        public static final int wide_runs = 10;
        public static final int total_runs_inOver = 17;
        public static final int player_dismissed = 18;
        public static final int dismissal_kind = 19;
    }

    public static class MyMapper extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, IntWritable> {

        public void map(LongWritable key, Text value,
                        OutputCollector<Text, IntWritable> output,
                        Reporter reporter) throws IOException {

            logger.info("Vibhas Logger Started");

            try {
                // Some condition identifying the header row.
                if (key.get() == 0 && value.toString().contains("header")) {
                    return;
                }
            } catch (Exception e) {
                e.printStackTrace();
            }

            String[] parts = value.toString().split("[,]");

            String inning_id = parts[LastFmConstants.inning_id];
            String match_id_ = parts[LastFmConstants.match_id];
            String batting_team = parts[LastFmConstants.batting_team];
            String bowling_team = parts[LastFmConstants.bowling_team];
            String over = parts[LastFmConstants.over];
            String ball = parts[LastFmConstants.ball];
            String batsman = parts[LastFmConstants.batsman];
            String non_striker = parts[LastFmConstants.non_striker];
            String bowler = parts[LastFmConstants.bowler];
            String wide_runs = parts[LastFmConstants.wide_runs];
            String total_runs_inOver = parts[LastFmConstants.total_runs_inOver];
            String player_Dismissed = parts[LastFmConstants.player_dismissed];
            String dismissal_kind = parts[LastFmConstants.dismissal_kind];

            if (!bowler.isEmpty() && bowler.trim().contains("Chahal")
                    && dismissal_kind.equalsIgnoreCase("S Dhawan")) {
                int runs = Integer.parseInt(total_runs_inOver);
                output.collect(new Text("Match-->" + match_id_), new IntWritable(runs));
            }
        }
    }

    public static class MyReducer extends MapReduceBase
            implements Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key,
                           Iterator<IntWritable> values,
                           OutputCollector<Text, IntWritable> output,
                           Reporter reporter) throws IOException {
            logger.info("Vibhas Reducer Started");
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(CSVFileProcessorNewAPI.class);
        conf.setJobName("CSVFileProcessorNewAPI Job");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(MyMapper.class);
        conf.setCombinerClass(MyReducer.class);
        conf.setReducerClass(MyReducer.class);
        conf.setNumReduceTasks(1);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}

CSV file format:

match_id,inning,batting_team,bowling_team,over,ball,batsman,non_striker,bowler,is_super_over,wide_runs,bye_runs,legbye_runs,noball_runs,penalty_runs,batsman_runs,extra_runs,total_runs,player_dismissed,dismissal_kind,fielder

1,1,Sunrisers Hyderabad,Royal Challengers Bangalore,1,1,DA Warner,S Dhawan,TS Mills,0,0,0,0,0,0,0,0,0,,,

1,1,Sunrisers Hyderabad,Royal Challengers Bangalore,1,2,DA Warner,S Dhawan,TS Mills,0,0,0,0,0,0,0,0,0,,,

Exception:

hadoop jar /home/cloudera/Downloads/pro-apache-hadoop-master/prohadoop.jar org.apress.prohadoop.c3.CSVFileProcessorNewAPI /Input/test.csv /outPutCSV
18/03/15 02:19:19 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032
18/03/15 02:19:20 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032
18/03/15 02:19:20 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
18/03/15 02:19:20 INFO mapred.FileInputFormat: Total input paths to process : 1
18/03/15 02:19:20 INFO mapreduce.JobSubmitter: number of splits:2
18/03/15 02:19:21 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1520413460063_0042
18/03/15 02:19:21 INFO impl.YarnClientImpl: Submitted application application_1520413460063_0042
18/03/15 02:19:21 INFO mapreduce.Job: The url to track the job: http://quickstart.cloudera:8088/proxy/application_1520413460063_0042/
18/03/15 02:19:21 INFO mapreduce.Job: Running job: job_1520413460063_0042
18/03/15 02:19:27 INFO mapreduce.Job: Job job_1520413460063_0042 running in uber mode : false
18/03/15 02:19:27 INFO mapreduce.Job: map 0% reduce 0%
18/03/15 02:19:43 INFO mapreduce.Job: map 50% reduce 0%
18/03/15 02:19:45 INFO mapreduce.Job: Task Id : attempt_1520413460063_0042_m_000001_0, Status : FAILED
Error: java.lang.ArrayIndexOutOfBoundsException: 18
at org.apress.prohadoop.c3.CSVFileProcessorNewAPI$MyMapper.map(CSVFileProcessorNewAPI.java:77)
at org.apress.prohadoop.c3.CSVFileProcessorNewAPI$MyMapper.map(CSVFileProcessorNewAPI.java:1)
at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:54)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:459)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:343)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:164)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1917)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)

Please help me resolve this issue.

Best Answer

Doing String[] parts = value.toString().split("[,]"); and then assuming that every record you receive has the correct number of columns is dangerous. Especially when processing large volumes of data, the chance of receiving "dirty" data cannot be ignored, and a single bad row is enough to kill the whole job. There is a second trap here as well: the single-argument form of String.split discards trailing empty strings, so a row that ends in empty columns (as both of your sample rows do, ending in ",,,") produces an array that is shorter than the number of columns in the file. That is exactly why parts[18] is out of bounds even though the column "exists".

You should add a check:

String[] parts = value.toString().split(",", -1); // limit -1 keeps trailing empty columns
if (parts != null && parts.length == 21) { // the header row defines 21 columns
    // your normal logic
} else {
    logger.warn("Unparseable record identified: " + value);
}
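
A small standalone sketch (the class name SplitDemo is illustrative only) that reproduces the exact failure with one of the sample rows from the question:

import java.util.Arrays;

public class SplitDemo {
    public static void main(String[] args) {
        // One of the sample rows from the question: 21 columns, the last three empty.
        String row = "1,1,Sunrisers Hyderabad,Royal Challengers Bangalore,1,1,"
                + "DA Warner,S Dhawan,TS Mills,0,0,0,0,0,0,0,0,0,,,";

        // The single-argument form uses limit 0, which drops trailing empty
        // strings: only indices 0..17 survive, so parts[18] throws
        // ArrayIndexOutOfBoundsException: 18, exactly as in the job log.
        String[] dropped = row.split(",");
        System.out.println(dropped.length); // prints 18

        // Limit -1 keeps trailing empty strings: all 21 columns are present.
        String[] kept = row.split(",", -1);
        System.out.println(kept.length); // prints 21
        System.out.println(Arrays.asList(kept).subList(18, 21)); // prints [, , ]
    }
}

After the default split only indices 0 through 17 survive, which matches the symptom precisely: the mapper "runs fine until the 17th index" and then dies at index 18 (player_dismissed).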

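As a side note, the job log also warns: "Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this." A minimal sketch of what such a driver could look like, assuming the same job setup as in the question (the class name CSVFileProcessorDriver is hypothetical):

package org.apress.prohadoop.c3;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class CSVFileProcessorDriver extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        // getConf() returns the Configuration that ToolRunner has already
        // populated with any generic command-line options (-D, -files, ...).
        JobConf conf = new JobConf(getConf(), CSVFileProcessorNewAPI.class);
        conf.setJobName("CSVFileProcessorNewAPI Job");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        conf.setMapperClass(CSVFileProcessorNewAPI.MyMapper.class);
        conf.setCombinerClass(CSVFileProcessorNewAPI.MyReducer.class);
        conf.setReducerClass(CSVFileProcessorNewAPI.MyReducer.class);
        conf.setNumReduceTasks(1);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new CSVFileProcessorDriver(), args));
    }
}
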
Regarding "java - MapReduce program not reading text beyond the limit", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/49297761/
