gpt4 book ai didi

hadoop - 使用自定义FileInputFormat时遇到错误

转载 作者:行者123 更新时间:2023-12-02 21:51:32 26 4
gpt4 key购买 nike

嗨,我是MapReduce编程的新手,我正尝试从PDF文件中读取内容,以便我可以扩展程序以能够进行字数统计
以下是我的程序

package com.pdfreader;

import java.io.IOException;
import java.util.HashSet;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;

import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.parser.PdfTextExtractor;
import com.pdfreader.newPDFReader.PDFInputFormat;

public class PDFReader1 {

static class PDFInputFormat extends FileInputFormat<Text, Text>
{


// TODO Auto-generated method stub

@Override
public RecordReader<Text, Text> getRecordReader(InputSplit arg0,
JobConf arg1, Reporter arg2) throws IOException {
// TODO Auto-generated method stub
HashSet<String> hset=new HashSet<String>();

PdfReader reader=new PdfReader("/home/a/Desktop/a.pdf");
Integer pagecount=reader.getNumberOfPages();

for(int i=1;i<=pagecount;i++)
{
String page=PdfTextExtractor.getTextFromPage(reader, i);
StringTokenizer tokenizer=new StringTokenizer(page);

while(tokenizer.hasMoreTokens())
{
String word=tokenizer.nextToken();
hset.add(word);
}
}
return null;
}
}

class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable>{


@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, IntWritable> output, Reporter reporter)
throws IOException {
// TODO Auto-generated method stub
String line=value.toString();
StringTokenizer tokenizer=new StringTokenizer(line);

}

}

public static void main(String[] args) throws IOException {
// TODO Auto-generated method stub
JobConf conf=new JobConf(PDFReader1.class);
conf.setJobName("PDFInputFormat");

conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(IntWritable.class);

conf.setMapperClass(WordCountMapper.class);


conf.setInputFormat(PDFInputFormat.class);
conf.setOutputFormat(TextOutputFormat.class);

FileInputFormat.setInputPaths(conf,new Path ("/home/a/Desktop/a.pdf"));
FileOutputFormat.setOutputPath(conf, new Path("/home/a/Desktop/Hadoop"));

JobClient.runJob(conf);

}

}

但是,这似乎不起作用
13/12/01 09:46:39 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
13/12/01 09:46:39 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
13/12/01 09:46:39 WARN mapred.JobClient: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
13/12/01 09:46:39 INFO mapred.FileInputFormat: Total input paths to process : 1
13/12/01 09:46:40 INFO mapred.JobClient: Running job: job_local1646351819_0001
13/12/01 09:46:40 INFO mapred.LocalJobRunner: Waiting for map tasks
13/12/01 09:46:40 INFO mapred.LocalJobRunner: Starting task: attempt_local1646351819_0001_m_000000_0
13/12/01 09:46:40 INFO util.ProcessTree: setsid exited with exit code 0
13/12/01 09:46:40 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@12b7eea
13/12/01 09:46:41 INFO mapred.MapTask: Processing split: file:/home/vaibhavsrivastava/Desktop/a.pdf:0+382078
13/12/01 09:46:41 INFO mapred.JobClient: map 0% reduce 0%
13/12/01 09:46:43 INFO mapred.MapTask: numReduceTasks: 1
13/12/01 09:46:43 INFO mapred.MapTask: io.sort.mb = 100
13/12/01 09:47:27 INFO mapred.MapTask: data buffer = 79691776/99614720
13/12/01 09:47:27 INFO mapred.MapTask: record buffer = 262144/327680
13/12/01 09:47:31 INFO mapred.LocalJobRunner: Map task executor complete.
13/12/01 09:47:31 WARN mapred.LocalJobRunner: job_local1646351819_0001
java.lang.Exception: java.lang.RuntimeException: Error in configuring object
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:354)
Caused by: java.lang.RuntimeException: Error in configuring object
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:93)
at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:64)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:426)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:366)
at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:223)
at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source)
at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source)
at java.util.concurrent.FutureTask.run(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
at java.lang.reflect.Method.invoke(Unknown Source)
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:88)
... 11 more
Caused by: java.lang.RuntimeException: java.lang.NoSuchMethodException: org.apache.hadoop.mapred.Mapper.<init>()
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:115)
at org.apache.hadoop.mapred.MapRunner.configure(MapRunner.java:34)
... 16 more
Caused by: java.lang.NoSuchMethodException: org.apache.hadoop.mapred.Mapper.<init>()
at java.lang.Class.getConstructor0(Unknown Source)
at java.lang.Class.getDeclaredConstructor(Unknown Source)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:109)
... 17 more
13/12/01 09:47:32 INFO mapred.JobClient: Job complete: job_local1646351819_0001
13/12/01 09:47:32 INFO mapred.JobClient: Counters: 0
13/12/01 09:47:32 INFO mapred.JobClient: Job Failed: NA
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1357)
at com.pdfreader.PDFReader1.main(PDFReader1.java:91)

有人可以帮助解决这个问题,以便让我掌握如何做到这一点

最佳答案

您的mapper类当前是PDFReader1类的子级-因此,它的默认构造函数依赖于该父对象。您显然不能从代码中直接看到这一点,但是在已编译的源代码上运行javap时,您会看到使用单个PDFReader1参数生成了构造函数。

您看到的堆栈跟踪与此问题有关-Hadoop使用反射来实例化您的mapper类,但要求该mapper具有no arg默认构造函数。

修复起来很简单-只需在mapper类名称之前添加static关键字即可:

public static class WordCountMapper extends MapReduceBase

关于hadoop - 使用自定义FileInputFormat时遇到错误,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/20340581/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com