
java - Hadoop - Exception in thread "main" java.lang.NullPointerException


I'm trying to use Apache Hadoop on the Windows platform by following this tutorial (the Eclipse part): http://www.codeproject.com/Articles/757934/Apache-Hadoop-for-Windows-Platform?fid=1858035 . Everything goes fine until the last step. When I run the program I get:

log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Exception in thread "main" java.lang.NullPointerException
at java.lang.ProcessBuilder.start(Unknown Source)
at org.apache.hadoop.util.Shell.runCommand(Shell.java:445)
at org.apache.hadoop.util.Shell.run(Shell.java:418)
at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:650)
at org.apache.hadoop.util.Shell.execCommand(Shell.java:739)
at org.apache.hadoop.util.Shell.execCommand(Shell.java:722)
at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:631)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:421)
at org.apache.hadoop.fs.FilterFileSystem.mkdirs(FilterFileSystem.java:277)
at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:125)
at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:348)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1285)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1282)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Unknown Source)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:1282)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1303)
at Recipe.main(Recipe.java:82)

The code is:

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import com.google.gson.Gson;
public class Recipe {

    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        Gson gson = new Gson();

        public void map(Object key, Text value, Context context
                        ) throws IOException, InterruptedException {

            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }

            Roo roo = gson.fromJson(value.toString(), Roo.class);
            if (roo.cookTime != null) {
                word.set(roo.cookTime);
            } else {
                word.set("none");
            }
            context.write(word, one);
        }
    }

    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values,
                           Context context
                           ) throws IOException, InterruptedException {

            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        @SuppressWarnings("deprecation")
        Job job = new Job(conf, "Recipe");
        job.setJarByClass(Recipe.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        //FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        //FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        FileInputFormat.addInputPath(job, new Path("hdfs://127.0.0.1:9000/in"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://127.0.0.1:9000/output"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
        // job.submit();
    }
}

class Id {
    public String oid;
}

class Ts {
    public long date;
}

class Roo {
    public Id _id;
    public String name;
    public String ingredients;
    public String url;
    public String image;
    public Ts ts;
    public String cookTime;
    public String source;
    public String recipeYield;
    public String datePublished;
    public String prepTime;
    public String description;
}

This only happens when I try to run it from within Eclipse. Via CMD everything goes fine:

javac -classpath C:\hadoop-2.3\share\hadoop\common\hadoop-common-2.3.0.jar;C:\hadoop-2.3\share\hadoop\common\lib\gson-2.2.4.jar;C:\hadoop-2.3\share\hadoop\common\lib\commons-cli-1.2.jar;C:\hadoop-2.3\share\hadoop\mapreduce\hadoop-mapreduce-client-core-2.3.0.jar Recipe.java
jar -cvf Recipe.jar *.class
hadoop jar c:\Hwork\Recipe.jar Recipe /in /out

Any idea how I can solve this problem?

Best Answer

I ran into the same problem and the workaround from http://qnalist.com/questions/4994960/run-spark-unit-test-on-windows-7 fixed it. On Windows, Hadoop's Shell class needs winutils.exe; when it cannot be located, the command array handed to ProcessBuilder.start contains null, which is exactly the NullPointerException in your stack trace.

The workaround is:

  1. Download the compiled winutils.exe from https://codeload.github.com/srccodes/hadoop-common-2.2.0-bin/zip/master
  2. Extract the archive directly into d:\winutil; this should create d:\winutil\bin
  3. Add System.setProperty("hadoop.home.dir", "d:\\winutil\\"); to your code before the Job is instantiated (for example, as the first line of the main method); see the sketch after this list
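A minimal sketch of step 3 applied to this question's own main method (assuming the archive was extracted to d:\winutil as in step 2, so that winutils.exe ends up in d:\winutil\bin):

public static void main(String[] args) throws Exception {
    // Must run before any Hadoop code shells out on Windows:
    // Shell.java reads the hadoop.home.dir property to locate bin\winutils.exe.
    System.setProperty("hadoop.home.dir", "d:\\winutil\\");

    Configuration conf = new Configuration();
    @SuppressWarnings("deprecation")
    Job job = new Job(conf, "Recipe");
    // ... the rest of the job setup stays unchanged ...
}

Shell.java falls back to the HADOOP_HOME environment variable when the hadoop.home.dir property is not set, so defining HADOOP_HOME=d:\winutil in the Eclipse run configuration should work as well.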

Regarding java - Hadoop - Exception in thread "main" java.lang.NullPointerException, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/27201505/
