
java - Compilation error in a Java Hadoop program



I wrote this Java Hadoop program, which performs parallel indexing of files. The file was created in Eclipse.

package org.myorg;

import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;

public class ParallelIndexation {

    public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable zero = new IntWritable(0);
        private Text word = new Text();

        public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
            String line = value.toString();
            int CountComputers;
            //DataInputStream ConfigFile = new DataInputStream(new FileInputStream("countcomputers.txt"));
            FileInputStream fstream = new FileInputStream("/usr/countcomputers.txt"); // path to the file
            DataInputStream in = new DataInputStream(fstream);
            BufferedReader br = new BufferedReader(new InputStreamReader(in));
            String result = br.readLine(); // read as a string
            CountComputers = Integer.parseInt(result); // convert the string to a number
            //CountComputers = ConfigFile.readInt();
            in.close();
            fstream.close();
            ArrayList<String> paths = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(line, "\n");
            while (tokenizer.hasMoreTokens()) {
                paths.add(tokenizer.nextToken());
            }
            // Split the list of paths into CountComputers newline-joined groups
            // whose sizes differ by at most one.
            String[] ConcatPaths = new String[CountComputers];
            int NumberOfElementConcatPaths = 0;
            if (paths.size() % CountComputers == 0) {
                for (int i = 0; i < CountComputers; i++) {
                    ConcatPaths[i] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers;
                    for (int j = 1; j < paths.size() / CountComputers; j++) {
                        ConcatPaths[i] += "\n" + paths.get(i * paths.size() / CountComputers + j);
                    }
                }
            } else {
                // The first paths.size() % CountComputers groups get one extra path.
                NumberOfElementConcatPaths = 0;
                for (int i = 0; i < paths.size() % CountComputers; i++) {
                    ConcatPaths[i] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers + 1;
                    for (int j = 1; j < paths.size() / CountComputers + 1; j++) {
                        ConcatPaths[i] += "\n" + paths.get(i * (paths.size() / CountComputers + 1) + j);
                    }
                }
                for (int k = paths.size() % CountComputers; k < CountComputers; k++) {
                    ConcatPaths[k] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers;
                    for (int j = 1; j < paths.size() / CountComputers; j++) {
                        ConcatPaths[k] += "\n" + paths.get((k - paths.size() % CountComputers) * paths.size() / CountComputers + paths.size() % CountComputers * (paths.size() / CountComputers + 1) + j);
                    }
                }
            }
            //CountComputers = ConfigFile.readInt();
            for (int i = 0; i < ConcatPaths.length; i++) {
                word.set(ConcatPaths[i]);
                output.collect(word, zero);
            }
        }
    }

    public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, LongWritable> {
        public native long Traveser(String Path);
        public native void Configure(String Path);

        public void reduce(Text key, IntWritable value, OutputCollector<Text, LongWritable> output, Reporter reporter) throws IOException {
            long count;
            String line = key.toString();
            ArrayList<String> ProcessedPaths = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(line, "\n");
            while (tokenizer.hasMoreTokens()) {
                ProcessedPaths.add(tokenizer.nextToken());
            }
            Configure("/etc/nsindexer.conf");
            for (int i = 0; i < ProcessedPaths.size(); i++) {
                count = Traveser(ProcessedPaths.get(i));
            }
            output.collect(key, new LongWritable(count));
        }

        static {
            System.loadLibrary("nativelib");
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(ParallelIndexation.class);
        conf.setJobName("parallelindexation");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);
        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));
        JobClient.runJob(conf);
    }
}
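The group-splitting arithmetic in the mapper is easier to see in isolation. Here is a simplified, standalone sketch of the same idea (the class and helper names are hypothetical, not part of the program above): divide n paths into k newline-joined groups whose sizes differ by at most one, with the first n % k groups one element larger.

import java.util.ArrayList;
import java.util.List;

public class SplitDemo {
    // Divide the paths into k newline-joined groups; the first
    // paths.size() % k groups receive one extra path.
    static String[] concatPaths(List<String> paths, int k) {
        String[] out = new String[k];
        int base = paths.size() / k;   // minimum group size
        int extra = paths.size() % k;  // groups that get one more path
        int pos = 0;
        for (int i = 0; i < k; i++) {
            int size = base + (i < extra ? 1 : 0);
            StringBuilder sb = new StringBuilder();
            for (int j = 0; j < size; j++) {
                if (j > 0) sb.append('\n');
                sb.append(paths.get(pos++));
            }
            out[i] = sb.toString();
        }
        return out;
    }

    public static void main(String[] args) {
        List<String> paths = new ArrayList<String>();
        for (int i = 1; i <= 8; i++) paths.add("/data/file" + i);
        // With 8 paths and 3 computers, the group sizes are 3, 3, 2.
        for (String group : concatPaths(paths, 3)) {
            System.out.println("---\n" + group);
        }
    }
}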

When I compile it on the Nexenta Illumos operating system (Solaris) with the command

javac -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar -d folder/classes folder/src/ParallelIndexation.java

I get the following error:

folder/src/ParallelIndexation.java:81: error: Reduce is not abstract and does not override abstract method reduce(Text,Iterator<IntWritable>,OutputCollector<Text,LongWritable>,Reporter) in Reducer
public static class Reduce extends MapReduceBase implements
^
1 error
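The compiler is complaining that Reduce does not implement the reduce method declared by the old org.apache.hadoop.mapred.Reducer interface: the second parameter must be an Iterator<IntWritable> (already covered by the existing import java.util.*;), not a single IntWritable. A minimal sketch of the override the compiler expects, reusing the class's existing fields and native methods:

public void reduce(Text key, Iterator<IntWritable> values,
        OutputCollector<Text, LongWritable> output, Reporter reporter)
        throws IOException {
    // Initialize count, otherwise javac will also report
    // "variable count might not have been initialized".
    long count = 0;
    String line = key.toString();
    StringTokenizer tokenizer = new StringTokenizer(line, "\n");
    Configure("/etc/nsindexer.conf");
    while (tokenizer.hasMoreTokens()) {
        count = Traveser(tokenizer.nextToken());
    }
    output.collect(key, new LongWritable(count));
}

Note also that conf.setCombinerClass(Reduce.class) is likely to fail at runtime once the class compiles: a combiner's output types must match the map output types (Text/IntWritable here), but Reduce emits LongWritable values. Removing the setCombinerClass call is the usual fix.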
