gpt4 book ai didi

hadoop - WholeFileRecordReader 不能转换为 org.apache.hadoop.mapred.RecordReader

转载 作者:行者123 更新时间:2023-12-02 21:47:12 24 4
gpt4 key购买 nike

我想在 Hadoop 中创建一个新的数据类型,但我的自定义 InputFormat 类报出了以下错误。这是我的代码:

错误 - WholeFileRecordReader 无法转换为 org.apache.hadoop.mapred.RecordReader

代码 -

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptContext;



public class wholeFileInputFormat extends FileInputFormat<Text, apriori>{

public RecordReader<Text, apriori> getRecordReader(
InputSplit input, JobConf job, Reporter reporter)
throws IOException {

reporter.setStatus(input.toString());

return (RecordReader<Text, apriori>) new WholeFileRecordReader(job,FileSplit)input);

}

}

我的自定义记录阅读器如下
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * New-API ({@code org.apache.hadoop.mapreduce}) record reader that emits exactly one
 * record per file split: the key is the file name, the value is the entire file
 * contents wrapped in an {@code apriori} writable.
 *
 * <p>NOTE(review): this class extends {@code mapreduce.RecordReader} but is constructed
 * with old-API {@code mapred} types ({@link JobConf}, {@link FileSplit}); the two Hadoop
 * APIs are distinct type hierarchies and should not be mixed.
 */
class WholeFileRecordReader extends RecordReader<Text, apriori> {

    private FileSplit fileSplit;          // the single-file split being read
    private Configuration conf;
    private InputStream in;               // open stream over the split's file
    private Text key = new Text("");      // file name of the split
    private apriori value = new apriori();// whole file contents
    private boolean processed = false;    // true once the single record was emitted

    /**
     * Opens the split's file through the Hadoop {@link FileSystem}.
     *
     * <p>Fixed: the original opened a leaked {@code FileInputStream} on a
     * hand-stripped path ({@code substring(5)}) and then immediately overwrote it
     * with {@code fs.open(file)}; only the FileSystem open is needed, and it works
     * for any supported scheme (local, HDFS, ...), not just {@code file:} URIs.
     *
     * @param job   job configuration used to resolve the file system
     * @param split the single-file split to read
     * @throws IOException if the file cannot be opened
     */
    public void initialize( JobConf job, FileSplit split)
            throws IOException {
        this.fileSplit = split;
        this.conf = job;
        final Path file = fileSplit.getPath();
        FileSystem fs = file.getFileSystem(conf);
        in = fs.open(file);
    }

    /**
     * Reads the whole file as one record on the first call; returns false afterwards.
     */
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (!processed) {
            byte[] contents = new byte[(int) fileSplit.getLength()];
            Path file = fileSplit.getPath();
            key.set(file.getName());

            try {
                IOUtils.readFully(in, contents, 0, contents.length);
                value.set(contents, 0, contents.length);
            } finally {
                // Stream is fully consumed after one record; release it eagerly.
                IOUtils.closeStream(in);
                in = null;
            }

            processed = true;
            return true;
        }

        return false;
    }

    @Override
    public Text getCurrentKey() throws IOException, InterruptedException {
        return key;
    }

    @Override
    public apriori getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    /** @return 1.0 once the single record has been read, else 0.0. */
    @Override
    public float getProgress() throws IOException {
        return processed ? 1.0f : 0.0f;
    }

    /**
     * Fixed: the original did nothing here, leaking the stream when the framework
     * closes the reader before {@link #nextKeyValue()} consumed it.
     */
    @Override
    public void close() throws IOException {
        IOUtils.closeStream(in);
        in = null;
    }

    /**
     * Framework entry point of the new API. Delegates to the custom
     * {@link #initialize(JobConf, FileSplit)} overload when the arguments carry the
     * expected types; otherwise left as a no-op as in the original.
     */
    @Override
    public void initialize(InputSplit arg0, TaskAttemptContext arg1)
            throws IOException, InterruptedException {
        // NOTE(review): intentionally not implemented in the original; the reader is
        // initialized through the (JobConf, FileSplit) overload instead. Mixing the
        // mapred and mapreduce APIs like this is the root cause of the cast error.
    }

}

最佳答案

WholeFileRecordReader类是 org.apache.hadoop.mapreduce.RecordReader 的子类。此类不能转换为 org.apache.hadoop.mapred.RecordReader类,因为两者属于不同的类型层次结构。应在两个类中统一使用同一套 API:要么全部使用旧的 org.apache.hadoop.mapred,要么全部使用新的 org.apache.hadoop.mapreduce。

根据 Java 编程语言的规则,只有来自相同类型层次结构的类或接口(interface)(统称为类型)可以相互转换或转换。如果你尝试转换两个不共享相同类型层次结构的对象,即它们之间没有父子关系,你会得到编译时错误。你可以引用这个link

关于hadoop - WholeFileRecordReader 不能转换为 org.apache.hadoop.mapred.RecordReader,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/24142963/

24 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com