
java - Hadoop Distributed Cache


I am trying to use the Hadoop DistributedCache to make two input sources available to the map task.

So I built a prototype that joins two input files via the distributed cache, and that works correctly.

However, the DistributedCache API does not work when I write a program with multiple MapReduce jobs in which the output of the previous job is used as one of the two inputs of the next job: the distributed-cache file emits nothing.

Here is my job driver.

public int run(String[] args) throws Exception {
    Path InputPath = new Path(args[0]);
    Path Inter = new Path("Inters"); // new Path(args[1]);
    Path OutputPath = new Path(args[1]);

    // job 1: runs FirstMap/Reduce over InputPath and writes to the intermediate directory Inter
    JobConf conf = new JobConf(getConf(), Temp.class);
    FileSystem fs = FileSystem.get(getConf());
    conf.setJobName("wordcount");

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);

    conf.setMapperClass(FirstMap.class);
    //conf.setCombinerClass(Reduce.class);
    conf.setReducerClass(Reduce.class);

    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(IntWritable.class);
    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);
    //conf.setNumReduceTasks(0);

    //20131220 - to deal with paths as variables
    //fs.delete(Inter);

    //DistributedCache.addCacheFile(new Path(args[2]).toUri(), conf);
    FileInputFormat.setInputPaths(conf, InputPath);
    FileOutputFormat.setOutputPath(conf, Inter);
    conf.set("threshold", args[2]);
    JobClient.runJob(conf);

    // job 2: reads InputPath again; args[3] is registered in the distributed cache
    JobConf conf2 = new JobConf(getConf(), Temp.class);
    conf2.setJobName("shit");

    conf2.setMapOutputKeyClass(Text.class);
    conf2.setMapOutputValueClass(IntWritable.class);

    conf2.setOutputKeyClass(Text.class);
    conf2.setOutputValueClass(IntWritable.class);

    conf2.setMapperClass(Map.class);
    //conf2.setCombinerClass(Reduce.class);
    conf2.setReducerClass(Reduce.class);
    conf2.setNumReduceTasks(0);
    conf2.setInputFormat(TextInputFormat.class);
    conf2.setOutputFormat(TextOutputFormat.class);

    //DistributedCache.addFileToClassPath(Inter, conf2);
    //DistributedCache.addCacheFile(Inter.toUri(), conf2);
    String interToString = Inter.toString();
    Path Inters = new Path(interToString);

    DistributedCache.addCacheFile(new Path(args[3]).toUri(), conf2);
    FileInputFormat.setInputPaths(conf2, InputPath);
    FileOutputFormat.setOutputPath(conf2, OutputPath);

    conf2.set("threshold", "0");
    JobClient.runJob(conf2);

    return 0;
}
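A point worth checking here (my observation, not part of the original question): a MapReduce job's output path such as Inter is a directory of part files (part-00000, ...), not a single file, so caching the directory URI itself does not give the second job's mappers a readable file. Below is a minimal sketch of registering each part file individually; the helper name is hypothetical and it assumes the intermediate directory lives on the job's default FileSystem.

// Hypothetical helper (not in the original question): register every part file
// produced by job 1 in job 2's distributed cache.
// Needs org.apache.hadoop.fs.{FileStatus, FileSystem, Path} and
// org.apache.hadoop.filecache.DistributedCache on the classpath.
private static void cacheJobOutput(Path interDir, JobConf conf2) throws IOException {
    FileSystem fs = FileSystem.get(conf2);
    for (FileStatus status : fs.listStatus(interDir)) {
        Path p = status.getPath();
        // skip _SUCCESS/_logs; only the actual part files are worth caching
        if (!status.isDir() && p.getName().startsWith("part-")) {
            DistributedCache.addCacheFile(p.toUri(), conf2);
        }
    }
}

Called as cacheJobOutput(Inter, conf2) just before JobClient.runJob(conf2), this would make each intermediate part file show up in getLocalCacheFiles in the second job's mappers.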

In addition, here is the map function that handles the distributed cache.

public static class Map extends MapReduceBase implements
        Mapper<LongWritable, Text, Text, IntWritable> {

    static enum Counters {
        INPUT_WORDS
    }

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    private boolean caseSensitive = true;
    private Set<String> patternsToSkip = new HashSet<String>();

    private long numRecords = 0;
    private String inputFile;
    private Iterator<String> iterator;

    private Path[] localFiles;

    public void configure(JobConf job) {
        try {
            localFiles = DistributedCache.getLocalCacheFiles(job);
        } catch (IOException e) {
            e.printStackTrace();
        }
        for (Path patternsFile : localFiles) {
            parseSkipFile(patternsFile);
        }
    }

    private void parseSkipFile(Path patternsFile) {
        try {
            BufferedReader fis = new BufferedReader(new FileReader(
                    patternsFile.toString()));
            String pattern = null;
            while ((pattern = fis.readLine()) != null) {
                //String[] strArr = pattern.split(" ");
                System.err.println("Pattern : " + pattern);
                patternsToSkip.add(pattern);
            }
        } catch (IOException ioe) {
            System.err
                    .println("Caught exception while parsing the cached file '"
                            + patternsFile
                            + "' : "
                            + StringUtils.stringifyException(ioe));
        }
    }

    public void map(LongWritable key, Text value,
            OutputCollector<Text, IntWritable> output, Reporter reporter)
            throws IOException {
        //output.collect(value, one);

        ArrayList<String> temp = new ArrayList<String>();

        String line = value.toString();

        iterator = patternsToSkip.iterator();

        while (iterator.hasNext()) {
            output.collect(new Text(iterator.next() + "+" + value.toString()), one);
        }
        /*while (iterator.hasNext()) {
            output.collect(new Text(iterator.next().toString()), one);
        }*/
        //output.collect(value, one);
    }
}
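One more hedged note (my sketch, not part of the original code): DistributedCache.getLocalCacheFiles(job) returns null when nothing was localized for the task, so the for-each loop in configure above would throw a NullPointerException rather than explain why the cached file contributes nothing. A small defensive variant makes the empty-cache case explicit:

public void configure(JobConf job) {
    try {
        localFiles = DistributedCache.getLocalCacheFiles(job);
    } catch (IOException e) {
        e.printStackTrace();
    }
    if (localFiles == null || localFiles.length == 0) {
        // nothing was cached for this job: say so in the task log instead of
        // failing with a NullPointerException or silently skipping every pattern
        System.err.println("No distributed cache files available for this task");
        return;
    }
    for (Path patternsFile : localFiles) {
        parseSkipFile(patternsFile);
    }
}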

Has anyone solved this problem?

Best Answer

Here is something I did to practice Hadoop in a university computer lab. It uses multiple input paths as well as chained jobs that perform a reduce-side join.

public class StockJoinJob extends Configured {

    public static class KeyPartitioner extends Partitioner<TextIntPair, TextLongIntPair> {
        @Override
        public int getPartition(TextIntPair key, TextLongIntPair value, int numPartitions) {
            return (key.getText().hashCode() & Integer.MAX_VALUE) % numPartitions;
        }
    }

    public static int runJob(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(StockJoinJob.class);

        Path nasdaqPath = new Path(args[0]);
        Path listPath = new Path(args[1]);
        Path outputPath = new Path(args[2] + "-first");

        MultipleInputs.addInputPath(job, listPath, TextInputFormat.class, CompanyMapper.class);
        MultipleInputs.addInputPath(job, nasdaqPath,
                StockInputFormat.class, StockMapper.class);
        FileOutputFormat.setOutputPath(job, outputPath);

        job.setPartitionerClass(KeyPartitioner.class);
        job.setGroupingComparatorClass(TextIntPair.FirstComparator.class);

        job.setMapOutputKeyClass(TextIntPair.class);
        job.setMapOutputValueClass(TextLongIntPair.class);
        job.setReducerClass(JoinReducer.class);

        job.setOutputKeyClass(TextIntPair.class);
        job.setOutputValueClass(TextLongPair.class);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static int runJob2(String[] args) throws Exception {
        //need first comparator like previous job
        Configuration conf = new Configuration();
        Job job = new Job(conf);

        job.setJarByClass(StockJoinJob.class);
        job.setReducerClass(TotalReducer.class);
        job.setMapperClass(TotalMapper.class);
        Path firstPath = new Path(args[2] + "-first");
        Path outputPath = new Path(args[2] + "-second");

        //reducer output//
        job.setOutputKeyClass(TextIntPair.class);
        job.setOutputValueClass(TextLongPair.class);

        //mapper output//
        job.setMapOutputKeyClass(TextIntPair.class);
        job.setMapOutputValueClass(TextIntPair.class);

        //etc
        FileInputFormat.setInputPaths(job, firstPath);
        FileOutputFormat.setOutputPath(job, outputPath);
        outputPath.getFileSystem(conf).delete(outputPath, true);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int firstCode = runJob(args);
        if (firstCode == 0) {
            int secondCode = runJob2(args);
            System.exit(secondCode);
        }
    }
}
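The custom types referenced above (TextIntPair, TextLongIntPair, TextLongPair, the mappers and reducers) are not included in the answer. Purely as an illustrative reconstruction of the composite-key pattern a reduce-side join like this usually relies on (this sketch is mine, not the answerer's code), TextIntPair would carry the join key plus an integer tag, sort on both, and provide a FirstComparator that groups on the key alone:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Illustrative reconstruction of a composite join key (not the original code).
public class TextIntPair implements WritableComparable<TextIntPair> {
    private Text text = new Text();        // join key
    private IntWritable tag = new IntWritable(); // source tag (e.g. 0 = company list, 1 = stock record)

    public TextIntPair() {}
    public TextIntPair(Text text, IntWritable tag) { this.text = text; this.tag = tag; }

    public Text getText() { return text; }

    @Override
    public void write(DataOutput out) throws IOException {
        text.write(out);
        tag.write(out);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        text.readFields(in);
        tag.readFields(in);
    }

    @Override
    public int compareTo(TextIntPair other) {
        int cmp = text.compareTo(other.text);             // primary: join key
        return cmp != 0 ? cmp : tag.compareTo(other.tag); // secondary: source tag
    }

    @Override
    public int hashCode() { return text.hashCode(); }     // matches KeyPartitioner's use of getText()

    // Groups reducer input by the Text part only, so the tagged records
    // for the same key arrive in a single reduce() call.
    public static class FirstComparator extends WritableComparator {
        protected FirstComparator() { super(TextIntPair.class, true); }

        @Override
        public int compare(WritableComparable a, WritableComparable b) {
            return ((TextIntPair) a).getText().compareTo(((TextIntPair) b).getText());
        }
    }
}

The grouping comparator is what lets JoinReducer see the records from both inputs for the same key in one reduce() call, which is the essence of the reduce-side join the answer describes; the partitioner above keeps those records on the same reducer.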

Regarding java - Hadoop Distributed Cache, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/20698001/
