I am trying to run a basic word-count job with MapReduce. The source code is available on the official website.
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.BasicConfigurator;

public class TestDriver {

    // Emits (word, 1) for every whitespace-separated token in the input.
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    // Sums the counts per word; also reused as the combiner.
    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        BasicConfigurator.configure(); // default log4j console logging
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(TestDriver.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

When I run it, I get the following output and the job fails:
log4j: Trying to find [log4j.xml] using context classloader sun.misc.Launcher$AppClassLoader@73d16e93.
log4j: Trying to find [log4j.xml] using sun.misc.Launcher$AppClassLoader@73d16e93 class loader.
log4j: Trying to find [log4j.xml] using ClassLoader.getSystemResource().
log4j: Trying to find [log4j.properties] using context classloader sun.misc.Launcher$AppClassLoader@73d16e93.
log4j: Trying to find [log4j.properties] using sun.misc.Launcher$AppClassLoader@73d16e93 class loader.
log4j: Trying to find [log4j.properties] using ClassLoader.getSystemResource().
log4j: Could not find resource: [null].
1 [main] DEBUG org.apache.hadoop.metrics2.lib.MutableMetricsFactory - field org.apache.hadoop.metrics2.lib.MutableRate org.apache.hadoop.security.UserGroupInformation$UgiMetrics.loginSuccess with annotation @org.apache.hadoop.metrics2.annotation.Metric(always=false, sampleName=Ops, about=, type=DEFAULT, value=[Rate of successful kerberos logins and latency (milliseconds)], valueName=Time)
39 [main] DEBUG org.apache.hadoop.metrics2.lib.MutableMetricsFactory - field org.apache.hadoop.metrics2.lib.MutableRate org.apache.hadoop.security.UserGroupInformation$UgiMetrics.loginFailure with annotation @org.apache.hadoop.metrics2.annotation.Metric(always=false, sampleName=Ops, about=, type=DEFAULT, value=[Rate of failed kerberos logins and latency (milliseconds)], valueName=Time)
40 [main] DEBUG org.apache.hadoop.metrics2.lib.MutableMetricsFactory - field org.apache.hadoop.metrics2.lib.MutableRate org.apache.hadoop.security.UserGroupInformation$UgiMetrics.getGroups with annotation @org.apache.hadoop.metrics2.annotation.Metric(always=false, sampleName=Ops, about=, type=DEFAULT, value=[GetGroups], valueName=Time)
46 [main] DEBUG org.apache.hadoop.metrics2.impl.MetricsSystemImpl - UgiMetrics, User and group related metrics
416 [main] DEBUG org.apache.hadoop.security.authentication.util.KerberosName - Kerberos krb5 configuration not found, setting default realm to empty
429 [main] DEBUG org.apache.hadoop.security.Groups - Creating new Groups object
439 [main] DEBUG org.apache.hadoop.util.NativeCodeLoader - Trying to load the custom-built native-hadoop library...
449 [main] DEBUG org.apache.hadoop.util.NativeCodeLoader - Loaded the native-hadoop library
451 [main] DEBUG org.apache.hadoop.security.JniBasedUnixGroupsMapping - Using JniBasedUnixGroupsMapping for Group resolution
451 [main] DEBUG org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback - Group mapping impl=org.apache.hadoop.security.JniBasedUnixGroupsMapping
545 [main] DEBUG org.apache.hadoop.security.Groups - Group mapping impl=org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback; cacheTimeout=300000; warningDeltaMs=5000
558 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - hadoop login
560 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - hadoop login commit
577 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - using local user:NTUserPrincipal: Arunaabh
577 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - Using user: "NTUserPrincipal: Arunaabh" with name Arunaabh
578 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - User entry: "Arunaabh"
579 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - UGI loginUser:Arunaabh (auth:SIMPLE)
862 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction as:Arunaabh (auth:SIMPLE) from:org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
878 [main] DEBUG org.apache.hadoop.mapreduce.Cluster - Trying ClientProtocolProvider : org.apache.hadoop.mapred.YarnClientProtocolProvider
879 [main] DEBUG org.apache.hadoop.mapreduce.Cluster - Cannot pick org.apache.hadoop.mapred.YarnClientProtocolProvider as the ClientProtocolProvider - returned null protocol
884 [main] DEBUG org.apache.hadoop.mapreduce.Cluster - Trying ClientProtocolProvider : org.apache.hadoop.mapred.LocalClientProtocolProvider
913 [main] INFO org.apache.hadoop.conf.Configuration.deprecation - session.id is deprecated. Instead, use dfs.metrics.session-id
916 [main] INFO org.apache.hadoop.metrics.jvm.JvmMetrics - Initializing JVM Metrics with processName=JobTracker, sessionId=
1000 [main] DEBUG org.apache.hadoop.mapreduce.Cluster - Picked org.apache.hadoop.mapred.LocalClientProtocolProvider as the ClientProtocolProvider
1002 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction as:Arunaabh (auth:SIMPLE) from:org.apache.hadoop.mapreduce.Cluster.getFileSystem(Cluster.java:162)
1014 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction as:Arunaabh (auth:SIMPLE) from:org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
1044 [main] DEBUG org.apache.hadoop.mapreduce.JobSubmitter - Configuring job job_local545640952_0001 with file:/tmp/hadoop-Arunaabh/mapred/staging/Arunaabh545640952/.staging/job_local545640952_0001 as the submit dir
1044 [main] DEBUG org.apache.hadoop.mapreduce.JobSubmitter - adding the following namenodes' delegation tokens:[file:///]
2292 [main] WARN org.apache.hadoop.mapreduce.JobResourceUploader - Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
2293 [main] DEBUG org.apache.hadoop.mapreduce.JobResourceUploader - default FileSystem: file:///
2392 [main] DEBUG org.apache.hadoop.io.nativeio.NativeIO - Initialized cache for IDs to User/Group mapping with a cache timeout of 14400 seconds.
2400 [main] INFO org.apache.hadoop.mapreduce.JobSubmitter - Cleaning up the staging area file:/tmp/hadoop-Arunaabh/mapred/staging/Arunaabh545640952/.staging/job_local545640952_0001
2401 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedActionException as:Arunaabh (auth:SIMPLE) cause:0: No such file or directory
Exception in thread "main" 0: No such file or directory
at org.apache.hadoop.io.nativeio.NativeIO$POSIX.chmod(NativeIO.java:236)
at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:724)
at org.apache.hadoop.fs.FilterFileSystem.setPermission(FilterFileSystem.java:502)
at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:600)
at org.apache.hadoop.mapreduce.JobResourceUploader.uploadFiles(JobResourceUploader.java:94)
at org.apache.hadoop.mapreduce.JobSubmitter.copyAndConfigureFiles(JobSubmitter.java:95)
at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:190)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Unknown Source)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
at TestDriver.main(TestDriver.java:62)
The arguments I pass to the job are:

C:\Users\Arunaabh\workspace\TestHadoop\hadooptest.txt
C:\Users\Arunaabh\workspace\TestHadoop\output
Best answer
You have passed input and output paths on the local file system as arguments to the MapReduce job:

C:\Users\Arunaabh\workspace\TestHadoop\hadooptest.txt
C:\Users\Arunaabh\workspace\TestHadoop\output

The log shows where this goes wrong: the client falls back to the LocalJobRunner ("Picked org.apache.hadoop.mapred.LocalClientProtocolProvider"), stages the job under file:/tmp/hadoop-Arunaabh/mapred/staging/..., and the native chmod call made while creating that staging directory is what throws "0: No such file or directory" on Windows (see the NativeIO$POSIX.chmod frame at the top of the stack trace).
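Separately, the WARN in the log ("Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.") suggests a more robust driver shape. Below is a minimal sketch of that variant; it reuses the mapper and reducer from the question, assumes it sits in the same package as TestDriver, and the class name WordCountTool is mine, not from the question.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical ToolRunner-based driver, reusing the question's mapper/reducer.
public class WordCountTool extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(getConf(), "word count");
        job.setJarByClass(WordCountTool.class);
        job.setMapperClass(TestDriver.TokenizerMapper.class);
        job.setCombinerClass(TestDriver.IntSumReducer.class);
        job.setReducerClass(TestDriver.IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Give the paths an explicit scheme (hdfs://... or file:///...) so it
        // is unambiguous which file system the job reads from and writes to.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // ToolRunner parses the generic options (-fs, -jt, -D key=value, ...)
        // before handing the remaining arguments to run().
        System.exit(ToolRunner.run(new Configuration(), new WordCountTool(), args));
    }
}

With this shape, the target file system can be chosen on the command line (for example -fs hdfs://namenode:9000) instead of being baked into the path arguments.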
Regarding windows - Exception in thread "main" 0: No such file or directory: hadoop map reduce, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/43169269/