I have developed a MapReduce job that runs fine in IntelliJ and produces output. When I run the same code on the cluster, I get an empty result, and I keep getting this error:
15/07/21 08:28:04 INFO mapreduce.Job: Task Id : attempt_1436660204513_0254_m_000000_0, Status : FAILED Error: Plink/PlinkMapper : Unsupported major.minor version 51.0
When the job does run to completion, the counters show that the mappers produce no output:
Map-Reduce Framework
Map input records=18858
Map output records=0
Map output bytes=0
package Plink;

/**
 * Created by Sai Bharath on 7/21/2015.
 */
import Utils.PlinkConstants;
import Utils.PlinkDataSetDto;
import Utils.PlinkDto;
import Utils.PropertyUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by bvarre on 10/29/2014.
 */
public class PlinkDriver extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        if (args.length < 6) {
            System.err.printf("Usage: %s [generic options] <input> <output>\n",
                    getClass().getSimpleName());
            ToolRunner.printGenericCommandUsage(System.err);
            return -1;
        }
        Job job = new Job();
        Configuration conf = job.getConfiguration();
        conf.set("mapred.child.java.opts", "-Xmx8g");
        job.setJarByClass(PlinkDriver.class);
        PropertyUtils.setConfigFromSystemProperty(job.getConfiguration());
        // Two input directories (args[0], args[1]) and one output directory (args[2]).
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.addInputPath(job, new Path(args[1]));
        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        // Local mode: pass the code file paths through the configuration;
        // otherwise ship them to the tasks through the distributed cache.
        if (args[3] != null && !args[3].isEmpty() && PlinkConstants.LOCAL_FILE_INPUT.equalsIgnoreCase(args[3])) {
            job.getConfiguration().set("snip.codes", args[4]);
            job.getConfiguration().set("gene.codes", args[5]);
        } else {
            DistributedCache.addCacheFile(new Path(args[4]).toUri(), job.getConfiguration());
            DistributedCache.addCacheFile(new Path(args[5]).toUri(), job.getConfiguration());
            DistributedCache.createSymlink(conf);
        }
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setMapperClass(PlinkMapper.class);
        // job.setCombinerClass(PlinkCombiner.class);
        job.setReducerClass(PlinkReducer.class);
        // Setup Partitioner
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(PlinkDto.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new PlinkDriver(), args);
        System.exit(exitCode);
    }
}
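A side note on the driver: the DistributedCache helper is deprecated on Hadoop 2.x in favour of methods on Job itself. A rough sketch of the equivalent setup with the newer API, keeping the same argument layout as above (args[4] = snip codes file, args[5] = gene codes file); this is only a sketch, not the original code:

// Sketch: equivalent job/cache setup with the non-deprecated Hadoop 2.x API.
Configuration conf = getConf();
Job job = Job.getInstance(conf, "plink");
job.setJarByClass(PlinkDriver.class);
job.addCacheFile(new Path(args[4]).toUri());   // snip codes file in HDFS
job.addCacheFile(new Path(args[5]).toUri());   // gene codes file in HDFS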
package Plink;

import Utils.PlinkDataSetDto;
import Utils.PlinkDto;
import Utils.PlinkResourceBundle;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.*;

public class PlinkMapper extends Mapper<Object, Text, Text, PlinkDto> {

    private List<String> snipCodes = new ArrayList<String>();
    private List<String> geneCodes = new ArrayList<String>();
    private String domain;

    @Override
    protected void setup(Context context) throws IOException,
            InterruptedException {
        super.setup(context);
        Configuration conf = context.getConfiguration();
        // Load the snip/gene code lists either from the configuration (local mode)
        // or from the distributed cache (cluster mode).
        snipCodes = PlinkResourceBundle.getCodes(conf, "snip.codes");
        geneCodes = PlinkResourceBundle.getCodes(conf, "gene.codes");
        System.out.println(" snip code size in nMapper :: " + snipCodes.size());
        System.out.println(" gene code size in nMapper :: " + geneCodes.size());
    }

    @Override
    protected void map(Object key, Text value,
                       Context context) throws IOException, InterruptedException {
        try {
            String str = (value.toString());
            if (str != null && !str.equals("")) {
                List<String> items = Arrays.asList(str.split("\\s+"));
                if (items != null && items.size() >= 3) {
                    List<PlinkDto> snipList = new ArrayList<PlinkDto>();
                    List<PlinkDto> geneList = new ArrayList<PlinkDto>();
                    Text plinkKey = new Text();
                    plinkKey.set(items.get(0));
                    if (!items.get(2).equalsIgnoreCase("null") && !items.get(2).equalsIgnoreCase("na")) {
                        PlinkDto plinkDto = new PlinkDto();
                        plinkDto.setCodeDesc(items.get(1));
                        plinkDto.setCodeValue(new Float(items.get(2)));
                        if (snipCodes.contains(items.get(1))) {
                            plinkDto.setCode("SNIP");
                            snipList.add(plinkDto);
                        } else if (geneCodes.contains(items.get(1))) {
                            plinkDto.setCode("GENE");
                            geneList.add(plinkDto);
                        }
                        // Note: the record is written even if it matched neither code list.
                        context.write(plinkKey, plinkDto);
                    }
                }
            }
        } catch (Exception ex) {
            // Collecting Patient data
            ex.printStackTrace();
        }
    }
}
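One detail worth checking in the mapper: context.write(plinkKey, plinkDto) runs even when items.get(1) matches neither snipCodes nor geneCodes, so such records reach the reducer with getCode() still null and the reducer's equalsIgnoreCase calls will throw a NullPointerException. A small sketch of guarding the write, using the same fields as above:

// Sketch: only emit records that were actually classified,
// so the reducer never sees a null code.
if (snipCodes.contains(items.get(1))) {
    plinkDto.setCode("SNIP");
    context.write(plinkKey, plinkDto);
} else if (geneCodes.contains(items.get(1))) {
    plinkDto.setCode("GENE");
    context.write(plinkKey, plinkDto);
}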
package Plink;

/**
 * Created by Sai Bharath on 7/15/2015.
 */
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import Utils.PlinkDataSetDto;
import Utils.PlinkDto;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class PlinkReducer extends Reducer<Text, PlinkDto, Text, Text> {

    @Override
    public void reduce(Text key, Iterable<PlinkDto> values, Context context)
            throws IOException, InterruptedException {
        // Split the incoming records for this key into SNIP and GENE lists.
        List<PlinkDto> snipList = new ArrayList<PlinkDto>();
        List<PlinkDto> geneList = new ArrayList<PlinkDto>();
        Iterator<PlinkDto> it = values.iterator();
        while (it.hasNext()) {
            PlinkDto tempDto = it.next();
            if (tempDto.getCode().equalsIgnoreCase("SNIP")) {
                PlinkDto snipDto = new PlinkDto();
                snipDto.setCode(tempDto.getCode());
                snipDto.setCodeDesc(tempDto.getCodeDesc());
                snipDto.setCodeValue(tempDto.getCodeValue());
                snipList.add(snipDto);
            } else if (tempDto.getCode().equalsIgnoreCase("GENE")) {
                PlinkDto geneDto = new PlinkDto();
                geneDto.setCode(tempDto.getCode());
                geneDto.setCodeDesc(tempDto.getCodeDesc());
                geneDto.setCodeValue(tempDto.getCodeValue());
                geneList.add(geneDto);
            }
        }
        // Emit every SNIP x GENE combination for this key.
        for (PlinkDto snip : snipList) {
            for (PlinkDto gene : geneList) {
                PlinkDataSetDto dataSetDto = new PlinkDataSetDto();
                dataSetDto.setSnipCodeDesc(snip.getCodeDesc());
                dataSetDto.setGeneCodeDesc(gene.getCodeDesc());
                dataSetDto.setSnipCodeValue(snip.getCodeValue());
                dataSetDto.setGeneCodeValue(gene.getCodeValue());
                Text output = new Text();
                output.set(dataSetDto.toString());
                context.write(key, output);
            }
        }
    }
}
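If you want to exercise the pairing logic in isolation, MRUnit (already declared in the pom) can drive the reducer directly. A rough sketch, assuming PlinkDto is a proper Writable with a no-arg constructor and the setters used above, and with made-up sample values:

package Plink;

import java.util.Arrays;
import org.apache.hadoop.io.Text;
import org.apache.mrunit.mapreduce.ReduceDriver;
import Utils.PlinkDto;

// Rough sketch: feed one SNIP and one GENE record to the reducer and print the pairs.
public class PlinkReducerSketch {
    public static void main(String[] args) throws Exception {
        PlinkDto snip = new PlinkDto();
        snip.setCode("SNIP");
        snip.setCodeDesc("rs123");      // sample value, not real data
        snip.setCodeValue(1.0f);
        PlinkDto gene = new PlinkDto();
        gene.setCode("GENE");
        gene.setCodeDesc("geneA");      // sample value, not real data
        gene.setCodeValue(2.0f);
        ReduceDriver<Text, PlinkDto, Text, Text> driver =
                ReduceDriver.newReduceDriver(new PlinkReducer());
        driver.withInput(new Text("patient1"), Arrays.asList(snip, gene));
        System.out.println(driver.run());   // expect one SNIP x GENE pair for the key
    }
}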
package Utils;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import java.util.*;

public class PlinkResourceBundle {

    private PlinkResourceBundle() {
    }

    public static List<String> getCodes(Configuration conf, String codeType) throws IOException {
        List<String> codeList = new ArrayList<String>();
        try {
            String inFile = conf.get(codeType);
            if (inFile != null) {
                // Local mode: the configuration holds the path to the code file.
                List<String> lines = HdfsUtils.readFile(inFile);
                for (String line : lines) {
                    if (line != null && line.length() > 0) {
                        codeList.add(line.trim());
                    }
                }
            } else {
                // Cluster mode: read from the distributed cache.
                // Note: this always reads the first cache file, regardless of codeType.
                Path[] cachefiles = DistributedCache.getLocalCacheFiles(conf);
                if (cachefiles.length > 0) {
                    BufferedReader reader = new BufferedReader(new FileReader(cachefiles[0].toString()));
                    String line;
                    while ((line = reader.readLine()) != null) {
                        codeList.add((line.trim()));
                    }
                }
            }
        } catch (Exception ex) {
            System.out.println("Error in getting snip/gene codes " + ex.getMessage());
        }
        return codeList;
    } // end of method
}
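Note that in the distributed-cache branch both the snip and the gene lookups read cachefiles[0], so the two code lists come back identical. A sketch of picking the file by position instead, assuming the cache keeps the order in which the driver added the files (snip codes first, gene codes second):

// Sketch: pick the cached file that matches the requested code type.
// Assumes cache files keep the order the driver added them (snip codes, then gene codes).
Path[] cachefiles = DistributedCache.getLocalCacheFiles(conf);
int idx = "snip.codes".equalsIgnoreCase(codeType) ? 0 : 1;
if (cachefiles != null && cachefiles.length > idx) {
    BufferedReader reader = new BufferedReader(new FileReader(cachefiles[idx].toString()));
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            codeList.add(line.trim());
        }
    } finally {
        reader.close();
    }
}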
My pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>Plink</groupId>
    <artifactId>Plink</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <jdkLevel>1.7</jdkLevel>
        <requiredMavenVersion>[3.3,)</requiredMavenVersion>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.build.outputEncoding>UTF-8</project.build.outputEncoding>
    </properties>

    <distributionManagement>
        <repository>
            <id>code-artifacts</id>
            <url>http://code/artifacts/content/repositories/releases</url>
        </repository>
        <snapshotRepository>
            <id>code-artifacts</id>
            <url>http://code/artifacts/content/repositories/snapshots</url>
        </snapshotRepository>
    </distributionManagement>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.18.1</version>
                <configuration>
                    <skipTests>true</skipTests>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.3</version>
                <configuration>
                    <source>${jdkLevel}</source>
                    <target>${jdkLevel}</target>
                    <showDeprecation>true</showDeprecation>
                    <showWarnings>true</showWarnings>
                </configuration>
                <dependencies>
                    <dependency>
                        <groupId>org.codehaus.groovy</groupId>
                        <artifactId>groovy-eclipse-compiler</artifactId>
                        <version>2.9.2-01</version>
                    </dependency>
                    <dependency>
                        <groupId>org.codehaus.groovy</groupId>
                        <artifactId>groovy-eclipse-batch</artifactId>
                        <version>2.4.3-01</version>
                    </dependency>
                </dependencies>
            </plugin>
            <plugin>
                <artifactId>maven-dependency-plugin</artifactId>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>copy-dependencies</goal>
                        </goals>
                        <configuration>
                            <outputDirectory>${project.build.directory}/lib</outputDirectory>
                            <includeScope>provided</includeScope>
                            <includeScope>compile</includeScope>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

    <repositories>
        <repository>
            <releases>
                <enabled>true</enabled>
                <updatePolicy>always</updatePolicy>
                <checksumPolicy>warn</checksumPolicy>
            </releases>
            <snapshots>
                <enabled>false</enabled>
                <updatePolicy>never</updatePolicy>
                <checksumPolicy>fail</checksumPolicy>
            </snapshots>
            <id>HDPReleases</id>
            <name>HDP Releases</name>
            <url>http://repo.hortonworks.com/content/repositories/releases/</url>
            <layout>default</layout>
        </repository>
    </repositories>

    <dependencies>
        <dependency>
            <groupId>commons-logging</groupId>
            <artifactId>commons-logging</artifactId>
            <version>1.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0.2.2.4.2-2</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.oozie</groupId>
            <artifactId>oozie-core</artifactId>
            <version>4.1.0</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-core</artifactId>
            <version>0.20.2</version>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.5</version>
        </dependency>
        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
            <version>6.8.7</version>
        </dependency>
        <dependency>
            <groupId>org.apache.mrunit</groupId>
            <artifactId>mrunit</artifactId>
            <version>1.0.0</version>
            <classifier>hadoop2</classifier>
        </dependency>
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-core</artifactId>
            <version>1.9.5</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>commons-cli</groupId>
            <artifactId>commons-cli</artifactId>
            <version>1.2</version>
        </dependency>
        <dependency>
            <groupId>commons-httpclient</groupId>
            <artifactId>commons-httpclient</artifactId>
            <version>3.1</version>
        </dependency>
    </dependencies>
</project>
Here is the log of a run that completes "successfully" but writes no output:
15/07/22 08:41:57 INFO mapreduce.Job: map 0% reduce 0%
15/07/22 08:42:06 INFO mapreduce.Job: map 100% reduce 0%
15/07/22 08:42:13 INFO mapreduce.Job: map 100% reduce 100%
15/07/22 08:42:13 INFO mapreduce.Job: Job job_1436660204513_0286 completed successfully
15/07/22 08:42:13 INFO mapreduce.Job: Counters: 50
File System Counters
FILE: Number of bytes read=6
FILE: Number of bytes written=364577
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=604494
HDFS: Number of bytes written=0
HDFS: Number of read operations=9
HDFS: Number of large read operations=0
HDFS: Number of write operations=2
Job Counters
Launched map tasks=2
Launched reduce tasks=1
Other local map tasks=1
Rack-local map tasks=1
Total time spent by all maps in occupied slots (ms)=13453
Total time spent by all reduces in occupied slots (ms)=9188
Total time spent by all map tasks (ms)=13453
Total time spent by all reduce tasks (ms)=4594
Total vcore-seconds taken by all map tasks=13453
Total vcore-seconds taken by all reduce tasks=4594
Total megabyte-seconds taken by all map tasks=27551744
Total megabyte-seconds taken by all reduce tasks=18817024
Map-Reduce Framework
Map input records=18858
Map output records=0
Map output bytes=0
Map output materialized bytes=12
Input split bytes=266
Combine input records=0
Combine output records=0
Reduce input groups=0
Reduce shuffle bytes=12
Reduce input records=0
Reduce output records=0
Spilled Records=0
Shuffled Maps =2
Failed Shuffles=0
Merged Map outputs=2
GC time elapsed (ms)=118
CPU time spent (ms)=10260
Physical memory (bytes) snapshot=1023930368
Virtual memory (bytes) snapshot=9347194880
Total committed heap usage (bytes)=5474615296
Shuffle Errors
BAD_ID=0
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Input Format Counters
Bytes Read=604228
File Output Format Counters
Bytes Written=0
Best Answer
This looks like an incompatibility with the Java VM running on the cluster. "Unsupported major.minor version 51.0" means the PlinkMapper class file requires at least a Java 7 VM. I would suggest confirming which JRE version is running on the cluster. Your pom.xml compiles with <jdkLevel>1.7</jdkLevel> as source and target, so the classes are built for class-file major version 51; either run the job on a Java 7+ JRE or lower the compiler source/target to match the cluster's JVM.
The major version numbers defined in class files are listed below:
Java SE 8 = 52
Java SE 7 = 51
Java SE 6.0 = 50
Java SE 5.0 = 49
JDK 1.4 = 48
JDK 1.3 = 47
JDK 1.2 = 46
JDK 1.1 = 45
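If you want to confirm what a particular class file was compiled for, the major version sits in the class-file header right after the magic number. A minimal sketch; the path to PlinkMapper.class (for example, extracted from the job jar) is an assumption here:

import java.io.DataInputStream;
import java.io.FileInputStream;

// Minimal sketch: print the class-file version of a compiled class.
// Usage (hypothetical path): java ClassVersionCheck Plink/PlinkMapper.class
public class ClassVersionCheck {
    public static void main(String[] args) throws Exception {
        DataInputStream in = new DataInputStream(new FileInputStream(args[0]));
        try {
            int magic = in.readInt();              // should be 0xCAFEBABE
            int minor = in.readUnsignedShort();
            int major = in.readUnsignedShort();    // 51 means the class needs Java 7+
            System.out.printf("magic=%x minor=%d major=%d%n", magic, minor, major);
        } finally {
            in.close();
        }
    }
}

If it prints major=51 and the cluster JRE is Java 6, that would explain the failed map attempts.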
Regarding "java - MapReduce job gives empty output on the cluster", a similar question was found on Stack Overflow: https://stackoverflow.com/questions/31543702/