gpt4 book ai didi

org.apache.hadoop.tools.rumen.ZombieJob类的使用及代码示例

转载 作者:知者 更新时间:2024-03-13 12:54:01 29 4
gpt4 key购买 nike

本文整理了Java中org.apache.hadoop.tools.rumen.ZombieJob类的一些代码示例,展示了ZombieJob类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZombieJob类的具体详情如下:
包路径:org.apache.hadoop.tools.rumen.ZombieJob
类名称:ZombieJob

ZombieJob介绍

[英]ZombieJob is a layer above LoggedJob raw JSON objects. Each ZombieJob object represents a job in job history. For everything that exists in job history, contents are returned unchanged faithfully. To get input splits of a non-exist task, a non-exist task attempt, or an ill-formed task attempt, proper objects are made up from statistical sketches.
[中]ZombieJob是LoggedJob原始JSON对象之上的一层。每个ZombieJob对象代表作业历史中的一个作业。对于作业历史记录中存在的所有内容,都会原封不动地如实返回。为了获得不存在的任务、不存在的任务尝试或格式不正确的任务尝试的输入拆分,会从统计草图中构造出适当的对象。

代码示例

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Looks up the {@code LoggedTask} recorded in the trace for the given task
 * type and task number.
 *
 * @param taskType   type of the task (map or reduce)
 * @param taskNumber index of the task within the job
 * @return the matching logged task, or {@code null} if the trace contains
 *         no such task
 */
private LoggedTask getLoggedTask(TaskType taskType, int taskNumber) {
 buildMaps();  // lazily populate loggedTaskMap before the lookup
 return loggedTaskMap.get(getMaskedTaskID(taskType, taskNumber));
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Fabricates a runtime for a reduce attempt in the given state.
 *
 * <p>The underlying sketch ({@code doMakeUpReduceRuntime}) can occasionally
 * produce a negative (invalid) sample, so we re-sample up to five times and
 * fall back to zero if every sample is invalid.
 *
 * @param state simulated outcome state of the attempt
 * @return a non-negative made-up reduce runtime in milliseconds
 */
private long makeUpReduceRuntime(State state) {
  for (int attempt = 0; attempt < 5; attempt++) {
    final long candidate = doMakeUpReduceRuntime(state);
    if (candidate >= 0) {
      return candidate;
    }
  }
  // Every sample came back negative; report a zero runtime instead.
  return 0;
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

LoggedTask loggedTask = getLoggedTask(taskType, taskNumber);
if (loggedTask == null) {
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
LoggedTaskAttempt loggedAttempt = getLoggedTaskAttempt(taskType,
  taskNumber, taskAttemptNumber);
if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
} else {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);
 } else {
  return getTaskAttemptInfo(loggedTask, loggedAttempt);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Returns the {@code TaskInfo} for the task identified by type and number,
 * resolving it through the logged task from the trace.
 *
 * @param taskType   type of the task (map or reduce)
 * @param taskNumber index of the task within the job
 * @return task information derived from the trace (made up from statistical
 *         sketches when the task is absent — see class javadoc)
 */
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
 return getTaskInfo(getLoggedTask(taskType, taskNumber));
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

int taskAttemptNumber, int locality) {
TaskType taskType = TaskType.MAP;
LoggedTask loggedTask = getLoggedTask(taskType, taskNumber);
if (loggedTask == null) {
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
LoggedTaskAttempt loggedAttempt = getLoggedTaskAttempt(taskType,
  taskNumber, taskAttemptNumber);
if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
} else {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);
 } else if (loggedAttempt.getResult() == Values.FAILED) {
  return getTaskAttemptInfo(loggedTask, loggedAttempt);
 } else if (loggedAttempt.getResult() == Values.SUCCESS) {
  int loggedLocality = getLocality(loggedTask, loggedAttempt);
  if (locality == loggedLocality) {
   return getTaskAttemptInfo(loggedTask, loggedAttempt);
  } else {

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

private TaskAttemptInfo getTaskAttemptInfo(LoggedTask loggedTask,
  LoggedTaskAttempt loggedAttempt) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 State state = convertState(loggedAttempt.getResult());
 if (loggedTask.getTaskType() == Values.MAP) {
  long taskTime;
  if (loggedAttempt.getStartTime() == 0) {
   int locality = getLocality(loggedTask, loggedAttempt);
   taskTime = makeUpMapRuntime(state, locality);
  } else {
   taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
  return new MapTaskAttemptInfo(state, taskInfo, taskTime, allSplitVectors);
 } else if (loggedTask.getTaskType() == Values.REDUCE) {
   long reduceTime = makeUpReduceRuntime(state);
   return new ReduceTaskAttemptInfo
    (state, taskInfo, 0, 0, reduceTime, allSplitVectors);
   long mergeTime = mergeDone - shuffleDone;
   long reduceTime = finishTime - mergeDone;
   reduceTime = sanitizeTaskRuntime(reduceTime, loggedAttempt.getAttemptID());

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Rescales a logged map attempt's runtime to a different data locality.
 *
 * <p>The runtime is multiplied by the ratio of the locality factor of the
 * requested locality over the factor of the locality actually logged.
 *
 * @param loggedTask               the task the attempt belongs to
 * @param loggedAttempt            the attempt whose runtime is rescaled
 * @param locality                 requested locality distance (0..2)
 * @param loggedLocality           locality distance recorded in the trace
 * @param rackLocalOverNodeLocal   runtime multiplier for rack-local reads
 * @param rackRemoteOverNodeLocal  runtime multiplier for off-rack reads
 * @return a {@code MapTaskAttemptInfo} with the rescaled runtime
 * @throws IllegalArgumentException if the task is not a map task
 */
@SuppressWarnings("hiding")
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  // Cost multipliers indexed by locality distance:
  // [0]=node-local, [1]=rack-local, [2]=off-rack.
  double[] localityFactors =
      { 1.0, rackLocalOverNodeLocal, rackRemoteOverNodeLocal };
  double ratio = localityFactors[locality] / localityFactors[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() == Values.MAP) {
    long runtime = (loggedAttempt.getStartTime() == 0)
        // No start time recorded: fabricate a plausible runtime instead.
        ? makeUpMapRuntime(state, locality)
        : loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
    runtime = sanitizeTaskRuntime(runtime, loggedAttempt.getAttemptID());
    runtime *= ratio;
    return new MapTaskAttemptInfo(
        state, taskInfo, runtime, loggedAttempt.allSplitVectors());
  }
  throw new IllegalArgumentException("taskType can only be MAP: "
      + loggedTask.getTaskType());
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Returns the job ID of the underlying logged job from the trace.
 *
 * @return the {@code JobID} of the wrapped {@code LoggedJob}
 */
@Override
public JobID getJobID() {
 return getLoggedJob().getJobID();
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Reads the next job from the trace and wraps it as a {@code ZombieJob}.
 *
 * @return the next job in the trace, or {@code null} when the trace is
 *         exhausted
 * @throws IOException if reading the underlying trace fails
 */
@Override
public ZombieJob getNextJob() throws IOException {
  final LoggedJob nextLoggedJob = reader.getNext();
  if (nextLoggedJob == null) {
    // End of the trace.
    return null;
  }
  if (hasRandomSeed) {
    // Derive a per-job seed from the master seed and the job ID so each
    // ZombieJob's randomness is reproducible.
    final long subRandomSeed = RandomSeedGenerator.getSeed(
        "forZombieJob" + nextLoggedJob.getJobID(), randomSeed);
    return new ZombieJob(nextLoggedJob, cluster, subRandomSeed);
  }
  return new ZombieJob(nextLoggedJob, cluster);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Computes the locality distance between the host an attempt ran on and the
 * closest of the task's preferred data locations.
 *
 * @param loggedTask    the task whose preferred locations are consulted
 * @param loggedAttempt the attempt whose host is measured against them
 * @return the smallest cluster distance found, or the cluster's maximum
 *         distance when the host or locations cannot be resolved
 */
private int getLocality(LoggedTask loggedTask, LoggedTaskAttempt loggedAttempt) {
  // Start pessimistic: the farthest possible distance in the cluster.
  int best = cluster.getMaximumDistance();
  String attemptHost = loggedAttempt.getHostName().getValue();
  if (attemptHost == null) {
    return best;  // attempt host unknown
  }
  MachineNode attemptNode = getMachineNode(attemptHost);
  if (attemptNode == null) {
    return best;  // host not present in the simulated cluster
  }
  List<LoggedLocation> preferred = loggedTask.getPreferredLocations();
  if (preferred == null) {
    return best;
  }
  for (LoggedLocation loc : preferred) {
    List<NodeName> layers = loc.getLayers();
    if (layers == null || layers.isEmpty()) {
      continue;  // malformed location entry; skip it
    }
    // The last (innermost) layer names the data node itself.
    String dataNodeName = layers.get(layers.size() - 1).getValue();
    MachineNode dataNode = cluster.getMachineByName(dataNodeName);
    if (dataNode != null) {
      best = Math.min(best, cluster.distance(attemptNode, dataNode));
    }
  }
  return best;
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

int taskAttemptNumber, int locality) {
TaskType taskType = TaskType.MAP;
LoggedTask loggedTask = getLoggedTask(taskType, taskNumber);
if (loggedTask == null) {
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
LoggedTaskAttempt loggedAttempt = getLoggedTaskAttempt(taskType,
  taskNumber, taskAttemptNumber);
if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
} else {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);
 } else if (loggedAttempt.getResult() == Values.FAILED) {
  return getTaskAttemptInfo(loggedTask, loggedAttempt);
 } else if (loggedAttempt.getResult() == Values.SUCCESS) {
  int loggedLocality = getLocality(loggedTask, loggedAttempt);
  if (locality == loggedLocality) {
   return getTaskAttemptInfo(loggedTask, loggedAttempt);
  } else {

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

private TaskAttemptInfo getTaskAttemptInfo(LoggedTask loggedTask,
  LoggedTaskAttempt loggedAttempt) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 State state = convertState(loggedAttempt.getResult());
 if (loggedTask.getTaskType() == Values.MAP) {
  long taskTime;
  if (loggedAttempt.getStartTime() == 0) {
   int locality = getLocality(loggedTask, loggedAttempt);
   taskTime = makeUpMapRuntime(state, locality);
  } else {
   taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
  return new MapTaskAttemptInfo(state, taskInfo, taskTime, allSplitVectors);
 } else if (loggedTask.getTaskType() == Values.REDUCE) {
   long reduceTime = makeUpReduceRuntime(state);
   return new ReduceTaskAttemptInfo
    (state, taskInfo, 0, 0, reduceTime, allSplitVectors);
   long mergeTime = mergeDone - shuffleDone;
   long reduceTime = finishTime - mergeDone;
   reduceTime = sanitizeTaskRuntime(reduceTime, loggedAttempt.getAttemptID());

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Rescales a logged map attempt's runtime to a requested data locality.
 *
 * <p>{@code factors[d]} is the runtime multiplier for locality distance
 * {@code d} (0 = node-local, 1 = rack-local, 2 = off-rack); the logged
 * runtime is multiplied by {@code factors[locality] / factors[loggedLocality]}.
 *
 * @param loggedTask               the task the attempt belongs to
 * @param loggedAttempt            the attempt whose runtime is rescaled
 * @param locality                 requested locality distance (0..2)
 * @param loggedLocality           locality distance recorded in the trace
 * @param rackLocalOverNodeLocal   runtime multiplier for rack-local reads
 * @param rackRemoteOverNodeLocal  runtime multiplier for off-rack reads
 * @return a {@code MapTaskAttemptInfo} carrying the rescaled runtime
 * @throws IllegalArgumentException if the task is not a map task
 */
@SuppressWarnings("hiding")
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  final TaskInfo taskInfo = getTaskInfo(loggedTask);
  final double[] factors =
      new double[] { 1.0, rackLocalOverNodeLocal, rackRemoteOverNodeLocal };
  final double scaleFactor = factors[locality] / factors[loggedLocality];
  final State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() != Values.MAP) {
    // Only map attempts can be rescaled by locality.
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
  long elapsed;
  if (loggedAttempt.getStartTime() == 0) {
    // Missing start time in the trace: fabricate a plausible runtime.
    elapsed = makeUpMapRuntime(state, locality);
  } else {
    elapsed = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  }
  elapsed = sanitizeTaskRuntime(elapsed, loggedAttempt.getAttemptID());
  elapsed *= scaleFactor;
  return new MapTaskAttemptInfo(state, taskInfo, elapsed,
      loggedAttempt.allSplitVectors());
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Returns the {@code TaskInfo} for the task identified by type and number
 * by first resolving the corresponding logged task from the trace.
 *
 * @param taskType   type of the task (map or reduce)
 * @param taskNumber index of the task within the job
 * @return task information for the task (made up from statistical sketches
 *         when the task does not exist in the trace — see class javadoc)
 */
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
 return getTaskInfo(getLoggedTask(taskType, taskNumber));
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Returns the job ID, delegating to the wrapped {@code LoggedJob}.
 *
 * @return the {@code JobID} of the underlying logged job
 */
@Override
public JobID getJobID() {
 return getLoggedJob().getJobID();
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Produces the next {@code ZombieJob} from the underlying trace reader.
 *
 * @return the next job, or {@code null} once the trace has no more jobs
 * @throws IOException if the trace cannot be read
 */
@Override
public ZombieJob getNextJob() throws IOException {
  LoggedJob job = reader.getNext();
  if (job == null) {
    return null;  // trace exhausted
  }
  if (!hasRandomSeed) {
    return new ZombieJob(job, cluster);
  }
  // Seed each job deterministically from the master seed plus the job ID.
  long subRandomSeed = RandomSeedGenerator.getSeed(
      "forZombieJob" + job.getJobID(), randomSeed);
  return new ZombieJob(job, cluster, subRandomSeed);
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Measures how close an attempt's host was to the task's preferred data
 * locations, as a cluster distance.
 *
 * @param loggedTask    task supplying the preferred locations
 * @param loggedAttempt attempt supplying the execution host
 * @return the minimum distance from the host to any resolvable preferred
 *         location; the cluster's maximum distance if nothing resolves
 */
private int getLocality(LoggedTask loggedTask, LoggedTaskAttempt loggedAttempt) {
  final int maxDistance = cluster.getMaximumDistance();
  String hostName = loggedAttempt.getHostName().getValue();
  if (hostName == null) {
    return maxDistance;
  }
  MachineNode hostNode = getMachineNode(hostName);
  if (hostNode == null) {
    return maxDistance;
  }
  int distance = maxDistance;
  List<LoggedLocation> locations = loggedTask.getPreferredLocations();
  if (locations != null) {
    for (LoggedLocation location : locations) {
      List<NodeName> layers = location.getLayers();
      boolean unusable = (layers == null) || layers.isEmpty();
      if (unusable) {
        continue;
      }
      // The innermost layer is the data node's own name.
      String dataNodeName = layers.get(layers.size() - 1).getValue();
      MachineNode dataNode = cluster.getMachineByName(dataNodeName);
      if (dataNode != null) {
        distance = Math.min(distance, cluster.distance(hostNode, dataNode));
      }
    }
  }
  return distance;
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

LoggedTask loggedTask = getLoggedTask(taskType, taskNumber);
if (loggedTask == null) {
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
LoggedTaskAttempt loggedAttempt = getLoggedTaskAttempt(taskType,
  taskNumber, taskAttemptNumber);
if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
} else {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);
 } else {
  return getTaskAttemptInfo(loggedTask, loggedAttempt);

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Retrieves the {@code LoggedTask} for the given task type and number from
 * the trace's task map.
 *
 * @param taskType   type of the task (map or reduce)
 * @param taskNumber index of the task within the job
 * @return the matching logged task, or {@code null} if absent from the trace
 */
private LoggedTask getLoggedTask(TaskType taskType, int taskNumber) {
 buildMaps();  // ensure loggedTaskMap is built before the lookup
 return loggedTaskMap.get(getMaskedTaskID(taskType, taskNumber));
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapred-test

job = parser.getNextJob();
ZombieJob zJob = (ZombieJob) job;
LoggedJob loggedJob = zJob.getLoggedJob();
System.out.println(i + ":" + job.getNumberMaps() + "m, "
  + job.getNumberReduces() + "r");

29 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com