
Usage and Code Examples for org.apache.hadoop.tools.rumen.ZombieJob.sanitizeTaskRuntime()

Reposted | Author: 知者 | Updated: 2024-03-13 12:52:11

This article collects Java code examples of the org.apache.hadoop.tools.rumen.ZombieJob.sanitizeTaskRuntime() method and shows how it is used in practice. The examples are drawn from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the sanitizeTaskRuntime() method:
Package path: org.apache.hadoop.tools.rumen.ZombieJob
Class name: ZombieJob
Method name: sanitizeTaskRuntime

About ZombieJob.sanitizeTaskRuntime

No description is provided by the source. Judging from the call sites below, sanitizeTaskRuntime(long, TaskAttemptID) takes a task runtime in milliseconds together with the task attempt it belongs to, and returns a sanitized value, guarding against invalid (for example, negative) runtimes that can arise from incomplete trace data.
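
Since the official description is missing, here is a minimal standalone sketch of the behavior the call sites suggest: clamping invalid runtimes to a small positive default. The class name, the warning text, the parameter type, and the 100 ms fallback are all illustrative assumptions, not taken from the Hadoop source.

import org.apache.hadoop.mapreduce.TaskAttemptID;

final class RuntimeSanitizer {
  // Hypothetical sketch: a negative runtime usually means the trace was
  // missing a timestamp, so clamp it to a small positive default rather
  // than let the simulation see a nonsensical duration.
  static long sanitizeTaskRuntime(long timeMs, TaskAttemptID id) {
    if (timeMs < 0) {
      System.err.println("Negative running time for task " + id + ": " + timeMs);
      return 100L; // assumed fallback of 100 ms
    }
    return timeMs;
  }
}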

Code Examples

Code example source: origin: com.github.jiayuhan-it/hadoop-rumen (identical code also appears in ch.cern.hadoop/hadoop-rumen)

private TaskAttemptInfo makeUpTaskAttemptInfo(TaskType taskType, TaskInfo taskInfo,
    int taskAttemptNumber, int taskNumber, int locality) {
  if (taskType == TaskType.MAP) {
    State state = State.SUCCEEDED;
    long runtime = 0;
    // make up state
    state = makeUpState(taskAttemptNumber, job.getMapperTriesToSucceed());
    runtime = makeUpMapRuntime(state, locality);
    runtime = sanitizeTaskRuntime(runtime, makeTaskAttemptID(taskType,
        taskNumber, taskAttemptNumber));
    TaskAttemptInfo tai = new MapTaskAttemptInfo(state, taskInfo, runtime, null);
    return tai;
  } else if (taskType == TaskType.REDUCE) {
    State state = State.SUCCEEDED;
    long shuffleTime = 0;
    long sortTime = 0;
    long reduceTime = 0;
    // TODO make up state
    // state = makeUpState(taskAttemptNumber, job.getReducerTriesToSucceed());
    reduceTime = makeUpReduceRuntime(state);
    TaskAttemptInfo tai = new ReduceTaskAttemptInfo(
        state, taskInfo, shuffleTime, sortTime, reduceTime, null);
    return tai;
  }
  throw new IllegalArgumentException("taskType is neither MAP nor REDUCE: "
      + taskType);
}


Code example source: origin: com.github.jiayuhan-it/hadoop-rumen (identical code also appears in ch.cern.hadoop/hadoop-rumen)

@SuppressWarnings("hiding")
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  double[] factors = new double[] { 1.0, rackLocalOverNodeLocal,
      rackRemoteOverNodeLocal };
  double scaleFactor = factors[locality] / factors[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() == Values.MAP) {
    long taskTime = 0;
    if (loggedAttempt.getStartTime() == 0) {
      taskTime = makeUpMapRuntime(state, locality);
    } else {
      taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
    }
    taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
    taskTime *= scaleFactor;
    return new MapTaskAttemptInfo(
        state, taskInfo, taskTime, loggedAttempt.allSplitVectors());
  } else {
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
}

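In scaleInfo(), the factors array is indexed by locality (0 = node-local, 1 = rack-local, 2 = off-rack), so scaleFactor stretches or shrinks the sanitized logged runtime according to how the simulated attempt's data locality differs from the logged one. A small worked example with made-up factor values:

// Illustrative values only; real ratios come from the trace statistics.
double rackLocalOverNodeLocal = 1.5;  // assumed: rack-local maps run 1.5x longer
double rackRemoteOverNodeLocal = 3.0; // assumed: off-rack maps run 3x longer
double[] factors = { 1.0, rackLocalOverNodeLocal, rackRemoteOverNodeLocal };

int loggedLocality = 0; // logged attempt was node-local
int locality = 1;       // simulated attempt is rack-local
double scaleFactor = factors[locality] / factors[loggedLocality]; // 1.5

long loggedTaskTime = 20_000;                          // 20 s in the trace
long taskTime = (long) (loggedTaskTime * scaleFactor); // 30 s simulated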

Code example source: origin: ch.cern.hadoop/hadoop-rumen (excerpt; the same fragment also appears in com.github.jiayuhan-it/hadoop-rumen)

  taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
  return new MapTaskAttemptInfo(state, taskInfo, taskTime, allSplitVectors);
} else if (loggedTask.getTaskType() == Values.REDUCE) {
  long mergeTime = mergeDone - shuffleDone;
  long reduceTime = finishTime - mergeDone;
  reduceTime = sanitizeTaskRuntime(reduceTime, loggedAttempt.getAttemptID());

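In the reduce branch of this excerpt, per-phase durations (shuffle, merge/sort, reduce) are derived from the logged timestamps, and only the final reduce phase is sanitized. A minimal sketch of constructing a ReduceTaskAttemptInfo from such phase times; all numbers and counters below are hypothetical:

import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;

public class ReducePhaseDemo {
  public static void main(String[] args) {
    // Hypothetical phase times in milliseconds.
    long shuffleTime = 4_000;
    long mergeTime = 1_000;
    long reduceTime = 6_000;
    // Hypothetical task counters: bytes/records in and out, max memory.
    TaskInfo taskInfo = new TaskInfo(1L << 20, 1_000, 1L << 19, 500, 0);
    ReduceTaskAttemptInfo tai = new ReduceTaskAttemptInfo(
        State.SUCCEEDED, taskInfo, shuffleTime, mergeTime, reduceTime, null);
    // getRuntime() aggregates the three phase times (11,000 ms here).
    System.out.println("total runtime = " + tai.getRuntime() + " ms");
  }
}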
