gpt4 book ai didi

org.apache.hadoop.mapred.YARNRunner类的使用及代码示例

转载 作者:知者 更新时间:2024-03-15 01:13:31 29 4
gpt4 key购买 nike

本文整理了Java中org.apache.hadoop.mapred.YARNRunner类的一些代码示例,展示了YARNRunner类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度帮忙到你。YARNRunner类的具体详情如下:
包路径:org.apache.hadoop.mapred.YARNRunner
类名称:YARNRunner

YARNRunner介绍

[英]This class enables the current JobClient (0.22 hadoop) to run on YARN.
[中]此类使当前的JobClient(0.22 hadoop)能够在YARN上运行。

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

@Override
public ClientProtocol create(Configuration conf) throws IOException {
 // This factory only applies when the configured MR framework is "yarn";
 // for anything else, decline by returning null so other providers can try.
 String framework = conf.get(MRConfig.FRAMEWORK_NAME);
 if (!MRConfig.YARN_FRAMEWORK_NAME.equals(framework)) {
  return null;
 }
 return new YARNRunner(conf);
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-jobclient

@Override
 public void close(ClientProtocol clientProtocol) throws IOException {
  // Only YARNRunner instances own closeable resources; ignore anything else.
  if (!(clientProtocol instanceof YARNRunner)) {
   return;
  }
  YARNRunner runner = (YARNRunner) clientProtocol;
  runner.close();
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

// Convenience overload: registers p with APPLICATION visibility, no explicit
// link name, and the shared-cache upload flag disabled.
private LocalResource createApplicationResource(FileContext fs, Path p,
  LocalResourceType type) throws IOException {
 final LocalResourceVisibility visibility = LocalResourceVisibility.APPLICATION;
 return createApplicationResource(fs, p, /* name */ null, type, visibility,
   /* uploadToSharedCache */ false);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-jobclient

@Override
 public Void run() throws Exception {
  // Build a YARNRunner with no delegates and request a delegation token
  // from the (mocked) history-server proxy.
  yarnRunner = new YARNRunner(conf, null, null);
  yarnRunner.getDelegationTokenFromHS(hsProxy);
  // Mockito check: the proxy's getDelegationToken must have been invoked
  // exactly once with some GetDelegationTokenRequest.
  verify(hsProxy).
   getDelegationToken(any(GetDelegationTokenRequest.class));
  return null;
 }
});

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-jobclient

@Override
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
throws IOException, InterruptedException {

 addHistoryToken(ts);

 // Build the submission context describing the MR ApplicationMaster.
 ApplicationSubmissionContext appContext =
  createApplicationSubmissionContext(conf, jobSubmitDir, ts);

 // Hand the application to the ResourceManager and check it started.
 try {
  ApplicationId applicationId =
    resMgrDelegate.submitApplication(appContext);
  ApplicationReport appMaster =
    resMgrDelegate.getApplicationReport(applicationId);
  if (appMaster == null) {
   throw new IOException("Failed to run job : application report is null");
  }
  YarnApplicationState state = appMaster.getYarnApplicationState();
  if (state == YarnApplicationState.FAILED
    || state == YarnApplicationState.KILLED) {
   throw new IOException("Failed to run job : "
     + appMaster.getDiagnostics());
  }
  return clientCache.getClient(jobId).getJobStatus(jobId);
 } catch (YarnException e) {
  // Wrap YARN-layer failures in the IOException callers expect.
  throw new IOException(e);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

// NOTE(review): this snippet was truncated/garbled by the page extraction —
// the enclosing method signature, try/catch scaffolding and several closing
// braces are missing, so it is NOT compilable as shown. It illustrates the
// kill-then-poll loop of YARNRunner#killJob: issue a kill, then poll job
// status until terminal or a hard-kill timeout elapses.
killUnFinishedApplication(appId);
 return;
 killApplication(appId);
 return;
          MRJobConfig.DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS);
 while ((currentTimeMillis < timeKillIssued + killTimeOut)
   && !isJobInTerminalState(status)) {
  try {
   Thread.sleep(1000L);
  status = clientCache.getClient(arg0).getJobStatus(arg0);
  if (status == null) {
   killUnFinishedApplication(appId);
   return;
 LOG.debug("Error when checking for application status", io);
if (status != null && !isJobInTerminalState(status)) {
 killApplication(appId);
代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-jobclient

@Before
public void setUp() throws Exception {
 // Wire a YARNRunner against mocked collaborators. Statement order matters:
 // each real object is constructed first, then wrapped with spy() so Mockito
 // can intercept calls on the instance actually handed to YARNRunner.
 resourceMgrDelegate = mock(ResourceMgrDelegate.class);
 conf = new YarnConfiguration();
 conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
 clientCache = new ClientCache(conf, resourceMgrDelegate);
 clientCache = spy(clientCache);
 yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
 yarnRunner = spy(yarnRunner);
 submissionContext = mock(ApplicationSubmissionContext.class);
 // Stub createApplicationSubmissionContext on the spy so every test gets
 // the canned submissionContext instead of running the real builder.
 doAnswer(
   new Answer<ApplicationSubmissionContext>() {
    @Override
    public ApplicationSubmissionContext answer(InvocationOnMock invocation)
      throws Throwable {
     return submissionContext;
    }
   }
   ).when(yarnRunner).createApplicationSubmissionContext(any(Configuration.class),
     any(String.class), any(Credentials.class));
 appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
 jobId = TypeConverter.fromYarn(appId);
 // Start each test with a clean scratch directory.
 if (testWorkDir.exists()) {
  FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
 }
 testWorkDir.mkdirs();
}

代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-jobclient

YARNRunner yarnRunner = new YARNRunner(conf, rmDelegate, clientCache);
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(0)).getDelegationToken(
  any(GetDelegationTokenRequest.class));
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(0)).getDelegationToken(
  any(GetDelegationTokenRequest.class));
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(0)).getDelegationToken(
  any(GetDelegationTokenRequest.class));
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(1)).getDelegationToken(
  any(GetDelegationTokenRequest.class));
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(1)).getDelegationToken(
  any(GetDelegationTokenRequest.class));

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-jobclient

// NOTE(review): garbled extraction fragment — argument lists are cut mid-call
// (e.g. the jobJarPath resource is missing its trailing arguments) and the
// enclosing method signature is absent, so this does NOT compile. It sketches
// two concerns of createApplicationSubmissionContext: registering job.xml,
// the job jar, and submit-dir files as LocalResources, then warning when any
// *JAVA_OPTS setting smuggles in -Djava.library.path (which belongs in *_ENV).
createApplicationResource(defaultFileContext,
    jobConfPath, LocalResourceType.FILE));
if (jobConf.get(MRJobConfig.JAR) != null) {
 Path jobJarPath = new Path(jobConf.get(MRJobConfig.JAR));
 LocalResource rc = createApplicationResource(
   FileContext.getFileContext(jobJarPath.toUri(), jobConf),
   jobJarPath,
 localResources.put(
   MRJobConfig.JOB_SUBMIT_DIR + "/" + s,
   createApplicationResource(defaultFileContext,
     new Path(jobSubmitDir, s), LocalResourceType.FILE));
warnForJavaLibPath(conf.get(MRJobConfig.MAP_JAVA_OPTS,""), "map", 
  MRJobConfig.MAP_JAVA_OPTS, MRJobConfig.MAP_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,""), "map", 
  MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.REDUCE_JAVA_OPTS,""), "reduce", 
  MRJobConfig.REDUCE_JAVA_OPTS, MRJobConfig.REDUCE_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,""), "reduce", 
  MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
warnForJavaLibPath(mrAppMasterAdminOptions, "app master", 
  MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, MRJobConfig.MR_AM_ADMIN_USER_ENV);
vargs.add(mrAppMasterAdminOptions);
warnForJavaLibPath(mrAppMasterUserOptions, "app master", 
  MRJobConfig.MR_AM_COMMAND_OPTS, MRJobConfig.MR_AM_ENV);
vargs.add(mrAppMasterUserOptions);

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
 // Nothing to do unless security is enabled AND a history-server proxy
 // has been initialized.
 MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
 if (!UserGroupInformation.isSecurityEnabled() || (hsProxy == null)) {
  return;
 }
 /*
  * note that get delegation token was called. Again this is hack for oozie
  * to make sure we add history server delegation tokens to the credentials
  */
 RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
 Text service = resMgrDelegate.getRMDelegationTokenService();
 if (tokenSelector.selectToken(service, ts.getAllTokens()) == null) {
  // No RM delegation token present — skip fetching an HS token.
  return;
 }
 Text hsService =
   SecurityUtil.buildTokenService(hsProxy.getConnectAddress());
 if (ts.getToken(hsService) == null) {
  ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-jobclient

// Writes the job.xml, job.split and job.splitmetainfo files the AM submission
// path expects into testWorkDir, then asks the runner to build a submission
// context pointing at that directory.
//
// Fix: the original opened FileOutputStreams and closed them manually, so an
// exception from writeXml (or the later constructors) leaked the open stream.
// try-with-resources / immediate close now guarantees cleanup on all paths.
private ApplicationSubmissionContext buildSubmitContext(
   YARNRunner yarnRunner, JobConf jobConf) throws IOException {
  File jobxml = new File(testWorkDir, MRJobConfig.JOB_CONF_FILE);
  try (OutputStream out = new FileOutputStream(jobxml)) {
   // NOTE(review): serializes the test fixture's shared 'conf', not the
   // 'jobConf' parameter — preserved as-is; confirm this is intended.
   conf.writeXml(out);
  }

  // Empty split/metainfo files are sufficient for context creation.
  new FileOutputStream(new File(testWorkDir, MRJobConfig.JOB_SPLIT)).close();
  new FileOutputStream(
    new File(testWorkDir, MRJobConfig.JOB_SPLIT_METAINFO)).close();

  return yarnRunner.createApplicationSubmissionContext(jobConf,
    testWorkDir.toString(), new Credentials());
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-jobclient

private void killUnFinishedApplication(ApplicationId appId)
  throws IOException {
 // Fetch the current report; YARN-layer failures become IOException.
 final ApplicationReport report;
 try {
  report = resMgrDelegate.getApplicationReport(appId);
 } catch (YarnException e) {
  throw new IOException(e);
 }
 // An application already in a terminal state needs no kill request.
 YarnApplicationState state = report.getYarnApplicationState();
 boolean terminal = state == YarnApplicationState.FINISHED
   || state == YarnApplicationState.FAILED
   || state == YarnApplicationState.KILLED;
 if (!terminal) {
  killApplication(appId);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

// NOTE(review): garbled extraction fragment — the MAPRED_REDUCE_ADMIN call is
// cut before its last argument, so this does NOT compile. It shows the block
// of warnForJavaLibPath checks that flag -Djava.library.path being set via
// *JAVA_OPTS (it should go through the corresponding *_ENV settings instead),
// followed by appending the AM admin/user opts to the AM launch command.
warnForJavaLibPath(conf.get(MRJobConfig.MAP_JAVA_OPTS, ""),
  "map",
  MRJobConfig.MAP_JAVA_OPTS,
  MRJobConfig.MAP_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS, ""),
  "map",
  MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
  MRJobConfig.MAPRED_ADMIN_USER_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.REDUCE_JAVA_OPTS, ""),
  "reduce",
  MRJobConfig.REDUCE_JAVA_OPTS,
  MRJobConfig.REDUCE_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS, ""),
  "reduce",
  MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
warnForJavaLibPath(mrAppMasterAdminOptions, "app master",
  MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, MRJobConfig.MR_AM_ADMIN_USER_ENV);
vargs.add(mrAppMasterAdminOptions);
warnForJavaLibPath(mrAppMasterUserOptions, "app master",
  MRJobConfig.MR_AM_COMMAND_OPTS, MRJobConfig.MR_AM_ENV);
vargs.add(mrAppMasterUserOptions);

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

@Override
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
throws IOException, InterruptedException {

 addHistoryToken(ts);
 // Describe the MR ApplicationMaster to YARN.
 ApplicationSubmissionContext appContext =
  createApplicationSubmissionContext(conf, jobSubmitDir, ts);
 // Submit to ResourceManager
 try {
  // Submit, then immediately fetch a report to detect early failure.
  ApplicationReport report = resMgrDelegate.getApplicationReport(
    resMgrDelegate.submitApplication(appContext));
  boolean missing = (report == null);
  boolean dead = !missing
    && (report.getYarnApplicationState() == YarnApplicationState.FAILED
      || report.getYarnApplicationState() == YarnApplicationState.KILLED);
  if (missing || dead) {
   String diagnostics =
     missing ? "application report is null" : report.getDiagnostics();
   throw new IOException("Failed to run job : " + diagnostics);
  }
  return clientCache.getClient(jobId).getJobStatus(jobId);
 } catch (YarnException e) {
  throw new IOException(e);
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-jobclient

// NOTE(review): duplicate of the earlier truncated killJob snippet — braces,
// try/catch scaffolding and the method signature were lost in extraction, so
// this is NOT compilable. It illustrates issuing a kill and then polling job
// status once a second until terminal or the hard-kill timeout expires.
killUnFinishedApplication(appId);
 return;
 killApplication(appId);
 return;
          MRJobConfig.DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS);
 while ((currentTimeMillis < timeKillIssued + killTimeOut)
   && !isJobInTerminalState(status)) {
  try {
   Thread.sleep(1000L);
  status = clientCache.getClient(arg0).getJobStatus(arg0);
  if (status == null) {
   killUnFinishedApplication(appId);
   return;
 LOG.debug("Error when checking for application status", io);
if (status != null && !isJobInTerminalState(status)) {
 killApplication(appId);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-jobclient

@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
 /* check if we have a hsproxy, if not, no need */
 MRClientProtocol historyProxy = clientCache.getInitializedHSProxy();
 boolean secure = UserGroupInformation.isSecurityEnabled();
 if (secure && historyProxy != null) {
  /*
   * note that get delegation token was called. Again this is hack for oozie
   * to make sure we add history server delegation tokens to the credentials
   */
  RMDelegationTokenSelector selector = new RMDelegationTokenSelector();
  Text rmService = resMgrDelegate.getRMDelegationTokenService();
  boolean hasRmToken =
    selector.selectToken(rmService, ts.getAllTokens()) != null;
  if (hasRmToken) {
   Text historyService =
     SecurityUtil.buildTokenService(historyProxy.getConnectAddress());
   // Only fetch a fresh HS token when one is not already cached.
   if (ts.getToken(historyService) == null) {
    ts.addToken(historyService, getDelegationTokenFromHS(historyProxy));
   }
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

private void killUnFinishedApplication(ApplicationId appId)
  throws IOException {
 ApplicationReport report;
 try {
  report = resMgrDelegate.getApplicationReport(appId);
 } catch (YarnException e) {
  // Normalize YARN failures to the IOException callers expect.
  throw new IOException(e);
 }
 switch (report.getYarnApplicationState()) {
  case FINISHED:
  case FAILED:
  case KILLED:
   // Already terminal — nothing to kill.
   return;
  default:
   killApplication(appId);
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-jobclient

@Override
public ClientProtocol create(Configuration conf) throws IOException {
 // Provide a YARN-backed ClientProtocol only when the framework is "yarn".
 boolean yarnFramework =
   MRConfig.YARN_FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME));
 return yarnFramework ? new YARNRunner(conf) : null;
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

@Override
 public void close(ClientProtocol clientProtocol) throws IOException {
  // Forward close() to YARNRunner instances; other protocols are ignored.
  if (clientProtocol instanceof YARNRunner) {
   YARNRunner runner = (YARNRunner) clientProtocol;
   runner.close();
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-jobclient

// NOTE(review): garbled extraction fragment — several statements are cut
// mid-expression (the jobJarViz ternary has lost its condition, the
// createApplicationResource call its final argument), so this does NOT
// compile. It sketches how job.xml, the job jar (as a PATTERN resource with
// PUBLIC or APPLICATION visibility) and the submit-dir files are registered
// as LocalResources for the AM container.
createApplicationResource(defaultFileContext,
    jobConfPath, LocalResourceType.FILE));
if (jobConf.get(MRJobConfig.JAR) != null) {
       ? LocalResourceVisibility.PUBLIC
       : LocalResourceVisibility.APPLICATION;
 LocalResource rc = createApplicationResource(
   FileContext.getFileContext(jobJarPath.toUri(), jobConf), jobJarPath,
   MRJobConfig.JOB_JAR, LocalResourceType.PATTERN, jobJarViz,
 localResources.put(
   MRJobConfig.JOB_SUBMIT_DIR + "/" + s,
   createApplicationResource(defaultFileContext,
     new Path(jobSubmitDir, s), LocalResourceType.FILE));

29 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com