
Usage of org.apache.hadoop.yarn.client.api.YarnClient.start() with Code Examples


This article collects Java code examples for the org.apache.hadoop.yarn.client.api.YarnClient.start() method and shows how YarnClient.start() is used in practice. The examples are drawn from selected open-source projects found on GitHub/Stack Overflow/Maven and should serve as a useful reference. Details of the YarnClient.start() method are as follows:
Package path: org.apache.hadoop.yarn.client.api.YarnClient
Class name: YarnClient
Method name: start

YarnClient.start: Description

YarnClient inherits start() from Hadoop's service lifecycle (org.apache.hadoop.service.AbstractService): calling it transitions the client from the INITED to the STARTED state, after which it can communicate with the ResourceManager. As every example below shows, the usual pattern is YarnClient.createYarnClient(), followed by init(Configuration) and start(); call stop() when the client is no longer needed.
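
Before the project-specific examples, here is a minimal, self-contained sketch of that lifecycle. It is illustrative code rather than a snippet from any of the projects below; the class name and the getApplications() query are hypothetical and only give start() some context.

import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnClientStartExample {
 public static void main(String[] args) throws Exception {
  // Typical lifecycle: create -> init(conf) -> start() -> use -> stop()
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  try {
   // Any ResourceManager call works once the client is started;
   // here we just count applications in the RUNNING state.
   List<ApplicationReport> running =
     yarnClient.getApplications(EnumSet.of(YarnApplicationState.RUNNING));
   System.out.println("Running applications: " + running.size());
  } finally {
   // Release the connection to the ResourceManager.
   yarnClient.stop();
  }
 }
}

YarnClient's Service base class also implements Closeable (close() delegates to stop()), which is why the uber/AthenaX test near the end of this article can create the client in a try-with-resources block.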

Code Examples

Code example source: apache/incubator-gobblin

@VisibleForTesting
void startYarnClient() {
 this.yarnClient.start();
}

Code example source: apache/drill

public YarnRMClient(YarnConfiguration conf) {
 this.conf = conf;
 yarnClient = YarnClient.createYarnClient();
 yarnClient.init(conf);
 yarnClient.start();
}

Code example source: apache/hive

/**
 * Kills all jobs tagged with the given tag that have been started after the
 * given timestamp.
 */
@Override
public void killJobs(String tag, long timestamp) {
 try {
  LOG.info("Looking for jobs to kill...");
  Set<ApplicationId> childJobs = getYarnChildJobs(tag, timestamp);
  if (childJobs.isEmpty()) {
   LOG.info("No jobs found from");
   return;
  } else {
   LOG.info(String.format("Found MR jobs count: %d", childJobs.size()));
   LOG.info("Killing all found jobs");
   YarnClient yarnClient = YarnClient.createYarnClient();
   yarnClient.init(conf);
   yarnClient.start();
   for (ApplicationId app: childJobs) {
    LOG.info(String.format("Killing job: %s ...", app));
    yarnClient.killApplication(app);
    LOG.info(String.format("Job %s killed", app));
   }
  }
 } catch (YarnException ye) {
  throw new RuntimeException("Exception occurred while killing child job(s)", ye);
 } catch (IOException ioe) {
  throw new RuntimeException("Exception occurred while killing child job(s)", ioe);
 }
}

Code example source: apache/flink

private AbstractYarnClusterDescriptor getClusterDescriptor(
      Configuration configuration,
      YarnConfiguration yarnConfiguration,
      String configurationDirectory) {
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConfiguration);
    yarnClient.start();

    return new YarnClusterDescriptor(
      configuration,
      yarnConfiguration,
      configurationDirectory,
      yarnClient,
      false);
  }

Code example source: apache/hive

public static void killChildYarnJobs(Configuration conf, String tag) {
 try {
  if (tag == null) {
   return;
  }
  LOG.info("Killing yarn jobs using query tag:" + tag);
  Set<ApplicationId> childYarnJobs = getChildYarnJobs(conf, tag);
  if (!childYarnJobs.isEmpty()) {
   YarnClient yarnClient = YarnClient.createYarnClient();
   yarnClient.init(conf);
   yarnClient.start();
   for (ApplicationId app : childYarnJobs) {
    yarnClient.killApplication(app);
   }
  }
 } catch (IOException | YarnException ye) {
  LOG.warn("Exception occurred while killing child job({})", ye);
 }
}

Code example source: apache/hive

public boolean isApplicationAccepted(HiveConf conf, String applicationId) {
  if (applicationId == null) {
   return false;
  }
  YarnClient yarnClient = null;
  try {
   LOG.info("Trying to find " + applicationId);
   ApplicationId appId = getApplicationIDFromString(applicationId);
   yarnClient = YarnClient.createYarnClient();
   yarnClient.init(conf);
   yarnClient.start();
   ApplicationReport appReport = yarnClient.getApplicationReport(appId);
   return appReport != null && appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED;
  } catch (Exception ex) {
   LOG.error("Failed getting application status for: " + applicationId + ": " + ex, ex);
   return false;
  } finally {
   if (yarnClient != null) {
    try {
     yarnClient.stop();
    } catch (Exception ex) {
     LOG.error("Failed to stop yarn client: " + ex, ex);
    }
   }
  }
 }

Code example source: apache/hive

public static boolean isApplicationAccepted(HiveConf conf, String applicationId) {
 if (applicationId == null) {
  return false;
 }
 YarnClient yarnClient = null;
 try {
  ApplicationId appId = getApplicationIDFromString(applicationId);
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  return appReport != null && appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED;
 } catch (Exception ex) {
  LOG.error("Failed getting application status for: " + applicationId + ": " + ex, ex);
  return false;
 } finally {
  if (yarnClient != null) {
   try {
    yarnClient.stop();
   } catch (Exception ex) {
    LOG.error("Failed to stop yarn client: " + ex, ex);
   }
  }
 }
}

Code example source: alibaba/jstorm

jstormClientContext.yarnClient.start();

Code example source: apache/flink

@BeforeClass
public static void setupClass() {
  yarnConfiguration = new YarnConfiguration();
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(yarnConfiguration);
  yarnClient.start();
}

Code example source: Qihoo360/XLearning

yarnClient.start();
LOG.info("Requesting a new application from cluster with " + yarnClient.getYarnClusterMetrics().getNumNodeManagers() + " NodeManagers");
newAPP = yarnClient.createApplication();

Code example source: apache/flink

@Before
public void checkClusterEmpty() {
  if (yarnClient == null) {
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(getYarnConfiguration());
    yarnClient.start();
  }
  flinkConfiguration = new org.apache.flink.configuration.Configuration(globalConfiguration);
}

Code example source: apache/flink

YarnClient yc = YarnClient.createYarnClient();
yc.init(YARN_CONFIGURATION);
yc.start();
List<ApplicationReport> apps = yc.getApplications(EnumSet.of(YarnApplicationState.RUNNING));
Assert.assertEquals(1, apps.size()); // Only one running

Code example source: apache/drill

@Override
public void start(CallbackHandler resourceCallback,
  org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler nodeCallback ) {
 conf = new YarnConfiguration();
 resourceMgr = AMRMClientAsync.createAMRMClientAsync(pollPeriodMs, resourceCallback);
 resourceMgr.init(conf);
 resourceMgr.start();
 // Create the asynchronous node manager client
 nodeMgr = NMClientAsync.createNMClientAsync(nodeCallback);
 nodeMgr.init(conf);
 nodeMgr.start();
 client = YarnClient.createYarnClient();
 client.init(conf);
 client.start();
 String appIdStr = System.getenv(DrillOnYarnConfig.APP_ID_ENV_VAR);
 if (appIdStr != null) {
  appId = ConverterUtils.toApplicationId(appIdStr);
  try {
   appReport = client.getApplicationReport(appId);
  } catch (YarnException | IOException e) {
   LOG.error(
     "Failed to get YARN applicaiton report for App ID: " + appIdStr, e);
  }
 }
}

Code example source: apache/ignite

YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();

Code example source: apache/incubator-gobblin

this.yarnClient.start();

Code example source: apache/flink

/**
 * Tests that the cluster retrieval of a finished YARN application fails.
 */
@Test(expected = ClusterRetrieveException.class)
public void testClusterClientRetrievalOfFinishedYarnApplication() throws Exception {
  final ApplicationId applicationId = ApplicationId.newInstance(System.currentTimeMillis(), 42);
  final ApplicationReport applicationReport = createApplicationReport(
    applicationId,
    YarnApplicationState.FINISHED,
    FinalApplicationStatus.SUCCEEDED);
  final YarnClient yarnClient = new TestingYarnClient(Collections.singletonMap(applicationId, applicationReport));
  final YarnConfiguration yarnConfiguration = new YarnConfiguration();
  yarnClient.init(yarnConfiguration);
  yarnClient.start();
  final TestingAbstractYarnClusterDescriptor clusterDescriptor = new TestingAbstractYarnClusterDescriptor(
    new Configuration(),
    yarnConfiguration,
    temporaryFolder.newFolder().getAbsolutePath(),
    yarnClient,
    false);
  try {
    clusterDescriptor.retrieve(applicationId);
  } finally {
    clusterDescriptor.close();
  }
}

Code example source: apache/flink

closableYarnClient.start();

Code example source: uber/AthenaX

ClusterInfo(String name, YarnClusterConfiguration conf) {
 this.name = name;
 this.client = YarnClient.createYarnClient();
 client.init(conf.conf());
 client.start();
 this.conf = conf;
}

Code example source: uber/AthenaX

YarnConfiguration conf = cluster.getYarnConfiguration();
client.init(conf);
client.start();
UUID jobUUID = UUID.randomUUID();
try (InstanceManager manager = new InstanceManager(

Code example source: uber/AthenaX

@Test
 public void testCreateAthenaXCluster() throws Exception {
  ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
  Configuration flinkConf = new Configuration();
  flinkConf.setString(JobManagerOptions.ADDRESS, "localhost");

  try (MiniAthenaXCluster cluster = new MiniAthenaXCluster(JobDeployerITest.class.getSimpleName())) {
   cluster.start();
   YarnConfiguration conf = cluster.getYarnConfiguration();
   YarnClusterConfiguration clusterConf = cluster.getYarnClusterConf();

   final ApplicationId appId;
   try (YarnClient client = YarnClient.createYarnClient()) {
    client.init(conf);
    client.start();

    JobDeployer deployer = new JobDeployer(clusterConf, client, executor, flinkConf);
    appId = deployer.createApplication();
    InstanceMetadata md = new InstanceMetadata(UUID.randomUUID(), UUID.randomUUID());
    JobDefinitionResource resource = new JobDefinitionResource()
      .queue(null).vCores(1L).executionSlots(1L).memory(2048L);
    JobConf jobConf = new JobConf(appId, "test", Collections.emptyList(), resource, md);
    deployer.start(JobITestUtil.trivialJobGraph(), jobConf);

    YarnApplicationState state = MiniAthenaXCluster.pollFinishedApplicationState(client, appId);
    assertEquals(FINISHED, state);
   }
  }
 }
