
Usage of the org.apache.hadoop.yarn.client.api.YarnClient.createYarnClient() method, with code examples

Reposted. Author: 知者. Last updated: 2024-03-17 20:28:40.

This article collects a number of code examples for the Java method org.apache.hadoop.yarn.client.api.YarnClient.createYarnClient() and shows how it is used in practice. The examples were gathered from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they are reasonably representative and should serve as a useful reference. Details of the YarnClient.createYarnClient() method:
Package path: org.apache.hadoop.yarn.client.api.YarnClient
Class: YarnClient
Method: createYarnClient

About YarnClient.createYarnClient

Create a new instance of YarnClient.
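
Before the quoted examples, here is a minimal, self-contained sketch of the usual lifecycle (create, init, start, use, stop) that all of the snippets below follow. The class name YarnClientExample and the call to getApplications() are only illustrative and are not taken from any of the quoted projects.

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnClientExample {
 public static void main(String[] args) throws Exception {
  // Create the client via the static factory and initialize it with the cluster configuration.
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  try {
   // Illustrative use: list the applications known to the ResourceManager.
   for (ApplicationReport report : yarnClient.getApplications()) {
    System.out.println(report.getApplicationId() + " " + report.getYarnApplicationState());
   }
  } finally {
   // Stop the client to release its RPC resources.
   yarnClient.stop();
  }
 }
}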

Code examples

Code example source: alibaba/jstorm

JstormOnYarn(String appMasterMainClass, Configuration conf) {
  this.jstormClientContext.conf = conf;
  this.appMasterMainClass = appMasterMainClass;
  jstormClientContext.yarnClient = YarnClient.createYarnClient();
  jstormClientContext.yarnClient.init(conf);
  jstormClientContext.opts = JstormYarnUtils.initClientOptions();
}

Code example source: apache/drill

public YarnRMClient(YarnConfiguration conf) {
 this.conf = conf;
 yarnClient = YarnClient.createYarnClient();
 yarnClient.init(conf);
 yarnClient.start();
}

Code example source: apache/incubator-gobblin

public GobblinYarnAppLauncher(Config config, YarnConfiguration yarnConfiguration) throws IOException {
 this.config = config;
 this.applicationName = config.getString(GobblinYarnConfigurationKeys.APPLICATION_NAME_KEY);
 this.appQueueName = config.getString(GobblinYarnConfigurationKeys.APP_QUEUE_KEY);
 String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
 LOGGER.info("Using ZooKeeper connection string: " + zkConnectionString);
 this.helixManager = HelixManagerFactory.getZKHelixManager(
   config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY), GobblinClusterUtils.getHostname(),
   InstanceType.SPECTATOR, zkConnectionString);
 this.yarnConfiguration = yarnConfiguration;
 this.yarnConfiguration.set("fs.automatic.close", "false");
 this.yarnClient = YarnClient.createYarnClient();
 this.yarnClient.init(this.yarnConfiguration);
 this.fs = config.hasPath(ConfigurationKeys.FS_URI_KEY) ?
   FileSystem.get(URI.create(config.getString(ConfigurationKeys.FS_URI_KEY)), this.yarnConfiguration) :
   FileSystem.get(this.yarnConfiguration);
 this.closer.register(this.fs);
 this.applicationStatusMonitor = Executors.newSingleThreadScheduledExecutor(
   ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("GobblinYarnAppStatusMonitor")));
 this.appReportIntervalMinutes = config.getLong(GobblinYarnConfigurationKeys.APP_REPORT_INTERVAL_MINUTES_KEY);
 this.appMasterJvmArgs = config.hasPath(GobblinYarnConfigurationKeys.APP_MASTER_JVM_ARGS_KEY) ?
   Optional.of(config.getString(GobblinYarnConfigurationKeys.APP_MASTER_JVM_ARGS_KEY)) :
   Optional.<String>absent();
 this.sinkLogRootDir = new Path(config.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY));
 this.maxGetApplicationReportFailures = config.getInt(GobblinYarnConfigurationKeys.MAX_GET_APP_REPORT_FAILURES_KEY);
 this.emailNotificationOnShutdown =
   config.getBoolean(GobblinYarnConfigurationKeys.EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY);
}

Code example source: apache/hive

/**
 * Kills all jobs tagged with the given tag that have been started after the
 * given timestamp.
 */
@Override
public void killJobs(String tag, long timestamp) {
 try {
  LOG.info("Looking for jobs to kill...");
  Set<ApplicationId> childJobs = getYarnChildJobs(tag, timestamp);
  if (childJobs.isEmpty()) {
   LOG.info("No jobs found from");
   return;
  } else {
   LOG.info(String.format("Found MR jobs count: %d", childJobs.size()));
   LOG.info("Killing all found jobs");
   YarnClient yarnClient = YarnClient.createYarnClient();
   yarnClient.init(conf);
   yarnClient.start();
   for (ApplicationId app: childJobs) {
    LOG.info(String.format("Killing job: %s ...", app));
    yarnClient.killApplication(app);
    LOG.info(String.format("Job %s killed", app));
   }
  }
 } catch (YarnException ye) {
  throw new RuntimeException("Exception occurred while killing child job(s)", ye);
 } catch (IOException ioe) {
  throw new RuntimeException("Exception occurred while killing child job(s)", ioe);
 }
}

Code example source: apache/flink

private AbstractYarnClusterDescriptor getClusterDescriptor(
      Configuration configuration,
      YarnConfiguration yarnConfiguration,
      String configurationDirectory) {
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConfiguration);
    yarnClient.start();

    return new YarnClusterDescriptor(
      configuration,
      yarnConfiguration,
      configurationDirectory,
      yarnClient,
      false);
  }

Code example source: apache/hive

public static void killChildYarnJobs(Configuration conf, String tag) {
 try {
  if (tag == null) {
   return;
  }
  LOG.info("Killing yarn jobs using query tag:" + tag);
  Set<ApplicationId> childYarnJobs = getChildYarnJobs(conf, tag);
  if (!childYarnJobs.isEmpty()) {
   YarnClient yarnClient = YarnClient.createYarnClient();
   yarnClient.init(conf);
   yarnClient.start();
   for (ApplicationId app : childYarnJobs) {
    yarnClient.killApplication(app);
   }
  }
 } catch (IOException | YarnException ye) {
  LOG.warn("Exception occurred while killing child job({})", ye);
 }
}

Code example source: apache/hive

public boolean isApplicationAccepted(HiveConf conf, String applicationId) {
  if (applicationId == null) {
   return false;
  }
  YarnClient yarnClient = null;
  try {
   LOG.info("Trying to find " + applicationId);
   ApplicationId appId = getApplicationIDFromString(applicationId);
   yarnClient = YarnClient.createYarnClient();
   yarnClient.init(conf);
   yarnClient.start();
   ApplicationReport appReport = yarnClient.getApplicationReport(appId);
   return appReport != null && appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED;
  } catch (Exception ex) {
   LOG.error("Failed getting application status for: " + applicationId + ": " + ex, ex);
   return false;
  } finally {
   if (yarnClient != null) {
    try {
     yarnClient.stop();
    } catch (Exception ex) {
     LOG.error("Failed to stop yarn client: " + ex, ex);
    }
   }
  }
 }

Code example source: apache/hive

public static boolean isApplicationAccepted(HiveConf conf, String applicationId) {
 if (applicationId == null) {
  return false;
 }
 YarnClient yarnClient = null;
 try {
  ApplicationId appId = getApplicationIDFromString(applicationId);
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  return appReport != null && appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED;
 } catch (Exception ex) {
  LOG.error("Failed getting application status for: " + applicationId + ": " + ex, ex);
  return false;
 } finally {
  if (yarnClient != null) {
   try {
    yarnClient.stop();
   } catch (Exception ex) {
    LOG.error("Failed to stop yarn client: " + ex, ex);
   }
  }
 }
}

Code example source: apache/flink

@BeforeClass
public static void setupClass() {
  yarnConfiguration = new YarnConfiguration();
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(yarnConfiguration);
  yarnClient.start();
}

Code example source: Qihoo360/XLearning

yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();

Code example source: apache/flink

@Before
public void checkClusterEmpty() {
  if (yarnClient == null) {
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(getYarnConfiguration());
    yarnClient.start();
  }
  flinkConfiguration = new org.apache.flink.configuration.Configuration(globalConfiguration);
}

Code example source: apache/flink

YarnClient yc = YarnClient.createYarnClient();
yc.init(YARN_CONFIGURATION);
yc.start();

Code example source: apache/drill

@Override
public void start(CallbackHandler resourceCallback,
  org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler nodeCallback ) {
 conf = new YarnConfiguration();
 resourceMgr = AMRMClientAsync.createAMRMClientAsync(pollPeriodMs, resourceCallback);
 resourceMgr.init(conf);
 resourceMgr.start();
 // Create the asynchronous node manager client
 nodeMgr = NMClientAsync.createNMClientAsync(nodeCallback);
 nodeMgr.init(conf);
 nodeMgr.start();
 client = YarnClient.createYarnClient();
 client.init(conf);
 client.start();
 String appIdStr = System.getenv(DrillOnYarnConfig.APP_ID_ENV_VAR);
 if (appIdStr != null) {
  appId = ConverterUtils.toApplicationId(appIdStr);
  try {
   appReport = client.getApplicationReport(appId);
  } catch (YarnException | IOException e) {
   LOG.error(
     "Failed to get YARN applicaiton report for App ID: " + appIdStr, e);
  }
 }
}

Code example source: apache/ignite

YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();

Code example source: apache/incubator-gobblin

this.yarnClient = this.closer.register(YarnClient.createYarnClient());
this.yarnClient.init(clusterConf);
this.yarnClient.start();

Code example source: apache/flink

final YarnClient closableYarnClient = YarnClient.createYarnClient();
closableYarnClient.init(yarnConfiguration);
closableYarnClient.start();

Code example source: apache/metron

Client(String appMasterMainClass, Configuration conf) {
 this.conf = conf;
 this.appMasterMainClass = appMasterMainClass;
 yarnClient = YarnClient.createYarnClient();
 yarnClient.init(conf);
}

Code example source: uber/AthenaX

ClusterInfo(String name, YarnClusterConfiguration conf) {
 this.name = name;
 this.client = YarnClient.createYarnClient();
 client.init(conf.conf());
 client.start();
 this.conf = conf;
}

Code example source: uber/AthenaX

doReturn(JobITestUtil.trivialJobGraph()).when(res).jobGraph();
try (YarnClient client = YarnClient.createYarnClient()) {
 ClusterInfo clusterInfo = new ClusterInfo(CLUSTER_NAME, clusterConf, client);
 YarnConfiguration conf = cluster.getYarnConfiguration();

Code example source: uber/AthenaX

@Test
 public void testCreateAthenaXCluster() throws Exception {
  ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
  Configuration flinkConf = new Configuration();
  flinkConf.setString(JobManagerOptions.ADDRESS, "localhost");

  try (MiniAthenaXCluster cluster = new MiniAthenaXCluster(JobDeployerITest.class.getSimpleName())) {
   cluster.start();
   YarnConfiguration conf = cluster.getYarnConfiguration();
   YarnClusterConfiguration clusterConf = cluster.getYarnClusterConf();

   final ApplicationId appId;
   try (YarnClient client = YarnClient.createYarnClient()) {
    client.init(conf);
    client.start();

    JobDeployer deployer = new JobDeployer(clusterConf, client, executor, flinkConf);
    appId = deployer.createApplication();
    InstanceMetadata md = new InstanceMetadata(UUID.randomUUID(), UUID.randomUUID());
    JobDefinitionResource resource = new JobDefinitionResource()
      .queue(null).vCores(1L).executionSlots(1L).memory(2048L);
    JobConf jobConf = new JobConf(appId, "test", Collections.emptyList(), resource, md);
    deployer.start(JobITestUtil.trivialJobGraph(), jobConf);

    YarnApplicationState state = MiniAthenaXCluster.pollFinishedApplicationState(client, appId);
    assertEquals(FINISHED, state);
   }
  }
 }
