gpt4 book ai didi

org.apache.hadoop.yarn.conf.YarnConfiguration.getInt()方法的使用及代码示例

转载 作者:知者 更新时间:2024-03-18 01:48:40 25 4
gpt4 key购买 nike

本文整理了Java中org.apache.hadoop.yarn.conf.YarnConfiguration.getInt()方法的一些代码示例,展示了YarnConfiguration.getInt()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。YarnConfiguration.getInt()方法的具体详情如下:
包路径:org.apache.hadoop.yarn.conf.YarnConfiguration
类名称:YarnConfiguration
方法名:getInt

YarnConfiguration.getInt介绍

暂无

代码示例

代码示例来源:origin: apache/flink

YarnConfiguration yarnConf = mock(YarnConfiguration.class);
doAnswer(getDefault).when(yarnConf).get(anyString(), anyString());
doAnswer(getDefault).when(yarnConf).getInt(anyString(), anyInt());
doAnswer(new Answer() {
  @Override

代码示例来源:origin: apache/flink

final int yarnMinAllocationMB = yarnConfiguration.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-yarn-common

@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
         Class<T> protocol) {
 // Remember the proxy/protocol pair and make sure the protocol is one the
 // proxy is allowed to serve.
 this.rmProxy = rmProxy;
 this.protocol = protocol;
 this.rmProxy.checkAllowedProtocols(this.protocol);

 // Work on a private copy of the configuration so the caller's instance is
 // not mutated by the overrides below.
 this.conf = new YarnConfiguration(configuration);
 Collection<String> haIds = HAUtil.getRMHAIds(conf);
 this.rmServiceIds = haIds.toArray(new String[0]);
 conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);

 // Map the YARN client failover retry settings onto the generic IPC client
 // retry knobs, falling back to the YARN defaults when unset.
 int failoverRetries =
   conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
     YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES);
 conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
   failoverRetries);
 int socketTimeoutRetries =
   conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
     YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS);
 conf.setInt(CommonConfigurationKeysPublic.
   IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
   socketTimeoutRetries);
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-common

@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
         Class<T> protocol) {
 // Capture collaborators and validate that this protocol is supported.
 this.rmProxy = rmProxy;
 this.protocol = protocol;
 this.rmProxy.checkAllowedProtocols(this.protocol);

 // Copy the configuration so our edits stay local to this provider.
 this.conf = new YarnConfiguration(configuration);
 Collection<String> haIds = HAUtil.getRMHAIds(conf);
 this.rmServiceIds = haIds.toArray(new String[0]);
 conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);

 // Translate the YARN failover retry configuration into the corresponding
 // IPC client connect-retry settings (with YARN defaults as fallback).
 int connectRetries =
   conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
     YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES);
 conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
   connectRetries);
 int timeoutRetries =
   conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
     YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS);
 conf.setInt(CommonConfigurationKeysPublic.
   IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
   timeoutRetries);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-common

@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
         Class<T> protocol) {
 // Store the proxy/protocol pair; reject protocols the proxy cannot serve.
 this.rmProxy = rmProxy;
 this.protocol = protocol;
 this.rmProxy.checkAllowedProtocols(this.protocol);

 // Defensive copy: further set()/setInt() calls must not leak to the caller.
 this.conf = new YarnConfiguration(configuration);
 Collection<String> haIds = HAUtil.getRMHAIds(conf);
 this.rmServiceIds = haIds.toArray(new String[0]);
 conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);

 // Propagate YARN client failover retry counts to the IPC layer.
 int retries =
   conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
     YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES);
 conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
   retries);
 int retriesOnTimeout =
   conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
     YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS);
 conf.setInt(CommonConfigurationKeysPublic.
   IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
   retriesOnTimeout);
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-registry

@Override
public void init(DaemonContext context) throws Exception {
 // Log the standard startup banner, then build a configuration that has the
 // command-line generic options applied to it.
 String[] args = context.getArguments();
 StringUtils.startupShutdownMessage(RegistryDNSServer.class, args, LOG);
 conf = new YarnConfiguration();
 new GenericOptionsParser(conf, args); // constructor side effect: applies args to conf

 // This privileged launcher only makes sense when the DNS port is in the
 // privileged range 1-1023; otherwise refuse to start.
 final int dnsPort = conf.getInt(KEY_DNS_PORT, DEFAULT_DNS_PORT);
 final boolean privilegedPort = dnsPort >= 1 && dnsPort <= 1023;
 if (!privilegedPort) {
  throw new RuntimeException("Must start privileged registry DNS server " +
    "with '" + KEY_DNS_PORT + "' configured to a privileged port.");
 }

 // Create the DNS service and open its network channels; log and rethrow so
 // the daemon wrapper sees the failure.
 try {
  registryDNS = (RegistryDNS) DNSOperationsFactory.createInstance(conf);
  registryDNS.initializeChannels(conf);
 } catch (Exception e) {
  LOG.error("Error initializing Registry DNS", e);
  throw e;
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-common

/**
 * Queries the ResourceManager's HA service state over RPC.
 *
 * @param yarnConf configuration identifying the RM to contact; mutated to
 *                 set the security service user name from RM_PRINCIPAL
 * @return the RM's current HA state
 * @throws Exception if the RPC proxy cannot be created or the call fails
 */
private static HAServiceState getHAState(YarnConfiguration yarnConf)
  throws Exception {
 // Timeout used by the failover-controller CLI health check RPC.
 int rpcCheckTimeout =
   yarnConf.getInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
     CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
 // Identify the remote service by the RM principal (empty when unset).
 yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
   yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
 HAServiceTarget target = new RMHAServiceTarget(yarnConf);
 HAServiceProtocol proxy = target.getProxy(yarnConf, rpcCheckTimeout);
 return proxy.getServiceStatus().getState();
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-yarn-common

/**
 * Asks the ResourceManager for its current HA service state.
 *
 * @param yarnConf configuration locating the RM; also mutated to carry the
 *                 RM principal as the security service user name
 * @return the HA state reported by the RM
 * @throws Exception on proxy creation or RPC failure
 */
private static HAServiceState getHAState(YarnConfiguration yarnConf)
  throws Exception {
 // RPC timeout for the CLI-style health check.
 int checkTimeout =
   yarnConf.getInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
     CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
 // Use the configured RM principal ("" if security is not configured).
 yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
   yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
 HAServiceTarget rmTarget = new RMHAServiceTarget(yarnConf);
 HAServiceProtocol statusProxy = rmTarget.getProxy(yarnConf, checkTimeout);
 return statusProxy.getServiceStatus().getState();
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-common

/**
 * Fetches the ResourceManager's HA state via the HAServiceProtocol proxy.
 *
 * @param yarnConf configuration for reaching the RM; mutated to set the
 *                 security service user from the RM principal
 * @return the current HA state of the RM
 * @throws Exception if building the proxy or calling the RM fails
 */
private static HAServiceState getHAState(YarnConfiguration yarnConf)
  throws Exception {
 // How long the health-check RPC may take before giving up.
 final int timeoutForChecks =
   yarnConf.getInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
     CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
 // Service user defaults to empty when no RM principal is configured.
 yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
   yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
 HAServiceTarget serviceTarget = new RMHAServiceTarget(yarnConf);
 HAServiceProtocol haProxy =
   serviceTarget.getProxy(yarnConf, timeoutForChecks);
 return haProxy.getServiceStatus().getState();
}

代码示例来源:origin: org.apache.flink/flink-yarn

final int yarnMinAllocationMB = yarnConfiguration.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

代码示例来源:origin: org.apache.flink/flink-yarn_2.11

final int yarnMinAllocationMB = yarnConfiguration.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

代码示例来源:origin: dremio/dremio-oss

assertEquals(onHeapMemory, yarnConfig.getInt(DacDaemonYarnApplication.YARN_MEMORY_ON_HEAP, 0));
assertEquals(offHeapMemory, yarnConfig.getInt(DacDaemonYarnApplication.YARN_MEMORY_OFF_HEAP, 0));
assertEquals(offHeapMemory, yarnConfig.getInt(JAVA_RESERVED_MEMORY_MB, 0));

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

/**
 * Verifies that an application requesting {@code testAlloc} (presumably MB —
 * TODO confirm against submitApp) ends up consuming exactly the configured
 * scheduler minimum allocation (RM_SCHEDULER_MINIMUM_ALLOCATION_MB) on the
 * node that hosts its AM.
 *
 * @param conf      RM configuration under test (may carry a custom minimum)
 * @param testAlloc resource amount passed to {@code submitApp}
 * @throws Exception if the mock RM interaction fails
 */
private void testMinimumAllocation(YarnConfiguration conf, int testAlloc)
  throws Exception {
 // Bring up a mock ResourceManager with the supplied configuration.
 MockRM rm = new MockRM(conf);
 rm.start();
 // Register node1
 MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
 // Submit an application
 RMApp app1 = rm.submitApp(testAlloc);
 // kick the scheduling
 nm1.nodeHeartbeat(true);
 // Launch and register the AM for the first attempt so resources are held.
 RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
 MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
 am1.registerAppAttempt();
 // Read back what the scheduler says node1 is actually using.
 SchedulerNodeReport report_nm1 =
   rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
 // The expected usage is whatever minimum allocation the config resolves to.
 int checkAlloc =
   conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
     YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
 Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemorySize());
 rm.stop();
}

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

/**
 * Checks that a submitted application's memory usage on its node equals the
 * configured scheduler minimum allocation (RM_SCHEDULER_MINIMUM_ALLOCATION_MB),
 * regardless of the {@code testAlloc} amount requested (presumably MB — TODO
 * confirm against submitApp).
 *
 * @param conf      RM configuration under test
 * @param testAlloc resource amount passed to {@code submitApp}
 * @throws Exception if the mock RM interaction fails
 */
private void testMinimumAllocation(YarnConfiguration conf, int testAlloc)
  throws Exception {
 // Start a mock ResourceManager using the given configuration.
 MockRM rm = new MockRM(conf);
 rm.start();
 // Register node1
 MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
 // Submit an application
 RMApp app1 = rm.submitApp(testAlloc);
 // kick the scheduling
 nm1.nodeHeartbeat(true);
 // Launch and register the AM so its container allocation is accounted for.
 RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
 MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
 am1.registerAppAttempt();
 // Query the scheduler's view of node1's resource usage.
 SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
   nm1.getNodeId());
 // Expected usage: the minimum allocation the configuration resolves to.
 int checkAlloc =
   conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
     YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
 Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemory());
 rm.stop();
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

null, conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
   YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null);
ApplicationAttemptId unmanagedAttemptId =

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

null, conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
   YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null);
ApplicationAttemptId unmanagedAttemptId =

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
int maxAttempt =
  conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
    YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com