gpt4 book ai didi

org.apache.hadoop.yarn.exceptions.YarnRuntimeException类的使用及代码示例

转载 作者:知者 更新时间:2024-03-17 17:56:40 28 4
gpt4 key购买 nike

本文整理了Java中org.apache.hadoop.yarn.exceptions.YarnRuntimeException类的一些代码示例,展示了YarnRuntimeException类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。YarnRuntimeException类的具体详情如下:
包路径:org.apache.hadoop.yarn.exceptions.YarnRuntimeException
类名称:YarnRuntimeException

YarnRuntimeException介绍

[英]Base YARN Exception. NOTE: All derivatives of this exception, which may be thrown by a remote service, must include a String only constructor for the exception to be unwrapped on the client.
[中]YARN 基础异常。注意:远程服务可能抛出的此异常的所有派生类,都必须提供一个仅接受 String 参数的构造函数,以便该异常能在客户端被解包(unwrap)。

代码示例

代码示例来源:origin: Qihoo360/XLearning

/**
 * Initializes the history server: wraps the incoming configuration in an
 * XLearning-specific one, performs a secure login, then creates and
 * registers the state store, secret manager, client service and
 * aggregated-log-deletion sub-services before delegating to the parent.
 *
 * @param conf the raw service configuration
 * @throws Exception if any sub-service fails to initialize
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
 Configuration config = new XLearningConfiguration(conf);
 // Make the dispatcher exit the process on unhandled errors instead of hanging.
 config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
 // This is required for WebApps to use https if enabled.
 XLearningWebAppUtil.initialize(getConfig());
 try {
  doSecureLogin(conf);
 } catch (IOException ie) {
  // A failed login is fatal: surface it as an unchecked YARN exception.
  throw new YarnRuntimeException("History Server Failed to login", ie);
 }
 jobHistoryService = new JobHistory();
 historyContext = (HistoryContext) jobHistoryService;
 // NOTE(review): some calls below use the raw 'conf' while serviceInit is
 // finally invoked with the wrapped 'config' — confirm this mix is intentional.
 stateStore = createStateStore(conf);
 this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore);
 clientService = createHistoryClientService();
 aggLogDelService = new AggregatedLogDeletionService();
 // Registration order matters: sub-services are initialized/started in order.
 addService(stateStore);
 addService(new HistoryServerSecretManagerService());
 addService(clientService);
 addService(aggLogDelService);
 super.serviceInit(config);
}

代码示例来源:origin: org.springframework.data/spring-yarn-core

/**
 * Constructs a {@code YarnSystemException} that wraps a
 * {@link YarnRuntimeException}, reusing its message and keeping the
 * original exception as the cause for stack-trace chaining.
 *
 * @param e the {@link YarnRuntimeException} to wrap; must not be {@code null}
 */
public YarnSystemException(YarnRuntimeException e) {
  super(e.getMessage(), e);
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-common

/**
 * Verifies that the protobuf record factory instantiates the PB-backed
 * implementation class for {@code NodeHeartbeatRequest}.
 */
@Test
public void testPbRecordFactory() {
 RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
 try {
  NodeHeartbeatRequest request = pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
  Assert.assertEquals(NodeHeartbeatRequestPBImpl.class, request.getClass());
 } catch (YarnRuntimeException e) {
  // Fail with the exception attached instead of printing to stderr;
  // also fixes the "crete" typo in the failure message.
  Assert.fail("Failed to create record: " + e);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-common

/**
 * {@code deSerialize()} on an uninitialized {@code SerializedExceptionPBImpl}
 * must throw a {@code YarnRuntimeException} caused by
 * {@code ClassNotFoundException}; after {@code init(ex)} it must round-trip
 * the original exception's string form.
 */
@Test
public void testDeserialize() throws Exception {
 Exception ex = new Exception("test exception");
 SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
 try {
  pb.deSerialize();
  // Fixed typo: "deSerialze" -> "deSerialize".
  Assert.fail("deSerialize should throw YarnRuntimeException");
 } catch (YarnRuntimeException e) {
  Assert.assertEquals(ClassNotFoundException.class,
    e.getCause().getClass());
 }
 pb.init(ex);
 Assert.assertEquals(ex.toString(), pb.deSerialize().toString());
}

代码示例来源:origin: io.hops/hadoop-yarn-common

/**
 * Verifies that the protobuf record factory instantiates the PB-backed
 * implementation classes for {@code AllocateResponse} and
 * {@code AllocateRequest}.
 */
@Test
public void testPbRecordFactory() {
 RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();

 try {
  AllocateResponse response =
    pbRecordFactory.newRecordInstance(AllocateResponse.class);
  Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
 } catch (YarnRuntimeException e) {
  // Fail with the exception attached instead of printing to stderr;
  // also fixes the "crete" typo in the failure message.
  Assert.fail("Failed to create record: " + e);
 }

 try {
  AllocateRequest response =
    pbRecordFactory.newRecordInstance(AllocateRequest.class);
  Assert.assertEquals(AllocateRequestPBImpl.class, response.getClass());
 } catch (YarnRuntimeException e) {
  Assert.fail("Failed to create record: " + e);
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-common

/**
 * {@code deSerialize()} on an uninitialized {@code SerializedExceptionPBImpl}
 * must throw a {@code YarnRuntimeException} caused by
 * {@code ClassNotFoundException}; after {@code init(ex)} it must round-trip
 * the original exception's string form.
 */
@Test
public void testDeserialize() throws Exception {
 Exception ex = new Exception("test exception");
 SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
 try {
  pb.deSerialize();
  // Fixed typo: "deSerialze" -> "deSerialize".
  Assert.fail("deSerialize should throw YarnRuntimeException");
 } catch (YarnRuntimeException e) {
  Assert.assertEquals(ClassNotFoundException.class,
    e.getCause().getClass());
 }
 pb.init(ex);
 Assert.assertEquals(ex.toString(), pb.deSerialize().toString());
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-common

/**
 * Converts a mapreduce-side task type into its YARN equivalent.
 *
 * @param taskType the mapreduce task type to convert
 * @return the corresponding YARN {@code TaskType}
 * @throws YarnRuntimeException if the type is neither MAP nor REDUCE
 */
public static TaskType
  toYarn(org.apache.hadoop.mapreduce.TaskType taskType) {
 // Guard-clause form of the original switch; behavior is identical.
 if (taskType == org.apache.hadoop.mapreduce.TaskType.MAP) {
  return TaskType.MAP;
 }
 if (taskType == org.apache.hadoop.mapreduce.TaskType.REDUCE) {
  return TaskType.REDUCE;
 }
 throw new YarnRuntimeException("Unrecognized task type: " + taskType);
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

/**
 * RM construction must reject a heartbeat interval (1001 ms) that exceeds
 * the NM expiry interval (1000 ms).
 */
@Test
public void testNMExpiryAndHeartbeatIntervalsValidation() throws Exception {
 Configuration conf = new YarnConfiguration();
 conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1000);
 conf.setLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1001);
 try {
  resourceManager = new MockRM(conf);
  // Previously the test silently passed when no exception was thrown.
  Assert.fail("Expected YarnRuntimeException for invalid intervals");
 } catch (YarnRuntimeException e) {
  // Exception is expected; rethrow anything other than the validation error.
  if (!e.getMessage().startsWith("Nodemanager expiry interval should be no"
    + " less than heartbeat interval")) {
   throw e;
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-common

/**
 * Verifies that the protobuf record factory instantiates the PB-backed
 * implementation classes for {@code AllocateResponse} and
 * {@code AllocateRequest}.
 */
@Test
public void testPbRecordFactory() {
 RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();

 try {
  AllocateResponse response =
    pbRecordFactory.newRecordInstance(AllocateResponse.class);
  Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
 } catch (YarnRuntimeException e) {
  // Fail with the exception attached instead of printing to stderr;
  // also fixes the "crete" typo in the failure message.
  Assert.fail("Failed to create record: " + e);
 }

 try {
  AllocateRequest response =
    pbRecordFactory.newRecordInstance(AllocateRequest.class);
  Assert.assertEquals(AllocateRequestPBImpl.class, response.getClass());
 } catch (YarnRuntimeException e) {
  Assert.fail("Failed to create record: " + e);
 }
}

代码示例来源:origin: io.hops/hadoop-yarn-common

/**
 * {@code deSerialize()} on an uninitialized {@code SerializedExceptionPBImpl}
 * must throw a {@code YarnRuntimeException} caused by
 * {@code ClassNotFoundException}; after {@code init(ex)} it must round-trip
 * the original exception's string form.
 */
@Test
public void testDeserialize() throws Exception {
 Exception ex = new Exception("test exception");
 SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
 try {
  pb.deSerialize();
  // Fixed typo: "deSerialze" -> "deSerialize".
  Assert.fail("deSerialize should throw YarnRuntimeException");
 } catch (YarnRuntimeException e) {
  Assert.assertEquals(ClassNotFoundException.class,
    e.getCause().getClass());
 }
 pb.init(ex);
 Assert.assertEquals(ex.toString(), pb.deSerialize().toString());
}

代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-common

/**
 * Converts a YARN task type into its mapreduce-side equivalent.
 *
 * @param taskType the YARN task type to convert
 * @return the corresponding {@code org.apache.hadoop.mapreduce.TaskType}
 * @throws YarnRuntimeException if the type is neither MAP nor REDUCE
 */
public static org.apache.hadoop.mapreduce.TaskType fromYarn(
  TaskType taskType) {
 // Guard-clause form of the original switch; behavior is identical.
 if (taskType == TaskType.MAP) {
  return org.apache.hadoop.mapreduce.TaskType.MAP;
 }
 if (taskType == TaskType.REDUCE) {
  return org.apache.hadoop.mapreduce.TaskType.REDUCE;
 }
 throw new YarnRuntimeException("Unrecognized task type: " + taskType);
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-app

throw new NotFoundException(e.getMessage());
} catch (NumberFormatException ne) {
 throw new NotFoundException(ne.getMessage());

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-common

/**
 * Verifies that the protobuf record factory instantiates the PB-backed
 * implementation classes for {@code AllocateResponse} and
 * {@code AllocateRequest}.
 */
@Test
public void testPbRecordFactory() {
 RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();

 try {
  AllocateResponse response =
    pbRecordFactory.newRecordInstance(AllocateResponse.class);
  Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
 } catch (YarnRuntimeException e) {
  // Fail with the exception attached instead of printing to stderr;
  // also fixes the "crete" typo in the failure message.
  Assert.fail("Failed to create record: " + e);
 }

 try {
  AllocateRequest response =
    pbRecordFactory.newRecordInstance(AllocateRequest.class);
  Assert.assertEquals(AllocateRequestPBImpl.class, response.getClass());
 } catch (YarnRuntimeException e) {
  Assert.fail("Failed to create record: " + e);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-common

// Verifies that a dispatcher whose queue.put() is interrupted surfaces the
// interruption as a YarnRuntimeException, leaves the queue empty, and still
// closes cleanly without hanging on drain.
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout=10000)
public void testDispatcherOnCloseIfQueueEmpty() throws Exception {
 // Spy on the queue so put() can be forced to throw InterruptedException.
 BlockingQueue<Event> eventQueue = spy(new LinkedBlockingQueue<Event>());
 Event event = mock(Event.class);
 doThrow(new InterruptedException()).when(eventQueue).put(event);
 DrainDispatcher disp = new DrainDispatcher(eventQueue);
 disp.init(new Configuration());
 disp.setDrainEventsOnStop();
 disp.start();
 // Wait for event handler thread to start and begin waiting for events.
 disp.waitForEventThreadToWait();
 try {
  // The stubbed InterruptedException must surface as YarnRuntimeException.
  disp.getEventHandler().handle(event);
  Assert.fail("Expected YarnRuntimeException");
 } catch (YarnRuntimeException e) {
  Assert.assertTrue(e.getCause() instanceof InterruptedException);
 }
 // Queue should be empty and dispatcher should not hang on close
 Assert.assertTrue("Event Queue should have been empty",
   eventQueue.isEmpty());
 disp.close();
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-router

/**
 * Always rejects chaining: FederationInterceptorREST is designed to be the
 * terminal interceptor of the REST request pipeline, so configuring a
 * successor indicates a broken pipeline configuration.
 *
 * @param next ignored
 * @throws YarnRuntimeException always
 */
@Override
public void setNextInterceptor(RESTRequestInterceptor next) {
 throw new YarnRuntimeException("setNextInterceptor is being called on "
   + "FederationInterceptorREST, which should be the last one "
   + "in the chain. Check if the interceptor pipeline configuration "
   + "is correct");
}

代码示例来源:origin: io.hops/hadoop-mapreduce-client-app

throw new NotFoundException(e.getMessage());
} catch (NumberFormatException ne) {
 throw new NotFoundException(ne.getMessage());

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-common

// Starts a protobuf-backed RPC server for ApplicationMasterProtocol and then
// creates a client against it, failing the test if either side cannot be
// constructed; the server is always stopped in the finally block.
// NOTE(review): this method is private and has no @Test annotation, so it is
// not executed by the test runner — confirm whether that is intentional.
private void testPbClientFactory() {
 InetSocketAddress addr = new InetSocketAddress(0);
 System.err.println(addr.getHostName() + addr.getPort());
 Configuration conf = new Configuration();
 ApplicationMasterProtocol instance = new AMRMProtocolTestImpl();
 Server server = null;
 try {
  // Bind to an ephemeral port (port 0) with a single handler thread.
  server = 
   RpcServerFactoryPBImpl.get().getServer(
     ApplicationMasterProtocol.class, instance, addr, conf, null, 1);
  server.start();
  System.err.println(server.getListenerAddress());
  System.err.println(NetUtils.getConnectAddress(server));
  ApplicationMasterProtocol amrmClient = null;
  try {
   // Client must be created against the server's actual connect address.
   amrmClient = (ApplicationMasterProtocol) RpcClientFactoryPBImpl.get().getClient(ApplicationMasterProtocol.class, 1, NetUtils.getConnectAddress(server), conf);
  } catch (YarnRuntimeException e) {
   e.printStackTrace();
   Assert.fail("Failed to create client");
  }
  
 } catch (YarnRuntimeException e) {
  e.printStackTrace();
  Assert.fail("Failed to create server");
 } finally {
  // Always release the listening socket, even on failure.
  if (server != null) {
   server.stop();
  }
 }     
}

代码示例来源:origin: io.hops/hadoop-yarn-common

// Verifies that a dispatcher whose queue.put() is interrupted surfaces the
// interruption as a YarnRuntimeException, leaves the queue empty, and still
// closes cleanly without hanging on drain.
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout=10000)
public void testDispatcherOnCloseIfQueueEmpty() throws Exception {
 // Spy on the queue so put() can be forced to throw InterruptedException.
 BlockingQueue<Event> eventQueue = spy(new LinkedBlockingQueue<Event>());
 Event event = mock(Event.class);
 doThrow(new InterruptedException()).when(eventQueue).put(event);
 DrainDispatcher disp = new DrainDispatcher(eventQueue);
 disp.init(new Configuration());
 disp.setDrainEventsOnStop();
 disp.start();
 // Wait for event handler thread to start and begin waiting for events.
 disp.waitForEventThreadToWait();
 try {
  // The stubbed InterruptedException must surface as YarnRuntimeException.
  disp.getEventHandler().handle(event);
  Assert.fail("Expected YarnRuntimeException");
 } catch (YarnRuntimeException e) {
  Assert.assertTrue(e.getCause() instanceof InterruptedException);
 }
 // Queue should be empty and dispatcher should not hang on close
 Assert.assertTrue("Event Queue should have been empty",
   eventQueue.isEmpty());
 disp.close();
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-router

/**
 * Always rejects chaining: DefaultRequestInterceptorREST is designed to be
 * the terminal interceptor of the REST request pipeline, so configuring a
 * successor indicates a broken pipeline configuration.
 *
 * @param next ignored
 * @throws YarnRuntimeException always
 */
@Override
public void setNextInterceptor(RESTRequestInterceptor next) {
 throw new YarnRuntimeException("setNextInterceptor is being called on "
   + "DefaultRequestInterceptorREST, which should be the last one "
   + "in the chain. Check if the interceptor pipeline configuration "
   + "is correct");
}

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

/**
 * ResourceManager.init must reject a heartbeat interval (1001 ms) that
 * exceeds the NM expiry interval (1000 ms).
 */
@Test
public void testNMExpiryAndHeartbeatIntervalsValidation() throws Exception {
 Configuration conf = new YarnConfiguration();
 conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1000);
 conf.setLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1001);
 resourceManager = new ResourceManager(); // removed stray double semicolon
 try {
  resourceManager.init(conf);
  // Previously the test silently passed when no exception was thrown.
  Assert.fail("Expected YarnRuntimeException for invalid intervals");
 } catch (YarnRuntimeException e) {
  // Exception is expected; rethrow anything other than the validation error.
  if (!e.getMessage().startsWith("Nodemanager expiry interval should be no"
    + " less than heartbeat interval")) {
   throw e;
  }
 }
}

28 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com