Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class TestLocalContainerAllocator, method testRMConnectionRetry.
@Test
public void testRMConnectionRetry() throws Exception {
  // verify the connection exception is thrown
  // if we haven't exhausted the retry interval
  ApplicationMasterProtocol mockScheduler = mock(ApplicationMasterProtocol.class);
  when(mockScheduler.allocate(isA(AllocateRequest.class)))
      .thenThrow(RPCUtil.getRemoteException(new IOException("forcefail")));
  Configuration conf = new Configuration();
  LocalContainerAllocator lca = new StubbedLocalContainerAllocator(mockScheduler);
  lca.init(conf);
  lca.start();
  try {
    lca.heartbeat();
    Assert.fail("heartbeat was supposed to throw");
  } catch (YarnException e) {
    // YarnException is expected
  } finally {
    lca.stop();
  }
  // verify YarnRuntimeException is thrown when the retry interval has expired
  conf.setLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS, 0);
  lca = new StubbedLocalContainerAllocator(mockScheduler);
  lca.init(conf);
  lca.start();
  try {
    lca.heartbeat();
    Assert.fail("heartbeat was supposed to throw");
  } catch (YarnRuntimeException e) {
    // YarnRuntimeException is expected
  } finally {
    lca.stop();
  }
}
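The two halves of the test hinge on the split between the checked YarnException (RPCUtil.getRemoteException wraps the RPC failure in one) and the unchecked YarnRuntimeException that the allocator throws once MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS has been exhausted. A minimal, hypothetical sketch of that retry-then-escalate pattern (retryingAllocate and deadlineMillis are illustrative names, not allocator API):

// Hypothetical sketch: propagate the checked exception while time remains,
// escalate to the unchecked YarnRuntimeException once the deadline passes.
AllocateResponse retryingAllocate(ApplicationMasterProtocol scheduler,
    AllocateRequest request, long deadlineMillis) throws YarnException, IOException {
  try {
    return scheduler.allocate(request);
  } catch (YarnException | IOException e) {
    if (System.currentTimeMillis() >= deadlineMillis) {
      // retry budget exhausted: surface an unchecked failure to the caller
      throw new YarnRuntimeException("Could not contact RM after retry interval", e);
    }
    // retries remain: rethrow the checked exception so the caller can retry
    throw e;
  }
}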
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class TestRPCFactories, method testPbServerFactory.
private void testPbServerFactory() {
  InetSocketAddress addr = new InetSocketAddress(0);
  Configuration conf = new Configuration();
  MRClientProtocol instance = new MRClientProtocolTestImpl();
  Server server = null;
  try {
    server = RpcServerFactoryPBImpl.get().getServer(MRClientProtocol.class, instance, addr, conf, null, 1);
    server.start();
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create server");
  } finally {
    if (server != null) {
      server.stop();
    }
  }
}
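The PB server factory has a client-side counterpart; a hedged sketch of obtaining a client for the server started above, which would run before server.stop() (the getClient parameters follow the RpcClientFactory interface as best understood, and the cast to MRClientProtocol is an assumption):

// Hedged sketch: NetUtils.getConnectAddress resolves the wildcard address the server bound to.
try {
  MRClientProtocol client = (MRClientProtocol) RpcClientFactoryPBImpl.get()
      .getClient(MRClientProtocol.class, 1, NetUtils.getConnectAddress(server), conf);
  Assert.assertNotNull(client);
} catch (YarnRuntimeException e) {
  e.printStackTrace();
  Assert.fail("Failed to create client");
}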
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class TestRecordFactory, method testPbRecordFactory.
@Test
public void testPbRecordFactory() {
  RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
  try {
    CounterGroup response = pbRecordFactory.newRecordInstance(CounterGroup.class);
    Assert.assertEquals(CounterGroupPBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");
  }
  try {
    GetCountersRequest response = pbRecordFactory.newRecordInstance(GetCountersRequest.class);
    Assert.assertEquals(GetCountersRequestPBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");
  }
}
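Outside of tests, the factory is usually reached through the org.apache.hadoop.yarn.util.Records helper rather than RecordFactoryPBImpl directly; a brief sketch (assuming the default factory resolves to the PB implementation, as the assertions above verify):

// Sketch: Records delegates to the default RecordFactory, normally the PB implementation.
// A missing or unloadable *PBImpl class surfaces as an unchecked YarnRuntimeException,
// which is exactly the failure mode the test above guards against.
GetCountersRequest request = Records.newRecord(GetCountersRequest.class);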
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class HistoryFileManager, method createHistoryDirs.
@VisibleForTesting
void createHistoryDirs(Clock clock, long intervalCheckMillis, long timeOutMillis) throws IOException {
  long start = clock.getTime();
  boolean done = false;
  int counter = 0;
  while (!done && ((timeOutMillis == -1) || (clock.getTime() - start < timeOutMillis))) {
    // log every 3 attempts, 30sec
    done = tryCreatingHistoryDirs(counter++ % 3 == 0);
    if (done) {
      break;
    }
    try {
      Thread.sleep(intervalCheckMillis);
    } catch (InterruptedException ex) {
      throw new YarnRuntimeException(ex);
    }
  }
  if (!done) {
    throw new YarnRuntimeException("Timed out '" + timeOutMillis + "ms' waiting for FileSystem to become available");
  }
}
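In the Hadoop source this helper is driven from the manager's own initialization; a hedged sketch of such a call (historyFileManager is a hypothetical instance, and the 10-second poll and 3-minute timeout are illustrative values, not the configured defaults):

// Hedged sketch: org.apache.hadoop.yarn.util.Clock has a single getTime() method,
// so a wall clock can be supplied as a lambda. IOException from the helper is left
// to propagate; YarnRuntimeException signals a timeout or an interrupted wait.
Clock wallClock = () -> System.currentTimeMillis();
historyFileManager.createHistoryDirs(wallClock, 10 * 1000L, 3 * 60 * 1000L);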
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class JobHistoryServer, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
  Configuration config = new YarnConfiguration(conf);
  config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
  // This is required for WebApps to use https if enabled.
  MRWebAppUtil.initialize(getConfig());
  try {
    doSecureLogin(conf);
  } catch (IOException ie) {
    throw new YarnRuntimeException("History Server Failed to login", ie);
  }
  jobHistoryService = new JobHistory();
  historyContext = (HistoryContext) jobHistoryService;
  stateStore = createStateStore(conf);
  this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore);
  clientService = createHistoryClientService();
  aggLogDelService = new AggregatedLogDeletionService();
  hsAdminServer = new HSAdminServer(aggLogDelService, jobHistoryService);
  addService(stateStore);
  addService(new HistoryServerSecretManagerService());
  addService(jobHistoryService);
  addService(clientService);
  addService(aggLogDelService);
  addService(hsAdminServer);
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics jm = JvmMetrics.initSingleton("JobHistoryServer", null);
  pauseMonitor = new JvmPauseMonitor();
  addService(pauseMonitor);
  jm.setPauseMonitor(pauseMonitor);
  super.serviceInit(config);
}
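Because serviceInit converts the checked login failure into an unchecked YarnRuntimeException, a launcher only needs a broad catch around init and start; a minimal, hedged sketch (the real JobHistoryServer entry point also installs shutdown hooks and logging, which are omitted here):

// Hedged sketch of a minimal launcher; production setup is trimmed away.
JobHistoryServer jobHistoryServer = new JobHistoryServer();
Configuration conf = new YarnConfiguration();
try {
  jobHistoryServer.init(conf);   // invokes serviceInit above; login failures surface here
  jobHistoryServer.start();
} catch (Throwable t) {
  t.printStackTrace();
  System.exit(-1);
}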