use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
the class CapacitySchedulerConfiguration method getQueueOrderingPolicy.
@Private
public QueueOrderingPolicy getQueueOrderingPolicy(String queue,
    String parentPolicy) {
  String defaultPolicy = parentPolicy;
  if (null == defaultPolicy) {
    defaultPolicy = DEFAULT_QUEUE_ORDERING_POLICY;
  }
  String policyType = get(getQueuePrefix(queue) + ORDERING_POLICY,
      defaultPolicy);
  QueueOrderingPolicy qop;
  if (policyType.trim().equals(QUEUE_UTILIZATION_ORDERING_POLICY)) {
    // Doesn't respect priority
    qop = new PriorityUtilizationQueueOrderingPolicy(false);
  } else if (policyType.trim().equals(
      QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY)) {
    qop = new PriorityUtilizationQueueOrderingPolicy(true);
  } else {
    String message = "Unable to construct queue ordering policy="
        + policyType + " queue=" + queue;
    throw new YarnRuntimeException(message);
  }
  return qop;
}
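For context, the policy is read from the queue's ordering-policy property and falls back to the parent's policy, or the global default, when none is set. A minimal sketch, not from the listing above; the property key and the value strings "utilization" and "priority-utilization" are assumptions about the scheduler's constants:
// Assumed key: yarn.scheduler.capacity.<queue-path>.ordering-policy
CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
csConf.set("yarn.scheduler.capacity.root.default.ordering-policy",
    "priority-utilization");
// With no parent policy supplied, the queue's own setting wins; this should
// yield a priority-aware PriorityUtilizationQueueOrderingPolicy.
QueueOrderingPolicy policy =
    csConf.getQueueOrderingPolicy("root.default", null);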
use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
the class MiniYARNCluster method startResourceManager.
private synchronized void startResourceManager(final int index) {
  try {
    resourceManagers[index].start();
    if (resourceManagers[index].getServiceState() != STATE.STARTED) {
      // RM could have failed.
      throw new IOException("ResourceManager failed to start. Final state is "
          + resourceManagers[index].getServiceState());
    }
  } catch (Throwable t) {
    throw new YarnRuntimeException(t);
  }
  Configuration conf = resourceManagers[index].getConfig();
  LOG.info("MiniYARN ResourceManager address: "
      + conf.get(YarnConfiguration.RM_ADDRESS));
  LOG.info("MiniYARN ResourceManager web address: "
      + WebAppUtils.getRMWebAppURLWithoutScheme(conf));
}
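startResourceManager runs as part of the cluster's service start sequence. A minimal sketch of typical test usage, assuming the common (testName, noOfNodeManagers, numLocalDirs, numLogDirs) constructor:
Configuration conf = new YarnConfiguration();
MiniYARNCluster cluster = new MiniYARNCluster("test-cluster", 1, 1, 1);
cluster.init(conf);
// start() drives startResourceManager(); a startup failure surfaces as a
// YarnRuntimeException wrapping the original cause, as shown above.
cluster.start();
// ... run test logic against the mini cluster ...
cluster.stop();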
use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
the class SharedCacheManager method createSCMStoreService.
@SuppressWarnings("unchecked")
private static SCMStore createSCMStoreService(Configuration conf) {
  Class<? extends SCMStore> defaultStoreClass;
  try {
    defaultStoreClass = (Class<? extends SCMStore>) Class.forName(
        YarnConfiguration.DEFAULT_SCM_STORE_CLASS);
  } catch (Exception e) {
    throw new YarnRuntimeException("Invalid default scm store class "
        + YarnConfiguration.DEFAULT_SCM_STORE_CLASS, e);
  }
  SCMStore store = ReflectionUtils.newInstance(
      conf.getClass(YarnConfiguration.SCM_STORE_CLASS, defaultStoreClass,
          SCMStore.class), conf);
  return store;
}
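The store implementation is pluggable through configuration. A minimal sketch; the InMemorySCMStore class name is an assumption about the default store implementation:
Configuration conf = new YarnConfiguration();
// Point the store key at the desired implementation; createSCMStoreService
// (called during SharedCacheManager service init) instantiates this class
// via ReflectionUtils.newInstance.
conf.setClass(YarnConfiguration.SCM_STORE_CLASS,
    InMemorySCMStore.class, SCMStore.class);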
use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
the class TestRMHA method innerTestHAWithRMHostName.
public void innerTestHAWithRMHostName(boolean includeBindHost) {
  // this is run two times, with and without a bind host configured
  if (includeBindHost) {
    configuration.set(YarnConfiguration.RM_BIND_HOST, "9.9.9.9");
  }
  // test if both RM_HOSTNAME_{rm_id} and RM_RPCADDRESS_{rm_id} are set
  // We should only read rpc addresses from RM_RPCADDRESS_{rm_id} configuration
  configuration.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,
      RM1_NODE_ID), "1.1.1.1");
  configuration.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,
      RM2_NODE_ID), "0.0.0.0");
  configuration.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,
      RM3_NODE_ID), "2.2.2.2");
  try {
    Configuration conf = new YarnConfiguration(configuration);
    rm = new MockRM(conf);
    rm.init(conf);
    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
      assertEquals("RPC address not set for " + confKey, RM1_ADDRESS,
          conf.get(HAUtil.addSuffix(confKey, RM1_NODE_ID)));
      assertEquals("RPC address not set for " + confKey, RM2_ADDRESS,
          conf.get(HAUtil.addSuffix(confKey, RM2_NODE_ID)));
      assertEquals("RPC address not set for " + confKey, RM3_ADDRESS,
          conf.get(HAUtil.addSuffix(confKey, RM3_NODE_ID)));
      if (includeBindHost) {
        assertEquals("Web address misconfigured WITH bind-host",
            rm.webAppAddress.substring(0, 7), "9.9.9.9");
      } else {
        // YarnConfiguration tries to figure out which rm host it's on by
        // binding to it, which doesn't happen for any of these fake
        // addresses, so we end up with 0.0.0.0
        assertEquals("Web address misconfigured WITHOUT bind-host",
            rm.webAppAddress.substring(0, 7), "0.0.0.0");
      }
    }
  } catch (YarnRuntimeException e) {
    fail("Should not throw any exceptions.");
  }
  // test if only RM_HOSTNAME_{rm_id} is set
  configuration.clear();
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  configuration.set(YarnConfiguration.RM_HA_IDS,
      RM1_NODE_ID + "," + RM2_NODE_ID);
  configuration.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,
      RM1_NODE_ID), "1.1.1.1");
  configuration.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,
      RM2_NODE_ID), "0.0.0.0");
  try {
    Configuration conf = new YarnConfiguration(configuration);
    rm = new MockRM(conf);
    rm.init(conf);
    assertEquals("RPC address not set for " + YarnConfiguration.RM_ADDRESS,
        "1.1.1.1:8032",
        conf.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM1_NODE_ID)));
    assertEquals("RPC address not set for " + YarnConfiguration.RM_ADDRESS,
        "0.0.0.0:8032",
        conf.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM2_NODE_ID)));
  } catch (YarnRuntimeException e) {
    fail("Should not throw any exceptions.");
  }
}
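For reference, HAUtil.addSuffix simply appends an RM id to a configuration key, which is why the test looks up per-RM RPC addresses the way it does. A minimal sketch, assuming the RM id is "rm1":
// "yarn.resourcemanager.address" + "." + "rm1" -> "yarn.resourcemanager.address.rm1"
String perRmKey = HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, "rm1");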
use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
the class NodeTimelineCollectorManager method startWebApp.
/**
* Launch the REST web server for this collector manager.
*/
private void startWebApp() {
  Configuration conf = getConfig();
  String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
  try {
    HttpServer2.Builder builder = new HttpServer2.Builder()
        .setName("timeline")
        .setConf(conf)
        .addEndpoint(URI.create(
            (YarnConfiguration.useHttps(conf) ? "https://" : "http://")
                + bindAddress));
    timelineRestServer = builder.build();
    // TODO: replace this by an authentication filter in future.
    HashMap<String, String> options = new HashMap<>();
    String username = conf.get(HADOOP_HTTP_STATIC_USER,
        DEFAULT_HADOOP_HTTP_STATIC_USER);
    options.put(HADOOP_HTTP_STATIC_USER, username);
    HttpServer2.defineFilter(timelineRestServer.getWebAppContext(),
        "static_user_filter_timeline",
        StaticUserWebFilter.StaticUserFilter.class.getName(),
        options, new String[] { "/*" });
    timelineRestServer.addJerseyResourcePackage(
        TimelineCollectorWebService.class.getPackage().getName() + ";"
            + GenericExceptionHandler.class.getPackage().getName() + ";"
            + YarnJacksonJaxbJsonProvider.class.getPackage().getName(),
        "/*");
    timelineRestServer.setAttribute(COLLECTOR_MANAGER_ATTR_KEY, this);
    timelineRestServer.start();
  } catch (Exception e) {
    String msg = "The per-node collector webapp failed to start.";
    LOG.error(msg, e);
    throw new YarnRuntimeException(msg, e);
  }
  // TODO: We need to think of the case of multiple interfaces
  this.timelineRestServerBindAddress = WebAppUtils.getResolvedAddress(
      timelineRestServer.getConnectorAddress(0));
  LOG.info("Instantiated the per-node collector webapp at "
      + timelineRestServerBindAddress);
}
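A minimal sketch of the configuration this method reads; the bind-host key and its 0.0.0.0 default are what YarnConfiguration appears to provide, and since the method appends ":0" the REST server ends up on an ephemeral port:
Configuration conf = new YarnConfiguration();
// Bind the per-node collector webapp to all interfaces; the actual resolved
// address is later exposed via timelineRestServerBindAddress.
conf.set(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, "0.0.0.0");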