Example usage of org.apache.hadoop.ha.HAServiceProtocol in project hadoop (by apache):
class RMHAUtils, method getHAState.
/**
 * Queries the ResourceManager described by the given configuration for its current
 * HA service state (e.g. ACTIVE or STANDBY) via the {@link HAServiceProtocol} RPC.
 *
 * <p>NOTE(review): this mutates the caller-supplied {@code yarnConf} by setting the
 * security service user name from {@code yarn.resourcemanager.principal} — confirm
 * callers do not reuse the configuration expecting it unchanged.
 *
 * @param yarnConf YARN configuration identifying the target ResourceManager
 * @return the HA state reported by the ResourceManager
 * @throws Exception if the RPC proxy cannot be created or the status call fails
 */
private static HAServiceState getHAState(YarnConfiguration yarnConf) throws Exception {
  // RPC timeout for HA admin CLI-style checks, with the framework default as fallback.
  int rpcTimeoutForChecks = yarnConf.getInt(
      CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
      CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
  // Required for Kerberos: the RPC layer must know the principal of the remote RM.
  yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
  // Declare-and-initialize in one step; the original split declaration added nothing.
  HAServiceTarget haServiceTarget = new RMHAServiceTarget(yarnConf);
  HAServiceProtocol proto = haServiceTarget.getProxy(yarnConf, rpcTimeoutForChecks);
  // Return directly instead of going through a redundant local.
  return proto.getServiceStatus().getState();
}
Example usage of org.apache.hadoop.ha.HAServiceProtocol in project cdap (by caskdata):
class YarnInfo, method getHAWebURL.
/**
 * Determines the web application URL of the currently active ResourceManager by
 * probing each configured RM id over the {@link HAServiceProtocol} RPC.
 * Should only be called when HA is enabled.
 *
 * @return web URL of the active ResourceManager
 * @throws IOException if an RPC to a ResourceManager fails for a reason other than
 *     connection refusal
 * @throws IllegalStateException if HA is not configured or no active RM was found
 */
private URL getHAWebURL() throws IOException {
  InetSocketAddress activeRM = null;
  Collection<String> rmIds = HAUtil.getRMHAIds(conf);
  if (rmIds.isEmpty()) {
    throw new IllegalStateException("Resource Manager HA web URL requested in non-HA mode.");
  }
  for (String rmId : rmIds) {
    try {
      // Fresh configuration per candidate so RM_HA_ID does not leak between iterations.
      YarnConfiguration yarnConf = new YarnConfiguration(conf);
      yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
      // Required for Kerberos: the RPC layer must know the principal of the remote RM.
      yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
          conf.get(YarnConfiguration.RM_PRINCIPAL, ""));
      RMHAServiceTarget rmhaServiceTarget = new RMHAServiceTarget(yarnConf);
      HAServiceProtocol proxy = rmhaServiceTarget.getProxy(yarnConf, 10000);
      HAServiceStatus serviceStatus = proxy.getServiceStatus();
      if (HAServiceProtocol.HAServiceState.ACTIVE != serviceStatus.getState()) {
        continue;
      }
      activeRM = rmhaServiceTarget.getAddress();
      // Fix: stop probing once the active RM is found; the original kept contacting
      // the remaining RMs for no benefit (there is at most one active RM).
      break;
    } catch (ConnectException e) {
      // Best-effort probe: an unreachable RM is treated as "not active", not fatal.
      LOG.trace("Connection refused when attempting to connect to ResourceManager {}. "
          + "Assuming that it is not available.", rmId);
    }
  }
  if (activeRM == null) {
    throw new IllegalStateException("Could not find an active resource manager");
  }
  return adminToWebappAddress(activeRM);
}
Example usage of org.apache.hadoop.ha.HAServiceProtocol in project hadoop (by apache):
class TestNNHealthCheck, method doNNHealthCheckTest.
/**
 * Exercises the NameNode health-check RPC against the mini cluster: installs a mock
 * resource checker, verifies the HA target advertises the expected monitoring address,
 * confirms {@code monitorHealth()} succeeds while resources are available, then flips
 * the mock to "no resources" and asserts the call fails with the expected message.
 */
private void doNNHealthCheckTest() throws IOException {
  MockNameNodeResourceChecker resourceChecker = new MockNameNodeResourceChecker(conf);
  cluster.getNameNode(0).getNamesystem().setNNResourceChecker(resourceChecker);
  String nameServiceId = DFSUtil.getNamenodeNameServiceId(conf);
  NNHAServiceTarget haTarget = new NNHAServiceTarget(conf, nameServiceId, "nn1");
  // When a dedicated lifeline RPC address is configured for nn1, health monitoring
  // goes through it; otherwise the regular service address is used.
  boolean lifelineConfigured =
      conf.get(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY + "." + nameServiceId + ".nn1") != null;
  final String expectedTargetString = lifelineConfigured
      ? haTarget.getHealthMonitorAddress().toString()
      : haTarget.getAddress().toString();
  assertTrue("Expected haTarget " + haTarget + " containing " + expectedTargetString,
      haTarget.toString().contains(expectedTargetString));
  HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(
      conf, conf.getInt(HA_HM_RPC_TIMEOUT_KEY, HA_HM_RPC_TIMEOUT_DEFAULT));
  // Should not throw error, which indicates healthy.
  rpc.monitorHealth();
  resourceChecker.setResourcesAvailable(false);
  try {
    // Should throw error - NN is unhealthy.
    rpc.monitorHealth();
    fail("Should not have succeeded in calling monitorHealth");
  } catch (HealthCheckFailedException hcfe) {
    GenericTestUtils.assertExceptionContains(
        "The NameNode has no resources available", hcfe);
  } catch (RemoteException re) {
    // Depending on the proxy path the failure may arrive wrapped in a RemoteException.
    GenericTestUtils.assertExceptionContains(
        "The NameNode has no resources available",
        re.unwrapRemoteException(HealthCheckFailedException.class));
  }
}
Example usage of org.apache.hadoop.ha.HAServiceProtocol in project cdap (by caskdata):
class AbstractHDFSStats, method getHAWebURL.
/**
 * Determines the HTTP address of the currently active NameNode by probing each
 * configured NameNode id of the nameservice over the {@link HAServiceProtocol} RPC.
 *
 * <p>NOTE(review): despite the {@code @Nullable} annotation this method never returns
 * {@code null} — it throws {@link IllegalStateException} when no active NameNode is
 * found. Confirm which contract callers rely on.
 *
 * @return HTTP URL of the active NameNode
 * @throws IOException if an RPC to a NameNode fails
 * @throws IllegalStateException if no active NameNode was found
 */
@Nullable
private URL getHAWebURL() throws IOException {
  String activeNamenode = null;
  String nameService = getNameService();
  HdfsConfiguration hdfsConf = new HdfsConfiguration(conf);
  // Required for Kerberos: the RPC layer must know the principal of the remote NN.
  String nameNodePrincipal = conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
  hdfsConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, nameNodePrincipal);
  for (String nnId : DFSUtil.getNameNodeIds(conf, nameService)) {
    HAServiceTarget haServiceTarget = new NNHAServiceTarget(hdfsConf, nameService, nnId);
    HAServiceProtocol proxy = haServiceTarget.getProxy(hdfsConf, 10000);
    HAServiceStatus serviceStatus = proxy.getServiceStatus();
    if (HAServiceProtocol.HAServiceState.ACTIVE != serviceStatus.getState()) {
      continue;
    }
    activeNamenode = DFSUtil.getNamenodeServiceAddr(hdfsConf, nameService, nnId);
    // Fix: stop probing once the active NN is found; the original kept contacting the
    // remaining NameNodes for no benefit (there is at most one active NN). This also
    // matches the sibling ResourceManager probe in this project.
    break;
  }
  if (activeNamenode == null) {
    throw new IllegalStateException("Could not find an active namenode");
  }
  return rpcToHttpAddress(URI.create(activeNamenode));
}
End of aggregated usage examples.