Search in sources :

Example 1 with HAServiceProtocol

Use of org.apache.hadoop.ha.HAServiceProtocol in the Apache Hadoop project.

The class RMHAUtils, method getHAState.

/**
 * Queries the ResourceManager described by the given configuration over RPC
 * and returns its current HA service state.
 *
 * @param yarnConf configuration identifying the RM; mutated to set the
 *                 security service user name from the RM principal
 * @return the HA state reported by the ResourceManager
 * @throws Exception if the service target or RPC proxy cannot be created,
 *                   or the status call fails
 */
private static HAServiceState getHAState(YarnConfiguration yarnConf) throws Exception {
    // Timeout for the CLI health-check RPC, with the standard default.
    final int checkTimeoutMs = yarnConf.getInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY, CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
    // The RM principal must be published as the security service user name
    // before the proxy is constructed.
    yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
    final HAServiceTarget target = new RMHAServiceTarget(yarnConf);
    final HAServiceProtocol proxy = target.getProxy(yarnConf, checkTimeoutMs);
    return proxy.getServiceStatus().getState();
}
Also used : RMHAServiceTarget(org.apache.hadoop.yarn.client.RMHAServiceTarget) HAServiceProtocol(org.apache.hadoop.ha.HAServiceProtocol) HAServiceState(org.apache.hadoop.ha.HAServiceProtocol.HAServiceState) HAServiceTarget(org.apache.hadoop.ha.HAServiceTarget) RMHAServiceTarget(org.apache.hadoop.yarn.client.RMHAServiceTarget)

Example 2 with HAServiceProtocol

Use of org.apache.hadoop.ha.HAServiceProtocol in the CDAP project by Cask Data.

The class YarnInfo, method getHAWebURL.

/**
   * Resolves the webapp URL of the currently active ResourceManager by
   * probing each configured RM over RPC until one reports ACTIVE.
   * Should only be called when HA is enabled.
   *
   * @return the webapp URL derived from the active RM's admin address
   * @throws IOException if building a service target or proxy fails
   * @throws IllegalStateException if HA is not configured or no RM is active
   */
private URL getHAWebURL() throws IOException {
    InetSocketAddress activeRM = null;
    Collection<String> rmIds = HAUtil.getRMHAIds(conf);
    if (rmIds.isEmpty()) {
        throw new IllegalStateException("Resource Manager HA web URL requested in non-HA mode.");
    }
    for (String rmId : rmIds) {
        try {
            YarnConfiguration yarnConf = new YarnConfiguration(conf);
            yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
            yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, conf.get(YarnConfiguration.RM_PRINCIPAL, ""));
            RMHAServiceTarget rmhaServiceTarget = new RMHAServiceTarget(yarnConf);
            HAServiceProtocol proxy = rmhaServiceTarget.getProxy(yarnConf, 10000);
            HAServiceStatus serviceStatus = proxy.getServiceStatus();
            if (HAServiceProtocol.HAServiceState.ACTIVE == serviceStatus.getState()) {
                activeRM = rmhaServiceTarget.getAddress();
                // Stop once the active RM is found: the original kept probing
                // every remaining RM over RPC for no benefit.
                break;
            }
        } catch (ConnectException e) {
            // A refused connection just means this RM is down; try the next one.
            LOG.trace("Connection refused when attempting to connect to ResourceManager {}. " + "Assuming that it is not available.", rmId);
        }
    }
    if (activeRM == null) {
        throw new IllegalStateException("Could not find an active resource manager");
    }
    return adminToWebappAddress(activeRM);
}
Also used : RMHAServiceTarget(org.apache.hadoop.yarn.client.RMHAServiceTarget) HAServiceProtocol(org.apache.hadoop.ha.HAServiceProtocol) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) InetSocketAddress(java.net.InetSocketAddress) HAServiceStatus(org.apache.hadoop.ha.HAServiceStatus) ConnectException(java.net.ConnectException)

Example 3 with HAServiceProtocol

Use of org.apache.hadoop.ha.HAServiceProtocol in the Apache Hadoop project.

The class TestNNHealthCheck, method doNNHealthCheckTest.

/**
 * Exercises HAServiceProtocol.monitorHealth() against a NameNode whose
 * resource availability is controlled by a mock checker: the call must
 * succeed while resources are available and fail once they are not.
 */
private void doNNHealthCheckTest() throws IOException {
    MockNameNodeResourceChecker resourceChecker = new MockNameNodeResourceChecker(conf);
    cluster.getNameNode(0).getNamesystem().setNNResourceChecker(resourceChecker);
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
    NNHAServiceTarget target = new NNHAServiceTarget(conf, nsId, "nn1");
    // With a dedicated lifeline RPC address configured, the target string is
    // expected to contain the health-monitor address; otherwise the regular
    // service address.
    String lifelineKey = DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY + "." + nsId + ".nn1";
    final String expectedTargetString = (conf.get(lifelineKey) != null)
        ? target.getHealthMonitorAddress().toString()
        : target.getAddress().toString();
    assertTrue("Expected haTarget " + target + " containing " + expectedTargetString, target.toString().contains(expectedTargetString));
    HAServiceProtocol healthRpc = target.getHealthMonitorProxy(conf, conf.getInt(HA_HM_RPC_TIMEOUT_KEY, HA_HM_RPC_TIMEOUT_DEFAULT));
    // Healthy NameNode: the call must complete without throwing.
    healthRpc.monitorHealth();
    // Flip the mock so the NN reports resource exhaustion; the next health
    // check must now fail, either directly or wrapped in a RemoteException.
    resourceChecker.setResourcesAvailable(false);
    try {
        healthRpc.monitorHealth();
        fail("Should not have succeeded in calling monitorHealth");
    } catch (HealthCheckFailedException hcfe) {
        GenericTestUtils.assertExceptionContains("The NameNode has no resources available", hcfe);
    } catch (RemoteException re) {
        GenericTestUtils.assertExceptionContains("The NameNode has no resources available", re.unwrapRemoteException(HealthCheckFailedException.class));
    }
}
Also used : MockNameNodeResourceChecker(org.apache.hadoop.hdfs.server.namenode.MockNameNodeResourceChecker) HAServiceProtocol(org.apache.hadoop.ha.HAServiceProtocol) NNHAServiceTarget(org.apache.hadoop.hdfs.tools.NNHAServiceTarget) RemoteException(org.apache.hadoop.ipc.RemoteException) HealthCheckFailedException(org.apache.hadoop.ha.HealthCheckFailedException)

Example 4 with HAServiceProtocol

Use of org.apache.hadoop.ha.HAServiceProtocol in the CDAP project by Cask Data.

The class AbstractHDFSStats, method getHAWebURL.

/**
 * Resolves the HTTP web URL of the currently active NameNode in the HA
 * nameservice by probing each configured NameNode over RPC.
 *
 * @return the HTTP address corresponding to the active NameNode's service
 *         address
 * @throws IOException if building a service target or proxy fails
 * @throws IllegalStateException if no NameNode reports ACTIVE
 */
// NOTE(review): annotated @Nullable but the visible body throws rather than
// returning null; rpcToHttpAddress may return null — confirm.
@Nullable
private URL getHAWebURL() throws IOException {
    String activeNamenode = null;
    String nameService = getNameService();
    HdfsConfiguration hdfsConf = new HdfsConfiguration(conf);
    // Publish the NN principal as the security service user name before
    // creating proxies.
    String nameNodePrincipal = conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
    hdfsConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, nameNodePrincipal);
    for (String nnId : DFSUtil.getNameNodeIds(conf, nameService)) {
        HAServiceTarget haServiceTarget = new NNHAServiceTarget(hdfsConf, nameService, nnId);
        HAServiceProtocol proxy = haServiceTarget.getProxy(hdfsConf, 10000);
        HAServiceStatus serviceStatus = proxy.getServiceStatus();
        if (HAServiceProtocol.HAServiceState.ACTIVE == serviceStatus.getState()) {
            activeNamenode = DFSUtil.getNamenodeServiceAddr(hdfsConf, nameService, nnId);
            // Stop once the active NN is found: the original kept issuing
            // status RPCs to every remaining NameNode for no benefit.
            break;
        }
    }
    if (activeNamenode == null) {
        throw new IllegalStateException("Could not find an active namenode");
    }
    return rpcToHttpAddress(URI.create(activeNamenode));
}
Also used : HAServiceProtocol(org.apache.hadoop.ha.HAServiceProtocol) NNHAServiceTarget(org.apache.hadoop.hdfs.tools.NNHAServiceTarget) HAServiceStatus(org.apache.hadoop.ha.HAServiceStatus) NNHAServiceTarget(org.apache.hadoop.hdfs.tools.NNHAServiceTarget) HAServiceTarget(org.apache.hadoop.ha.HAServiceTarget) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Nullable(javax.annotation.Nullable)

Aggregations

HAServiceProtocol (org.apache.hadoop.ha.HAServiceProtocol)4 HAServiceStatus (org.apache.hadoop.ha.HAServiceStatus)2 HAServiceTarget (org.apache.hadoop.ha.HAServiceTarget)2 NNHAServiceTarget (org.apache.hadoop.hdfs.tools.NNHAServiceTarget)2 RMHAServiceTarget (org.apache.hadoop.yarn.client.RMHAServiceTarget)2 ConnectException (java.net.ConnectException)1 InetSocketAddress (java.net.InetSocketAddress)1 Nullable (javax.annotation.Nullable)1 HAServiceState (org.apache.hadoop.ha.HAServiceProtocol.HAServiceState)1 HealthCheckFailedException (org.apache.hadoop.ha.HealthCheckFailedException)1 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)1 MockNameNodeResourceChecker (org.apache.hadoop.hdfs.server.namenode.MockNameNodeResourceChecker)1 RemoteException (org.apache.hadoop.ipc.RemoteException)1 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)1