
Example 1 with MiniYARNCluster

Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

Class TestDiskFailures, method testDirsFailures.

private void testDirsFailures(boolean localORLogDirs) throws IOException {
    String dirType = localORLogDirs ? "local" : "log";
    String dirsProperty = localORLogDirs ? YarnConfiguration.NM_LOCAL_DIRS : YarnConfiguration.NM_LOG_DIRS;
    Configuration conf = new Configuration();
    // set disk health check interval to a small value (say 1 sec).
    conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, DISK_HEALTH_CHECK_INTERVAL);
    // If 2 out of the total 4 local-dirs fail OR if 2 out of the total 4
    // log-dirs fail, then the node's health status should become unhealthy.
    conf.setFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, 0.60F);
    if (yarnCluster != null) {
        yarnCluster.stop();
        FileUtil.fullyDelete(localFSDirBase);
        localFSDirBase.mkdirs();
    }
    LOG.info("Starting up YARN cluster");
    yarnCluster = new MiniYARNCluster(TestDiskFailures.class.getName(), 1, numLocalDirs, numLogDirs);
    yarnCluster.init(conf);
    yarnCluster.start();
    NodeManager nm = yarnCluster.getNodeManager(0);
    LOG.info("Configured nm-" + dirType + "-dirs=" + nm.getConfig().get(dirsProperty));
    dirsHandler = nm.getNodeHealthChecker().getDiskHandler();
    List<String> list = localORLogDirs ? dirsHandler.getLocalDirs() : dirsHandler.getLogDirs();
    String[] dirs = list.toArray(new String[list.size()]);
    Assert.assertEquals("Number of nm-" + dirType + "-dirs is wrong.", numLocalDirs, dirs.length);
    String expectedDirs = StringUtils.join(",", list);
    // validate the health of disks initially
    verifyDisksHealth(localORLogDirs, expectedDirs, true);
    // Make 1 nm-local-dir fail and verify that the nodemanager can identify
    // the disk failure(s) and can update the list of good nm-local-dirs.
    prepareDirToFail(dirs[2]);
    expectedDirs = dirs[0] + "," + dirs[1] + "," + dirs[3];
    verifyDisksHealth(localORLogDirs, expectedDirs, true);
    // Now, make 1 more nm-local-dir/nm-log-dir fail and verify that the
    // nodemanager can identify the disk failures, update the list of good
    // nm-local-dirs/nm-log-dirs, and mark the overall health status of the
    // node as unhealthy.
    prepareDirToFail(dirs[0]);
    expectedDirs = dirs[1] + "," + dirs[3];
    verifyDisksHealth(localORLogDirs, expectedDirs, false);
    // Fail the remaining 2 local-dirs/log-dirs and verify if NM remains with
    // empty list of local-dirs/log-dirs and the overall health status is
    // unhealthy.
    prepareDirToFail(dirs[1]);
    prepareDirToFail(dirs[3]);
    expectedDirs = "";
    verifyDisksHealth(localORLogDirs, expectedDirs, false);
}
Also used : NodeManager(org.apache.hadoop.yarn.server.nodemanager.NodeManager) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster)
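
prepareDirToFail is a helper defined elsewhere in TestDiskFailures and not shown above. A minimal sketch of one way such a helper could make a configured nm-local-dir/nm-log-dir unusable is below: delete the directory and drop an ordinary file in its place so the NodeManager's disk checker flags the path as failed. Treat the body as an illustration under that assumption, not the exact Hadoop implementation (it reuses java.io.File, org.apache.hadoop.fs.FileUtil, and org.junit.Assert, which the test already imports).

private void prepareDirToFail(String dir) throws IOException {
    File file = new File(dir);
    // remove the directory tree so the path is free
    FileUtil.fullyDelete(file);
    // occupy the path with a regular file so it can no longer serve as a directory
    Assert.assertTrue("Couldn't create file " + dir + " to fail the dir.",
        file.createNewFile());
}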

Example 2 with MiniYARNCluster

Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

Class TestRMFailover, method testRMWebAppRedirect.

@Test
public void testRMWebAppRedirect() throws YarnException, InterruptedException, IOException {
    cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1);
    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
    cluster.init(conf);
    cluster.start();
    getAdminService(0).transitionToActive(req);
    String rm1Url = "http://0.0.0.0:18088";
    String rm2Url = "http://0.0.0.0:28088";
    String redirectURL = getRedirectURL(rm2Url);
    // if the URI is null, RMWebAppFilter appends a slash at the end of the redirect URL
    assertEquals(redirectURL, rm1Url + "/");
    redirectURL = getRedirectURL(rm2Url + "/metrics");
    assertEquals(redirectURL, rm1Url + "/metrics");
    redirectURL = getRedirectURL(rm2Url + "/jmx?param1=value1+x&param2=y");
    assertEquals(rm1Url + "/jmx?param1=value1+x&param2=y", redirectURL);
    // standby RM links /conf, /stacks, /logLevel, /static, /logs,
    // /cluster/cluster as well as webService
    // /ws/v1/cluster/info should not be redirected to active RM
    redirectURL = getRedirectURL(rm2Url + "/cluster/cluster");
    assertNull(redirectURL);
    redirectURL = getRedirectURL(rm2Url + "/conf");
    assertNull(redirectURL);
    redirectURL = getRedirectURL(rm2Url + "/stacks");
    assertNull(redirectURL);
    redirectURL = getRedirectURL(rm2Url + "/logLevel");
    assertNull(redirectURL);
    redirectURL = getRedirectURL(rm2Url + "/static");
    assertNull(redirectURL);
    redirectURL = getRedirectURL(rm2Url + "/logs");
    assertNull(redirectURL);
    redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/info");
    assertNull(redirectURL);
    redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/apps");
    assertEquals(redirectURL, rm1Url + "/ws/v1/cluster/apps");
    redirectURL = getRedirectURL(rm2Url + "/proxy/" + fakeAppId);
    assertNull(redirectURL);
    // Transition the active RM to standby; now both RMs are in standby mode.
    getAdminService(0).transitionToStandby(req);
    // RM2 is expected to serve the HTTP request itself,
    // and the Refresh header field is expected to be set.
    redirectURL = getRefreshURL(rm2Url);
    assertTrue(redirectURL != null && redirectURL.contains(YarnWebParams.NEXT_REFRESH_INTERVAL) && redirectURL.contains(rm2Url));
}
Also used : MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) Test(org.junit.Test)
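
getRedirectURL (and the similar getRefreshURL) is another TestRMFailover helper that is not shown above. A plausible sketch, assuming it issues a single HTTP request without following redirects and returns the Location header, or null when the standby RM serves the page itself; the status-code check here is deliberately generic (any 3xx) rather than a claim about the exact code the filter uses. Requires java.net.URL and java.net.HttpURLConnection.

static String getRedirectURL(String url) {
    String redirectUrl = null;
    try {
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        // inspect the redirect instead of following it
        conn.setInstanceFollowRedirects(false);
        int code = conn.getResponseCode();
        if (code >= 300 && code < 400) {
            redirectUrl = conn.getHeaderField("Location");
        }
    } catch (Exception e) {
        // no redirect (or connection problem): fall through and return null
    }
    return redirectUrl;
}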

Example 3 with MiniYARNCluster

Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

Class TestRMFailover, method setup.

@Before
public void setup() throws IOException {
    fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
    conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
    HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
    HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
    conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
    cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1);
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) Before(org.junit.Before)
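
The setup above only constructs the two-RM MiniYARNCluster; individual tests then start it and drive failover explicitly. Below is a minimal sketch of such a test, reusing the cluster, conf, req, and getAdminService members seen in testRMWebAppRedirect; the method name is made up for illustration, and cluster shutdown is assumed to happen in a teardown method.

@Test
public void testExplicitFailoverSketch() throws Exception {
    cluster.init(conf);
    cluster.start();
    // make rm1 active; getAdminService(index) is assumed to return that RM's AdminService
    getAdminService(0).transitionToActive(req);
    // explicit failover: rm1 to standby, then rm2 to active
    getAdminService(0).transitionToStandby(req);
    getAdminService(1).transitionToActive(req);
}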

Example 4 with MiniYARNCluster

Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

Class TestHedgingRequestRMFailoverProxyProvider, method testHedgingRequestProxyProvider.

@Test
public void testHedgingRequestProxyProvider() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
    conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3,rm4,rm5");
    conf.set(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER, RequestHedgingRMFailoverProxyProvider.class.getName());
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 2000);
    try (MiniYARNCluster cluster = new MiniYARNCluster("testHedgingRequestProxyProvider", 5, 0, 1, 1)) {
        HATestUtil.setRpcAddressForRM("rm1", 10000, conf);
        HATestUtil.setRpcAddressForRM("rm2", 20000, conf);
        HATestUtil.setRpcAddressForRM("rm3", 30000, conf);
        HATestUtil.setRpcAddressForRM("rm4", 40000, conf);
        HATestUtil.setRpcAddressForRM("rm5", 50000, conf);
        conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
        cluster.init(conf);
        cluster.start();
        final YarnClient client = YarnClient.createYarnClient();
        client.init(conf);
        client.start();
        // Transition rm5 to active;
        long start = System.currentTimeMillis();
        makeRMActive(cluster, 4);
        validateActiveRM(client);
        long end = System.currentTimeMillis();
        System.out.println("Client call succeeded at " + end);
        // should return the response fast
        Assert.assertTrue(end - start <= 10000);
        // transition rm5 to standby
        cluster.getResourceManager(4).getRMContext().getRMAdminService()
            .transitionToStandby(new HAServiceProtocol.StateChangeRequestInfo(
                HAServiceProtocol.RequestSource.REQUEST_BY_USER));
        makeRMActive(cluster, 2);
        validateActiveRM(client);
    }
}
Also used : HAServiceProtocol(org.apache.hadoop.ha.HAServiceProtocol) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) Test(org.junit.Test)
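
makeRMActive and validateActiveRM are helpers in this test class that are not shown above. A plausible sketch of makeRMActive, assuming it simply drives the chosen RM's AdminService through transitionToActive, mirroring the transitionToStandby call in the test body:

private void makeRMActive(MiniYARNCluster cluster, int index) throws Exception {
    // transition the RM at the given index to active via its RMAdminService
    cluster.getResourceManager(index).getRMContext().getRMAdminService()
        .transitionToActive(new HAServiceProtocol.StateChangeRequestInfo(
            HAServiceProtocol.RequestSource.REQUEST_BY_USER));
}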

Example 5 with MiniYARNCluster

Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

Class TestYarnCLI, method testGetQueueInfoPreemptionDisabled.

@Test
public void testGetQueueInfoPreemptionDisabled() throws Exception {
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    ReservationSystemTestUtil.setupQueueConfiguration(conf);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
    conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity." + "ProportionalCapacityPreemptionPolicy");
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
    conf.setBoolean(PREFIX + "root.a.a1.disable_preemption", true);
    try (MiniYARNCluster cluster = new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
        YarnClient yarnClient = YarnClient.createYarnClient()) {
        cluster.init(conf);
        cluster.start();
        final Configuration yarnConf = cluster.getConfig();
        yarnClient.init(yarnConf);
        yarnClient.start();
        QueueCLI cli = new QueueCLI();
        cli.setClient(yarnClient);
        cli.setSysOutPrintStream(sysOut);
        cli.setSysErrPrintStream(sysErr);
        sysOutStream.reset();
        int result = cli.run(new String[] { "-status", "a1" });
        assertEquals(0, result);
        Assert.assertTrue(sysOutStream.toString().contains("Preemption : disabled"));
    }
}
Also used : CapacitySchedulerConfiguration(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) CapacitySchedulerConfiguration(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) Test(org.junit.Test)
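
PREFIX in the snippet above is presumably CapacitySchedulerConfiguration.PREFIX ("yarn.scheduler.capacity."), so the property being set expands to yarn.scheduler.capacity.root.a.a1.disable_preemption. Besides parsing the CLI output, a test could also check the flag through the client API; a small sketch follows, assuming the QueueInfo record exposes getPreemptionDisabled() as recent Hadoop releases do.

// Sketch only: read the queue back through YarnClient and check the preemption flag.
QueueInfo a1Info = yarnClient.getQueueInfo("a1");
Assert.assertEquals(Boolean.TRUE, a1Info.getPreemptionDisabled());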

Aggregations

MiniYARNCluster (org.apache.hadoop.yarn.server.MiniYARNCluster): 39 uses
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 23 uses
Configuration (org.apache.hadoop.conf.Configuration): 19 uses
Test (org.junit.Test): 19 uses
YarnClient (org.apache.hadoop.yarn.client.api.YarnClient): 15 uses
File (java.io.File): 7 uses
ReservationSubmissionRequest (org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest): 7 uses
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 7 uses
Clock (org.apache.hadoop.yarn.util.Clock): 7 uses
UTCClock (org.apache.hadoop.yarn.util.UTCClock): 7 uses
IOException (java.io.IOException): 5 uses
URL (java.net.URL): 5 uses
ReservationListRequest (org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest): 5 uses
ReservationListResponse (org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse): 5 uses
CapacitySchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration): 5 uses
BeforeClass (org.junit.BeforeClass): 5 uses
FileOutputStream (java.io.FileOutputStream): 4 uses
OutputStream (java.io.OutputStream): 4 uses
Path (org.apache.hadoop.fs.Path): 4 uses
ReservationId (org.apache.hadoop.yarn.api.records.ReservationId): 4 uses