
Example 26 with MiniYARNCluster

use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

the class TestAMRMProxy method testE2ETokenSwap.

/*
   * This test validates that an AM cannot register directly with the RM using the
   * token provided by the AMRMProxy.
   */
@Test(timeout = 120000)
public void testE2ETokenSwap() throws Exception {
    ApplicationMasterProtocol client;
    try (MiniYARNCluster cluster = new MiniYARNCluster("testE2ETokenSwap", 1, 1, 1);
        YarnClient rmClient = YarnClient.createYarnClient()) {
        Configuration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
        cluster.init(conf);
        cluster.start();
        // the client will connect to the RM with the token provided by AMRMProxy
        final Configuration yarnConf = cluster.getConfig();
        rmClient.init(yarnConf);
        rmClient.start();
        ApplicationAttemptId appAttmptId = createApp(rmClient, cluster, conf);
        ApplicationId appId = appAttmptId.getApplicationId();
        client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
        try {
            client.registerApplicationMaster(RegisterApplicationMasterRequest.newInstance(NetUtils.getHostname(), 1024, ""));
            Assert.fail();
        } catch (IOException e) {
            Assert.assertTrue(e.getMessage().startsWith("Invalid AMRMToken from appattempt_"));
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ApplicationMasterProtocol(org.apache.hadoop.yarn.api.ApplicationMasterProtocol) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) IOException(java.io.IOException) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) Test(org.junit.Test)
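A MiniYARNCluster reads the AMRMProxy flag from the configuration passed to init(), and clients have to be initialized with the cluster's generated configuration rather than a fresh YarnConfiguration, because that copy carries the dynamically assigned RM and proxy addresses. A minimal sketch of that lifecycle outside JUnit (the class name is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

public class MiniYarnAmrmProxySketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        // Enable the AMRMProxy before init(); the service reads this flag during initialization.
        conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
        // Constructor arguments: cluster name, NodeManagers, local dirs per NM, log dirs per NM.
        try (MiniYARNCluster cluster = new MiniYARNCluster("amrmProxySketch", 1, 1, 1);
            YarnClient client = YarnClient.createYarnClient()) {
            cluster.init(conf);
            cluster.start();
            // Use the cluster's own config: it carries the dynamically assigned
            // RM and AMRMProxy addresses instead of the yarn-default.xml defaults.
            client.init(cluster.getConfig());
            client.start();
            // ... create an application and exercise the proxy here ...
        }
    }
}

The try-with-resources form mirrors the test above: both MiniYARNCluster and YarnClient are AutoCloseable, so the cluster and client are torn down even if the body throws.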

Example 27 with MiniYARNCluster

use of org.apache.hadoop.yarn.server.MiniYARNCluster in project flink by apache.

the class YarnTestBase method start.

private static void start(Configuration conf, String principal, String keytab) {
    // Set the home directory to a temp directory; Flink on YARN uses the home dir to distribute files.
    File homeDir = null;
    try {
        homeDir = tmp.newFolder();
    } catch (IOException e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
    System.setProperty("user.home", homeDir.getAbsolutePath());
    String uberjarStartLoc = "..";
    LOG.info("Trying to locate uberjar in {}", new File(uberjarStartLoc));
    flinkUberjar = findFile(uberjarStartLoc, new RootDirFilenameFilter());
    Assert.assertNotNull("Flink uberjar not found", flinkUberjar);
    String flinkDistRootDir = flinkUberjar.getParentFile().getParent();
    // the uberjar is located in lib/
    flinkLibFolder = flinkUberjar.getParentFile();
    Assert.assertNotNull("Flink flinkLibFolder not found", flinkLibFolder);
    Assert.assertTrue("lib folder not found", flinkLibFolder.exists());
    Assert.assertTrue("lib folder not found", flinkLibFolder.isDirectory());
    if (!flinkUberjar.exists()) {
        Assert.fail("Unable to locate yarn-uberjar.jar");
    }
    try {
        LOG.info("Starting up MiniYARNCluster");
        if (yarnCluster == null) {
            yarnCluster = new MiniYARNCluster(conf.get(YarnTestBase.TEST_CLUSTER_NAME_KEY), NUM_NODEMANAGERS, 1, 1);
            yarnCluster.init(conf);
            yarnCluster.start();
        }
        Map<String, String> map = new HashMap<String, String>(System.getenv());
        File flinkConfDirPath = findFile(flinkDistRootDir, new ContainsName(new String[] { "flink-conf.yaml" }));
        Assert.assertNotNull(flinkConfDirPath);
        if (!StringUtils.isBlank(principal) && !StringUtils.isBlank(keytab)) {
            //copy conf dir to test temporary workspace location
            tempConfPathForSecureRun = tmp.newFolder("conf");
            String confDirPath = flinkConfDirPath.getParentFile().getAbsolutePath();
            FileUtils.copyDirectory(new File(confDirPath), tempConfPathForSecureRun);
            try (FileWriter fw = new FileWriter(new File(tempConfPathForSecureRun, "flink-conf.yaml"), true);
                BufferedWriter bw = new BufferedWriter(fw);
                PrintWriter out = new PrintWriter(bw)) {
                LOG.info("writing keytab: " + keytab + " and principal: " + principal + " to config file");
                out.println("");
                out.println("#Security Configurations Auto Populated ");
                out.println(SecurityOptions.KERBEROS_LOGIN_KEYTAB.key() + ": " + keytab);
                out.println(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL.key() + ": " + principal);
                out.println("");
            } catch (IOException e) {
                throw new RuntimeException("Exception occured while trying to append the security configurations.", e);
            }
            String configDir = tempConfPathForSecureRun.getAbsolutePath();
            LOG.info("Temporary Flink configuration directory to be used for secure test: {}", configDir);
            Assert.assertNotNull(configDir);
            map.put(ConfigConstants.ENV_FLINK_CONF_DIR, configDir);
        } else {
            map.put(ConfigConstants.ENV_FLINK_CONF_DIR, flinkConfDirPath.getParent());
        }
        File yarnConfFile = writeYarnSiteConfigXML(conf);
        map.put("YARN_CONF_DIR", yarnConfFile.getParentFile().getAbsolutePath());
        // see YarnClusterDescriptor() for more info
        map.put("IN_TESTS", "yes we are in tests");
        TestBaseUtils.setEnv(map);
        Assert.assertTrue(yarnCluster.getServiceState() == Service.STATE.STARTED);
        // wait for the nodeManagers to connect
        while (!yarnCluster.waitForNodeManagersToConnect(500)) {
            LOG.info("Waiting for Nodemanagers to connect");
        }
    } catch (Exception ex) {
        ex.printStackTrace();
        LOG.error("setup failure", ex);
        Assert.fail();
    }
}
Also used : HashMap(java.util.HashMap) FileWriter(java.io.FileWriter) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) BufferedWriter(java.io.BufferedWriter) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) File(java.io.File) PrintWriter(java.io.PrintWriter)
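Stripped of the Flink-specific uberjar and keytab handling, the helper above boils down to three steps: start the cluster once, persist its effective configuration as a yarn-site.xml that child processes can find through YARN_CONF_DIR, and block until the NodeManagers have registered. A rough sketch of those steps using only Hadoop APIs (the class name and the target directory are placeholders):

import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

public class MiniYarnSiteSketch {

    public static void main(String[] args) throws Exception {
        MiniYARNCluster cluster = new MiniYARNCluster("siteSketch", 2, 1, 1);
        cluster.init(new YarnConfiguration());
        cluster.start();
        try {
            // Persist the effective configuration so externally launched processes
            // (for example a Flink client) can find it through YARN_CONF_DIR.
            File confDir = new File(System.getProperty("java.io.tmpdir"), "yarn-conf"); // placeholder location
            confDir.mkdirs();
            try (OutputStream out = new FileOutputStream(new File(confDir, "yarn-site.xml"))) {
                cluster.getConfig().writeXml(out);
            }
            // Block until both NodeManagers have registered with the ResourceManager;
            // the call returns false if they are not all connected within 500 ms.
            while (!cluster.waitForNodeManagersToConnect(500)) {
                System.out.println("Waiting for NodeManagers to connect");
            }
        } finally {
            cluster.stop();
        }
    }
}

Looping on waitForNodeManagersToConnect(500) reproduces the busy wait in the Flink helper: the call keeps returning false until every NodeManager has checked in.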

Example 28 with MiniYARNCluster

use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

the class TestYarnClient method testSubmitIncorrectQueueToCapacityScheduler.

@Test(timeout = 30000)
public void testSubmitIncorrectQueueToCapacityScheduler() throws IOException {
    MiniYARNCluster cluster = new MiniYARNCluster("testMRAMTokens", 1, 1, 1);
    YarnClient rmClient = null;
    try {
        YarnConfiguration conf = new YarnConfiguration();
        conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
        cluster.init(conf);
        cluster.start();
        final Configuration yarnConf = cluster.getConfig();
        rmClient = YarnClient.createYarnClient();
        rmClient.init(yarnConf);
        rmClient.start();
        YarnClientApplication newApp = rmClient.createApplication();
        ApplicationId appId = newApp.getNewApplicationResponse().getApplicationId();
        // Create launch context for app master
        ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
        // set the application id
        appContext.setApplicationId(appId);
        // set the application name
        appContext.setApplicationName("test");
        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue("nonexist");
        // Set up the container launch context for the application master
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        appContext.setAMContainerSpec(amContainer);
        appContext.setResource(Resource.newInstance(1024, 1));
        // appContext.setUnmanagedAM(unmanaged);
        // Submit the application to the applications manager
        rmClient.submitApplication(appContext);
        Assert.fail("Job submission should have thrown an exception");
    } catch (YarnException e) {
        Assert.assertTrue(e.getMessage().contains("Failed to submit"));
    } finally {
        if (rmClient != null) {
            rmClient.stop();
        }
        cluster.stop();
    }
}
Also used : CapacitySchedulerConfiguration(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) YarnClientApplication(org.apache.hadoop.yarn.client.api.YarnClientApplication) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) CapacityScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) Test(org.junit.Test)
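For contrast with the failure path above, a submission to a queue that does exist in the scheduler goes through without the YarnException. A hypothetical sketch of that happy path against the default queue (the class name is made up; the AM container spec is left empty, so only the submission itself is exercised):

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.util.Records;

public class SubmitToDefaultQueueSketch {

    public static void main(String[] args) throws Exception {
        try (MiniYARNCluster cluster = new MiniYARNCluster("submitSketch", 1, 1, 1);
            YarnClient client = YarnClient.createYarnClient()) {
            cluster.init(new YarnConfiguration());
            cluster.start();
            client.init(cluster.getConfig());
            client.start();

            YarnClientApplication app = client.createApplication();
            ApplicationId appId = app.getNewApplicationResponse().getApplicationId();

            ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class);
            context.setApplicationId(appId);
            context.setApplicationName("sketch");
            // "default" exists in the stock scheduler configuration, unlike "nonexist" above.
            context.setQueue("default");
            context.setAMContainerSpec(Records.newRecord(ContainerLaunchContext.class));
            context.setResource(Resource.newInstance(1024, 1));

            // With a valid queue the RM accepts the submission instead of
            // failing with "Failed to submit ..." as in the test above.
            client.submitApplication(context);
        }
    }
}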

Example 29 with MiniYARNCluster

use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

the class TestYarnClient method testReservationDelete.

@Test
public void testReservationDelete() throws Exception {
    MiniYARNCluster cluster = setupMiniYARNCluster();
    YarnClient client = setupYarnClient(cluster);
    try {
        Clock clock = new UTCClock();
        long arrival = clock.getTime();
        long duration = 60000;
        long deadline = (long) (arrival + 1.05 * duration);
        ReservationSubmissionRequest sRequest = submitReservationTestHelper(client, arrival, deadline, duration);
        ReservationId reservationID = sRequest.getReservationId();
        // Delete the reservation
        ReservationDeleteRequest dRequest = ReservationDeleteRequest.newInstance(reservationID);
        ReservationDeleteResponse dResponse = client.deleteReservation(dRequest);
        Assert.assertNotNull(dResponse);
        System.out.println("Delete reservation response: " + dResponse);
        // List reservations, search by non-existent reservationID
        ReservationListRequest request = ReservationListRequest.newInstance(ReservationSystemTestUtil.reservationQ, reservationID.toString(), -1, -1, false);
        ReservationListResponse response = client.listReservations(request);
        Assert.assertNotNull(response);
        Assert.assertEquals(0, response.getReservationAllocationState().size());
    } finally {
        // clean-up
        if (client != null) {
            client.stop();
        }
        cluster.stop();
    }
}
Also used : ReservationListResponse(org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse) ReservationId(org.apache.hadoop.yarn.api.records.ReservationId) ReservationListRequest(org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest) ReservationSubmissionRequest(org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest) ReservationDeleteResponse(org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse) ReservationDeleteRequest(org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) Clock(org.apache.hadoop.yarn.util.Clock) UTCClock(org.apache.hadoop.yarn.util.UTCClock) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) Test(org.junit.Test)
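The delete-then-verify pattern in the test can be factored into a small helper. A hypothetical sketch (the class, method name, and queue parameter are made up; the YARN calls are the ones used above):

import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.client.api.YarnClient;

public final class ReservationCleanupSketch {

    /**
     * Deletes the given reservation and returns how many allocations the RM
     * still reports for its id afterwards (0 means it is really gone).
     */
    static int deleteAndCount(YarnClient client, String queue, ReservationId id) throws Exception {
        ReservationDeleteResponse deleted =
            client.deleteReservation(ReservationDeleteRequest.newInstance(id));
        if (deleted == null) {
            throw new IllegalStateException("deleteReservation returned no response");
        }
        // Search only by the deleted id; -1 for start and end time means no interval filter.
        ReservationListRequest listByDeletedId =
            ReservationListRequest.newInstance(queue, id.toString(), -1, -1, false);
        ReservationListResponse listed = client.listReservations(listByDeletedId);
        return listed.getReservationAllocationState().size();
    }

    private ReservationCleanupSketch() {}
}

Calling it with the id returned by submitReservationTestHelper should yield 0 once the reservation has been removed, matching the assertion in the test.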

Example 30 with MiniYARNCluster

use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.

the class TestYarnClient method testListReservationsByTimeIntervalContainingNoReservations.

@Test
public void testListReservationsByTimeIntervalContainingNoReservations() throws Exception {
    MiniYARNCluster cluster = setupMiniYARNCluster();
    YarnClient client = setupYarnClient(cluster);
    try {
        Clock clock = new UTCClock();
        long arrival = clock.getTime();
        long duration = 60000;
        long deadline = (long) (arrival + 1.05 * duration);
        ReservationSubmissionRequest sRequest = submitReservationTestHelper(client, arrival, deadline, duration);
        // List reservations, search by very large start time.
        ReservationListRequest request = ReservationListRequest.newInstance(ReservationSystemTestUtil.reservationQ, "", Long.MAX_VALUE, -1, false);
        ReservationListResponse response = client.listReservations(request);
        // Ensure all reservations are filtered out.
        Assert.assertNotNull(response);
        Assert.assertEquals(response.getReservationAllocationState().size(), 0);
        duration = 30000;
        deadline = sRequest.getReservationDefinition().getDeadline();
        // List reservations, search by start time after the reservation
        // end time.
        request = ReservationListRequest.newInstance(ReservationSystemTestUtil.reservationQ, "", deadline + duration, deadline + 2 * duration, false);
        response = client.listReservations(request);
        // Ensure all reservations are filtered out.
        Assert.assertNotNull(response);
        Assert.assertEquals(response.getReservationAllocationState().size(), 0);
        arrival = clock.getTime();
        // List reservations, search by end time before the reservation start
        // time.
        request = ReservationListRequest.newInstance(ReservationSystemTestUtil.reservationQ, "", 0, arrival - duration, false);
        response = client.listReservations(request);
        // Ensure all reservations are filtered out.
        Assert.assertNotNull(response);
        Assert.assertEquals(response.getReservationAllocationState().size(), 0);
        // List reservations, search by very small end time.
        request = ReservationListRequest.newInstance(ReservationSystemTestUtil.reservationQ, "", 0, 1, false);
        response = client.listReservations(request);
        // Ensure all reservations are filtered out.
        Assert.assertNotNull(response);
        Assert.assertEquals(response.getReservationAllocationState().size(), 0);
    } finally {
        // clean-up
        if (client != null) {
            client.stop();
        }
        cluster.stop();
    }
}
Also used : ReservationListResponse(org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse) ReservationListRequest(org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest) ReservationSubmissionRequest(org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) Clock(org.apache.hadoop.yarn.util.Clock) UTCClock(org.apache.hadoop.yarn.util.UTCClock) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) Test(org.junit.Test)
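All four queries above exercise the same start/end interval filter of ReservationListRequest. A hypothetical helper that wraps that filter (class and method names are made up):

import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse;
import org.apache.hadoop.yarn.client.api.YarnClient;

public final class ReservationWindowSketch {

    /**
     * Counts reservations in the queue whose allocation overlaps [start, end].
     * An empty reservation id means "do not filter by id"; the final flag
     * controls whether full resource allocations are included in the response.
     */
    static int countInWindow(YarnClient client, String queue, long start, long end) throws Exception {
        ReservationListRequest request =
            ReservationListRequest.newInstance(queue, "", start, end, false);
        ReservationListResponse response = client.listReservations(request);
        return response.getReservationAllocationState().size();
    }

    private ReservationWindowSketch() {}
}

With the reservation from the test in place, a window such as (0, 1) or one entirely after the deadline would be expected to return 0, while a window spanning the reservation's arrival and deadline would return 1.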

Aggregations

MiniYARNCluster (org.apache.hadoop.yarn.server.MiniYARNCluster): 35
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 20
Test (org.junit.Test): 19
Configuration (org.apache.hadoop.conf.Configuration): 17
YarnClient (org.apache.hadoop.yarn.client.api.YarnClient): 15
ReservationSubmissionRequest (org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest): 7
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 7
Clock (org.apache.hadoop.yarn.util.Clock): 7
UTCClock (org.apache.hadoop.yarn.util.UTCClock): 7
ReservationListRequest (org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest): 5
ReservationListResponse (org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse): 5
CapacitySchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration): 5
BeforeClass (org.junit.BeforeClass): 5
File (java.io.File): 4
ReservationId (org.apache.hadoop.yarn.api.records.ReservationId): 4
FileOutputStream (java.io.FileOutputStream): 3
IOException (java.io.IOException): 3
OutputStream (java.io.OutputStream): 3
URL (java.net.URL): 3
Path (org.apache.hadoop.fs.Path): 3