
Example 16 with ResourceScheduler

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache: class TestCapacityScheduler, method testMoveAppSuccess.

@Test
public void testMoveAppSuccess() throws Exception {
    ResourceScheduler scheduler = resourceManager.getResourceScheduler();
    // Register node1
    String host_0 = "host_0";
    NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(5 * GB, 1));
    // Register node2
    String host_1 = "host_1";
    NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(5 * GB, 1));
    // ResourceRequest priorities
    Priority priority_0 = Priority.newInstance(0);
    Priority priority_1 = Priority.newInstance(1);
    // Submit application_0
    Application application_0 = new Application("user_0", "a1", resourceManager);
    // app + app attempt event sent to scheduler
    application_0.submit();
    application_0.addNodeManager(host_0, 1234, nm_0);
    application_0.addNodeManager(host_1, 1234, nm_1);
    Resource capability_0_0 = Resources.createResource(3 * GB, 1);
    application_0.addResourceRequestSpec(priority_1, capability_0_0);
    Resource capability_0_1 = Resources.createResource(2 * GB, 1);
    application_0.addResourceRequestSpec(priority_0, capability_0_1);
    Task task_0_0 = new Task(application_0, priority_1, new String[] { host_0, host_1 });
    application_0.addTask(task_0_0);
    // Submit application_1
    Application application_1 = new Application("user_1", "b2", resourceManager);
    // app + app attempt event sent to scheduler
    application_1.submit();
    application_1.addNodeManager(host_0, 1234, nm_0);
    application_1.addNodeManager(host_1, 1234, nm_1);
    Resource capability_1_0 = Resources.createResource(1 * GB, 1);
    application_1.addResourceRequestSpec(priority_1, capability_1_0);
    Resource capability_1_1 = Resources.createResource(2 * GB, 1);
    application_1.addResourceRequestSpec(priority_0, capability_1_1);
    Task task_1_0 = new Task(application_1, priority_1, new String[] { host_0, host_1 });
    application_1.addTask(task_1_0);
    // Send resource requests to the scheduler
    // allocate
    application_0.schedule();
    // allocate
    application_1.schedule();
    // b2 can only run 1 app at a time
    scheduler.moveApplication(application_0.getApplicationId(), "b2");
    nodeUpdate(nm_0);
    nodeUpdate(nm_1);
    // Get allocations from the scheduler
    // task_0_0
    application_0.schedule();
    checkApplicationResourceUsage(0 * GB, application_0);
    // task_1_0
    application_1.schedule();
    checkApplicationResourceUsage(1 * GB, application_1);
    // Only task_1_0 (1 GB) was allocated; application_0 was moved to b2, which
    // allows at most one running application, so its request is not scheduled.
    checkNodeResourceUsage(1 * GB, nm_0);
    checkNodeResourceUsage(0 * GB, nm_1);
    // lets move application_0 to a queue where it can run
    scheduler.moveApplication(application_0.getApplicationId(), "a2");
    application_0.schedule();
    nodeUpdate(nm_1);
    // Get allocations from the scheduler
    // task_0_0
    application_0.schedule();
    checkApplicationResourceUsage(3 * GB, application_0);
    checkNodeResourceUsage(1 * GB, nm_0);
    checkNodeResourceUsage(3 * GB, nm_1);
}
Also used : NodeManager(org.apache.hadoop.yarn.server.resourcemanager.NodeManager) Task(org.apache.hadoop.yarn.server.resourcemanager.Task) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) Application(org.apache.hadoop.yarn.server.resourcemanager.Application) SchedulerApplication(org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication) Test(org.junit.Test)
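
The move in this test goes through the scheduler's moveApplication call, which the CapacityScheduler implements. Below is a minimal sketch of the same call outside the test harness; the helper class, the ResourceManager instance, and the target queue name are assumptions for illustration, not part of the example above.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;

public final class MoveAppSketch {

    // Hypothetical helper: move an already-submitted application to another queue.
    static void moveToQueue(ResourceManager rm, ApplicationId appId, String targetQueue)
            throws YarnException {
        ResourceScheduler scheduler = rm.getResourceScheduler();
        // Rejected moves (e.g. the target queue is STOPPED or over its limits)
        // surface as a YarnException; the application then stays in its original queue.
        scheduler.moveApplication(appId, targetQueue);
    }
}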

Example 17 with ResourceScheduler

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache: class TestCapacityScheduler, method testConfValidation.

@Test(timeout = 30000)
public void testConfValidation() throws Exception {
    ResourceScheduler scheduler = new CapacityScheduler();
    scheduler.setRMContext(resourceManager.getRMContext());
    Configuration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
    try {
        scheduler.reinitialize(conf, mockContext);
        fail("Exception is expected because the min memory allocation is" + " larger than the max memory allocation.");
    } catch (YarnRuntimeException e) {
        // Exception is expected.
        assertTrue("The thrown exception is not the expected one.", e.getMessage().startsWith("Invalid resource scheduler memory"));
    }
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 2);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 1);
    try {
        scheduler.reinitialize(conf, mockContext);
        fail("Exception is expected because the min vcores allocation is" + " larger than the max vcores allocation.");
    } catch (YarnRuntimeException e) {
        // Exception is expected.
        assertTrue("The thrown exception is not the expected one.", e.getMessage().startsWith("Invalid resource scheduler vcores"));
    }
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) Test(org.junit.Test)
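
The validation exercised above applies to the standard RM allocation bounds: the minimum allocation must not exceed the maximum, for memory and vcores alike. A small sketch of a configuration the scheduler accepts follows; the concrete values are illustrative only.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public final class AllocationBoundsSketch {

    // Builds allocation bounds that pass the scheduler's sanity check:
    // minimum <= maximum for both memory (MB) and virtual cores.
    static YarnConfiguration validAllocationBounds() {
        YarnConfiguration conf = new YarnConfiguration();
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 1024);
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 8192);
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 1);
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 4);
        return conf;
    }
}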

Example 18 with ResourceScheduler

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache: class TestCapacityScheduler, method testMoveAppViolateQueueState.

@Test(expected = YarnException.class)
public void testMoveAppViolateQueueState() throws Exception {
    resourceManager = new ResourceManager() {

        @Override
        protected RMNodeLabelsManager createNodeLabelManager() {
            RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
            mgr.init(getConfig());
            return mgr;
        }
    };
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setupQueueConfiguration(csConf);
    StringBuilder qState = new StringBuilder();
    qState.append(CapacitySchedulerConfiguration.PREFIX).append(B).append(CapacitySchedulerConfiguration.DOT).append(CapacitySchedulerConfiguration.STATE);
    csConf.set(qState.toString(), QueueState.STOPPED.name());
    YarnConfiguration conf = new YarnConfiguration(csConf);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    resourceManager.init(conf);
    resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
    resourceManager.getRMContext().getNMTokenSecretManager().rollMasterKey();
    ((AsyncDispatcher) resourceManager.getRMContext().getDispatcher()).start();
    mockContext = mock(RMContext.class);
    when(mockContext.getConfigurationProvider()).thenReturn(new LocalConfigurationProvider());
    ResourceScheduler scheduler = resourceManager.getResourceScheduler();
    // Register node1
    String host_0 = "host_0";
    NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(6 * GB, 1));
    // ResourceRequest priorities
    Priority priority_0 = Priority.newInstance(0);
    Priority priority_1 = Priority.newInstance(1);
    // Submit application_0
    Application application_0 = new Application("user_0", "a1", resourceManager);
    // app + app attempt event sent to scheduler
    application_0.submit();
    application_0.addNodeManager(host_0, 1234, nm_0);
    Resource capability_0_0 = Resources.createResource(3 * GB, 1);
    application_0.addResourceRequestSpec(priority_1, capability_0_0);
    Resource capability_0_1 = Resources.createResource(2 * GB, 1);
    application_0.addResourceRequestSpec(priority_0, capability_0_1);
    Task task_0_0 = new Task(application_0, priority_1, new String[] { host_0 });
    application_0.addTask(task_0_0);
    // Send resource requests to the scheduler
    // allocate
    application_0.schedule();
    // task_0_0 allocated
    nodeUpdate(nm_0);
    // Get allocations from the scheduler
    // task_0_0
    application_0.schedule();
    checkApplicationResourceUsage(3 * GB, application_0);
    checkNodeResourceUsage(3 * GB, nm_0);
    // Queue b (and therefore its child b1) was configured with state STOPPED above,
    // so this move should be rejected with the expected YarnException.
    scheduler.moveApplication(application_0.getApplicationId(), "b1");
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) Task(org.apache.hadoop.yarn.server.resourcemanager.Task) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ResourceManager(org.apache.hadoop.yarn.server.resourcemanager.ResourceManager) NodeManager(org.apache.hadoop.yarn.server.resourcemanager.NodeManager) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) LocalConfigurationProvider(org.apache.hadoop.yarn.LocalConfigurationProvider) NullRMNodeLabelsManager(org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager) Application(org.apache.hadoop.yarn.server.resourcemanager.Application) SchedulerApplication(org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication) RMNodeLabelsManager(org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager) NullRMNodeLabelsManager(org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager) Test(org.junit.Test)
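
The StringBuilder in this example assembles the per-queue state property for the CapacityScheduler configuration. A short sketch of the equivalent direct call follows; the queue path "root.b" is an assumption based on the usual setupQueueConfiguration layout and may differ in the actual test.

import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

public final class StoppedQueueConfSketch {

    // Marks queue "root.b" as STOPPED so it no longer accepts new or moved
    // applications; the key resolves to "yarn.scheduler.capacity.root.b.state".
    static CapacitySchedulerConfiguration stopQueueB() {
        CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
        csConf.set("yarn.scheduler.capacity.root.b.state", QueueState.STOPPED.name());
        return csConf;
    }
}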

Example 19 with ResourceScheduler

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache: class TestRMWebServicesNodes, method verifyNodeInfoGeneric.

public void verifyNodeInfoGeneric(RMNode node, String state, String rack, String id,
        String nodeHostName, String nodeHTTPAddress, long lastHealthUpdate, String healthReport,
        int numContainers, long usedMemoryMB, long availMemoryMB, long usedVirtualCores,
        long availVirtualCores, String version, int nodePhysicalMemoryMB, int nodeVirtualMemoryMB,
        double nodeCPUUsage, int containersPhysicalMemoryMB, int containersVirtualMemoryMB,
        double containersCPUUsage, int numRunningOpportContainers, long usedMemoryOpportGB,
        int usedVirtualCoresOpport, int numQueuedContainers) throws JSONException, Exception {
    ResourceScheduler sched = rm.getResourceScheduler();
    SchedulerNodeReport report = sched.getNodeReport(node.getNodeID());
    OpportunisticContainersStatus opportunisticStatus = node.getOpportunisticContainersStatus();
    WebServicesTestUtils.checkStringMatch("state", node.getState().toString(), state);
    WebServicesTestUtils.checkStringMatch("rack", node.getRackName(), rack);
    WebServicesTestUtils.checkStringMatch("id", node.getNodeID().toString(), id);
    WebServicesTestUtils.checkStringMatch("nodeHostName", node.getNodeID().getHost(), nodeHostName);
    WebServicesTestUtils.checkStringMatch("healthReport", String.valueOf(node.getHealthReport()), healthReport);
    String expectedHttpAddress = node.getNodeID().getHost() + ":" + node.getHttpPort();
    WebServicesTestUtils.checkStringMatch("nodeHTTPAddress", expectedHttpAddress, nodeHTTPAddress);
    WebServicesTestUtils.checkStringMatch("version", node.getNodeManagerVersion(), version);
    if (node.getNodeUtilization() != null) {
        ResourceUtilization nodeResource = ResourceUtilization.newInstance(nodePhysicalMemoryMB, nodeVirtualMemoryMB, (float) nodeCPUUsage);
        assertEquals("nodeResourceUtilization doesn't match", node.getNodeUtilization(), nodeResource);
    }
    if (node.getAggregatedContainersUtilization() != null) {
        ResourceUtilization containerResource = ResourceUtilization.newInstance(containersPhysicalMemoryMB, containersVirtualMemoryMB, (float) containersCPUUsage);
        assertEquals("containerResourceUtilization doesn't match", node.getAggregatedContainersUtilization(), containerResource);
    }
    long expectedHealthUpdate = node.getLastHealthReportTime();
    assertEquals("lastHealthUpdate doesn't match, got: " + lastHealthUpdate + " expected: " + expectedHealthUpdate, expectedHealthUpdate, lastHealthUpdate);
    if (report != null) {
        assertEquals("numContainers doesn't match: " + numContainers, report.getNumContainers(), numContainers);
        assertEquals("usedMemoryMB doesn't match: " + usedMemoryMB, report.getUsedResource().getMemorySize(), usedMemoryMB);
        assertEquals("availMemoryMB doesn't match: " + availMemoryMB, report.getAvailableResource().getMemorySize(), availMemoryMB);
        assertEquals("usedVirtualCores doesn't match: " + usedVirtualCores, report.getUsedResource().getVirtualCores(), usedVirtualCores);
        assertEquals("availVirtualCores doesn't match: " + availVirtualCores, report.getAvailableResource().getVirtualCores(), availVirtualCores);
    }
    if (opportunisticStatus != null) {
        assertEquals("numRunningOpportContainers doesn't match: " + numRunningOpportContainers, opportunisticStatus.getRunningOpportContainers(), numRunningOpportContainers);
        assertEquals("usedMemoryOpportGB doesn't match: " + usedMemoryOpportGB, opportunisticStatus.getOpportMemoryUsed(), usedMemoryOpportGB);
        assertEquals("usedVirtualCoresOpport doesn't match: " + usedVirtualCoresOpport, opportunisticStatus.getOpportCoresUsed(), usedVirtualCoresOpport);
        assertEquals("numQueuedContainers doesn't match: " + numQueuedContainers, opportunisticStatus.getQueuedOpportContainers(), numQueuedContainers);
    }
}
Also used : OpportunisticContainersStatus(org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus) SchedulerNodeReport(org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) ResourceUtilization(org.apache.hadoop.yarn.api.records.ResourceUtilization)
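
The checks above compare the web service response against what the scheduler itself reports for the node. A minimal sketch of reading that report directly follows; the helper and the way the ResourceManager and NodeId are obtained are assumptions for illustration.

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;

public final class NodeUsageSketch {

    // Prints the scheduler's view of a node: used/available resources and
    // the number of containers currently running on it.
    static void printNodeUsage(ResourceManager rm, NodeId nodeId) {
        ResourceScheduler sched = rm.getResourceScheduler();
        SchedulerNodeReport report = sched.getNodeReport(nodeId);
        if (report == null) {
            // The scheduler has no record of this node (e.g. it never registered).
            return;
        }
        System.out.println("used       = " + report.getUsedResource());
        System.out.println("available  = " + report.getAvailableResource());
        System.out.println("containers = " + report.getNumContainers());
    }
}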

Example 20 with ResourceScheduler

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache: class TestDistributedScheduling, method testAMOpportunistic.

/**
 * Check if an AM can ask for opportunistic containers and get them.
 * @throws Exception
 */
@Test
public void testAMOpportunistic() throws Exception {
    // Basic container to request
    Resource capability = Resource.newInstance(1024, 1);
    Priority priority = Priority.newInstance(1);
    // Get the cluster topology
    List<NodeReport> nodeReports = rmClient.getNodeReports(NodeState.RUNNING);
    String node = nodeReports.get(0).getNodeId().getHost();
    String rack = nodeReports.get(0).getRackName();
    String[] nodes = new String[] { node };
    String[] racks = new String[] { rack };
    // Create an AM to request resources
    AMRMClient<AMRMClient.ContainerRequest> amClient = null;
    try {
        amClient = new AMRMClientImpl<AMRMClient.ContainerRequest>(client);
        amClient.init(yarnConf);
        amClient.start();
        amClient.registerApplicationMaster(NetUtils.getHostname(), 1024, "");
        // AM requests an opportunistic container
        ExecutionTypeRequest execTypeRequest = ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true);
        ContainerRequest containerRequest = new AMRMClient.ContainerRequest(capability, nodes, racks, priority, 0, true, null, execTypeRequest);
        amClient.addContainerRequest(containerRequest);
        // Wait until the container is allocated
        ContainerId opportunisticContainerId = null;
        for (int i = 0; i < 10 && opportunisticContainerId == null; i++) {
            AllocateResponse allocResponse = amClient.allocate(0.1f);
            List<Container> allocatedContainers = allocResponse.getAllocatedContainers();
            for (Container allocatedContainer : allocatedContainers) {
                // Check that this is the container we required
                assertEquals(ExecutionType.OPPORTUNISTIC, allocatedContainer.getExecutionType());
                opportunisticContainerId = allocatedContainer.getId();
            }
            sleep(100);
        }
        assertNotNull(opportunisticContainerId);
        // The RM sees the container as OPPORTUNISTIC
        ResourceScheduler scheduler = cluster.getResourceManager().getResourceScheduler();
        RMContainer rmContainer = scheduler.getRMContainer(opportunisticContainerId);
        assertEquals(ExecutionType.OPPORTUNISTIC, rmContainer.getExecutionType());
        // Release the opportunistic container
        amClient.releaseAssignedContainer(opportunisticContainerId);
        // Wait for the release container to appear
        boolean released = false;
        for (int i = 0; i < 10 && !released; i++) {
            AllocateResponse allocResponse = amClient.allocate(0.1f);
            List<ContainerStatus> completedContainers = allocResponse.getCompletedContainersStatuses();
            for (ContainerStatus completedContainer : completedContainers) {
                ContainerId completedContainerId = completedContainer.getContainerId();
                assertEquals(completedContainerId, opportunisticContainerId);
                released = true;
            }
            if (!released) {
                sleep(100);
            }
        }
        assertTrue(released);
        // The RM shouldn't see the container anymore
        rmContainer = scheduler.getRMContainer(opportunisticContainerId);
        assertNull(rmContainer);
        // Clean the AM
        amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
    } finally {
        if (amClient != null && amClient.getServiceState() == Service.STATE.STARTED) {
            amClient.close();
        }
    }
}
Also used : ExecutionTypeRequest(org.apache.hadoop.yarn.api.records.ExecutionTypeRequest) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) AllocateResponse(org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ContainerRequest(org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) NodeReport(org.apache.hadoop.yarn.api.records.NodeReport) Test(org.junit.Test)
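
The key piece of this example is the container request itself: an ExecutionTypeRequest of type OPPORTUNISTIC with enforcement enabled, wrapped in an AMRMClient.ContainerRequest. A small sketch of just that request follows; the capability, priority, and locality settings are illustrative assumptions.

import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;

public final class OpportunisticRequestSketch {

    // Builds a request for one OPPORTUNISTIC container; the boolean "true" asks
    // the scheduler to honor the execution type strictly rather than promoting
    // the request to GUARANTEED.
    static AMRMClient.ContainerRequest opportunisticRequest() {
        Resource capability = Resource.newInstance(1024, 1);
        Priority priority = Priority.newInstance(1);
        ExecutionTypeRequest execType =
            ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true);
        // No node/rack constraints, allocationRequestId 0, relaxLocality true,
        // no node-label expression -- the same constructor shape as in the test above.
        return new AMRMClient.ContainerRequest(
            capability, null, null, priority, 0, true, null, execType);
    }
}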

Aggregations

ResourceScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) 51
Test (org.junit.Test) 22
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) 12
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 11
Resource (org.apache.hadoop.yarn.api.records.Resource) 10
Configuration (org.apache.hadoop.conf.Configuration) 8
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 8
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) 8
CapacityScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler) 8
Container (org.apache.hadoop.yarn.api.records.Container) 7
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) 7
AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse) 6
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId) 6
Priority (org.apache.hadoop.yarn.api.records.Priority) 6
Application (org.apache.hadoop.yarn.server.resourcemanager.Application) 6
ResourceManager (org.apache.hadoop.yarn.server.resourcemanager.ResourceManager) 6
IOException (java.io.IOException) 5
ArrayList (java.util.ArrayList) 5
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 5
YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException) 5