
Example 41 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class TestLeafQueue, method testSingleQueueWithMultipleUsers:

@Test
public void testSingleQueueWithMultipleUsers() throws Exception {
    // Mock the queue
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    //unset maxCapacity
    a.setMaxCapacity(1.0f);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    final String user_2 = "user_2";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    // same user
    a.submitApplicationAttempt(app_1, user_0);
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_2, user_1);
    final ApplicationAttemptId appAttemptId_3 = TestUtils.getMockApplicationAttemptId(3, 0);
    FiCaSchedulerApp app_3 = new FiCaSchedulerApp(appAttemptId_3, user_2, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_3, user_2);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
            app_0.getApplicationAttemptId(), app_0,
            app_1.getApplicationAttemptId(), app_1,
            app_2.getApplicationAttemptId(), app_2,
            app_3.getApplicationAttemptId(), app_3);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
    final int numNodes = 1;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 10, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 10, true, priority, recordFactory)));
    /** 
     * Start testing... 
     */
    // Only 1 container
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(1 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // The 2nd container also fits: minCapacity = 1024 since (0.1 * 8G) < minAlloc,
    // and a user may get one container more than the user-limit
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Can't allocate 3rd due to user-limit
    a.setUserLimit(25);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Submit resource requests for other apps now to 'activate' them
    app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 1, true, priority, recordFactory)));
    app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    // Now allocations should go to app_2 since
    // user_0 is at its limit in spite of the high user-limit-factor
    a.setUserLimitFactor(10);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Now allocations should go to app_0 since
    // user_0 is at its user-limit, not above it
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(6 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Test max-capacity
    // Now - no more allocs since we are at max-cap
    a.setMaxCapacity(0.5f);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(6 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Revert max-capacity and user-limit-factor
    // Now, allocations should go to app_3 since it's under its user-limit
    a.setMaxCapacity(1.0f);
    a.setUserLimitFactor(1);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(7 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Now we should assign to app_3 again since user_2 is under user-limit
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Release each container from app_0
    for (RMContainer rmContainer : app_0.getLiveContainers()) {
        a.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Release each container from app_2
    for (RMContainer rmContainer : app_2.getLiveContainers()) {
        a.completedContainer(clusterResource, app_2, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Release each container from app_3
    for (RMContainer rmContainer : app_3.getLiveContainers()) {
        a.completedContainer(clusterResource, app_3, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    assertEquals(0 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Test (org.junit.Test)
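The user-limit assertions above follow from how the CapacityScheduler derives a per-user share. A minimal sketch of that idea in plain Java (illustrative only: the method name, the simplified formula, and the numbers are assumptions, not the actual LeafQueue/UsersManager code, which also accounts for minimum allocation, partitions, and pending resources):

public final class UserLimitSketch {
    static long userLimitMB(long queueUsedMB, int userLimitPercent,
                            float userLimitFactor, long queueCapacityMB,
                            int activeUsers) {
        // Demand-based share: split current usage among active users, but
        // never below minimum-user-limit-percent of the queue's capacity.
        long share = Math.max(queueUsedMB / Math.max(activeUsers, 1),
                queueCapacityMB * userLimitPercent / 100);
        // A single user may not exceed user-limit-factor times queue capacity.
        long cap = (long) (queueCapacityMB * userLimitFactor);
        return Math.min(share, cap);
    }

    public static void main(String[] args) {
        // Roughly mirrors the test: user-limit 25% on an 8 GB queue with
        // three active users.
        System.out.println(userLimitMB(2048, 25, 1.0f, 8192, 3)); // 2048
    }
}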

Example 42 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class TestLeafQueue, method testUserHeadroomMultiApp:

@Test
public void testUserHeadroomMultiApp() throws Exception {
    // Mock the queue
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    //unset maxCapacity
    a.setMaxCapacity(1.0f);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    // same user
    a.submitApplicationAttempt(app_1, user_0);
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_2, user_1);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 16 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 16 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1, app_2.getApplicationAttemptId(), app_2);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (16 * GB), 1);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)));
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(1 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Headroom is now the same for all apps for a given user + queue combo,
    // and a change to any app's headroom is reflected in all of that user's
    // apps once they are active, i.e. have themselves calculated headroom
    // for allocation at least once
    assertEquals(2 * GB, app_0.getHeadroom().getMemorySize());
    // not yet active
    assertEquals(0 * GB, app_1.getHeadroom().getMemorySize());
    // not yet active
    assertEquals(0 * GB, app_2.getHeadroom().getMemorySize());
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_0.getHeadroom().getMemorySize());
    // now active
    assertEquals(1 * GB, app_1.getHeadroom().getMemorySize());
    // not yet active
    assertEquals(0 * GB, app_2.getHeadroom().getMemorySize());
    // Complete a container and verify that headroom is updated for both of
    // the user's apps
    RMContainer rmContainer = app_0.getLiveContainers().iterator().next();
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    assertEquals(2 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(2 * GB, app_1.getHeadroom().getMemorySize());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Test (org.junit.Test)
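What the headroom assertions exercise is that headroom is computed per user, not per application, and is bounded by both the user limit and the queue's remaining room. A rough plain-Java sketch (the signature and numbers are illustrative assumptions; the real logic lives in the scheduler and is shared by all of a user's apps, which is why app_1's headroom moves when app_0's container completes):

public final class HeadroomSketch {
    static long headroomMB(long userLimitMB, long userConsumedMB,
                           long queueMaxMB, long queueUsedMB) {
        long userRoom = userLimitMB - userConsumedMB;
        long queueRoom = queueMaxMB - queueUsedMB;
        // Headroom never goes negative.
        return Math.max(0, Math.min(userRoom, queueRoom));
    }

    public static void main(String[] args) {
        // Releasing a 1 GB container lowers both consumed figures, raising
        // headroom for *all* of the user's active apps at once.
        System.out.println(headroomMB(3072, 1024, 32768, 1024)); // 2048
    }
}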

Example 43 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class TestParentQueue, method testSingleLevelQueues:

@Test
public void testSingleLevelQueues() throws Exception {
    // Setup queue configs
    setupSingleLevelQueues(csConf);
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
    // Setup some nodes
    final int memoryPerNode = 10;
    final int coresPerNode = 16;
    final int numNodes = 2;
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode * GB);
    FiCaSchedulerNode node_1 = TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode * GB);
    final Resource clusterResource = Resources.createResource(numNodes * (memoryPerNode * GB), numNodes * coresPerNode);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Start testing
    LeafQueue a = (LeafQueue) queues.get(A);
    LeafQueue b = (LeafQueue) queues.get(B);
    a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    // Simulate B returning a container on node_0
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 0 * GB, clusterResource);
    verifyQueueMetrics(b, 1 * GB, clusterResource);
    // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G
    stubQueueAllocation(a, clusterResource, node_1, 2 * GB);
    stubQueueAllocation(b, clusterResource, node_1, 1 * GB);
    root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    InOrder allocationOrder = inOrder(a, b);
    allocationOrder.verify(a).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    // Now, B should get the scheduling opportunity 
    // since A has 2/6G while B has 2/14G
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 2 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder = inOrder(b, a);
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    allocationOrder.verify(a).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 4 * GB, clusterResource);
    // Now, B should still get the scheduling opportunity 
    // since A has 3/6G while B has 4/14G
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 4 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder = inOrder(b, a);
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    allocationOrder.verify(a).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 8 * GB, clusterResource);
    // Now, A should get the scheduling opportunity 
    // since A has 3/6G while B has 8/14G
    stubQueueAllocation(a, clusterResource, node_1, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_1, 1 * GB);
    root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder = inOrder(a, b);
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    allocationOrder.verify(a).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    verifyQueueMetrics(a, 4 * GB, clusterResource);
    verifyQueueMetrics(b, 9 * GB, clusterResource);
}
Also used: InOrder (org.mockito.InOrder), FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), HashMap (java.util.HashMap), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), Resource (org.apache.hadoop.yarn.api.records.Resource), PlacementSet (org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet), Test (org.junit.Test)
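The alternating verify order in this test reflects ParentQueue offering the node to its most under-served child first. A toy version of that ordering (the Child type and the raw used/capacity ratio are simplifying assumptions; the real scheduler compares normalized used capacities per partition):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public final class ChildOrderSketch {
    static final class Child {
        final String name;
        final double usedGB, capacityGB;
        Child(String name, double usedGB, double capacityGB) {
            this.name = name;
            this.usedGB = usedGB;
            this.capacityGB = capacityGB;
        }
        double usedRatio() { return usedGB / capacityGB; }
    }

    // Offer the node to the least-utilized child first.
    static List<Child> schedulingOrder(List<Child> children) {
        List<Child> order = new ArrayList<>(children);
        order.sort(Comparator.comparingDouble(Child::usedRatio));
        return order;
    }

    public static void main(String[] args) {
        // A at 3/6G (50%) vs B at 8/14G (~57%): A is offered first, matching
        // the final block of assertions above.
        List<Child> order = schedulingOrder(List.of(
                new Child("a", 3, 6), new Child("b", 8, 14)));
        System.out.println(order.get(0).name); // a
    }
}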

Example 44 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project weave by continuuity.

From class ApplicationMasterService, method initContainerRequests:

private Queue<RunnableContainerRequest> initContainerRequests() {
    // Stores container requests in submission order.
    Queue<RunnableContainerRequest> requests = Lists.newLinkedList();
    // For each order in the weaveSpec, create container request for each runnable.
    for (WeaveSpecification.Order order : weaveSpec.getOrders()) {
        // Group container requests based on resource requirement.
        ImmutableMultimap.Builder<Resource, RuntimeSpecification> builder = ImmutableMultimap.builder();
        for (String runnableName : order.getNames()) {
            RuntimeSpecification runtimeSpec = weaveSpec.getRunnables().get(runnableName);
            Resource capability = createCapability(runtimeSpec.getResourceSpecification());
            builder.put(capability, runtimeSpec);
        }
        requests.add(new RunnableContainerRequest(order.getType(), builder.build()));
    }
    return requests;
}
Also used: Resource (org.apache.hadoop.yarn.api.records.Resource), WeaveSpecification (com.continuuity.weave.api.WeaveSpecification), ImmutableMultimap (com.google.common.collect.ImmutableMultimap), RuntimeSpecification (com.continuuity.weave.api.RuntimeSpecification)
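Grouping by Resource in the multimap works because YARN's Resource records have value semantics: equals and hashCode are defined over the resource values, so runnables with identical requirements collapse into one request group. A quick check (assuming a YARN version that provides the Resource.newInstance(int, int) factory):

import org.apache.hadoop.yarn.api.records.Resource;

public final class CapabilityGrouping {
    public static void main(String[] args) {
        // Two capabilities with identical memory/vcores compare equal, so
        // ImmutableMultimap groups their runnables under a single key.
        Resource r1 = Resource.newInstance(1024, 1);
        Resource r2 = Resource.newInstance(1024, 1);
        System.out.println(r1.equals(r2));                  // true
        System.out.println(r1.hashCode() == r2.hashCode()); // true
    }
}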

Example 45 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project weave by continuuity.

From class ApplicationMasterProcessLauncher, method doLaunch:

@Override
@SuppressWarnings("unchecked")
protected <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext) {
    final ApplicationId appId = getContainerInfo();
    // Set the resource requirement for AM
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(Constants.APP_MASTER_MEMORY_MB);
    YarnUtils.setVirtualCores(capability, 1);
    // Put in extra environments
    Map<String, String> env = ImmutableMap.<String, String>builder()
            .putAll(launchContext.getEnvironment())
            .put(EnvKeys.YARN_APP_ID, Integer.toString(appId.getId()))
            .put(EnvKeys.YARN_APP_ID_CLUSTER_TIME, Long.toString(appId.getClusterTimestamp()))
            .put(EnvKeys.YARN_APP_ID_STR, appId.toString())
            .put(EnvKeys.YARN_CONTAINER_MEMORY_MB, Integer.toString(Constants.APP_MASTER_MEMORY_MB))
            .put(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES, Integer.toString(YarnUtils.getVirtualCores(capability)))
            .build();
    launchContext.setEnvironment(env);
    return (ProcessController<R>) submitter.submit(launchContext, capability);
}
Also used: ProcessController (com.continuuity.weave.internal.ProcessController), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
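The Records.newRecord-plus-setters pattern above predates the convenience factory; on YARN versions that provide it, the same AM capability can be built in one call. A sketch (512 is a stand-in for Constants.APP_MASTER_MEMORY_MB from the snippet above, not a value from the source):

import org.apache.hadoop.yarn.api.records.Resource;

public final class AmCapability {
    static Resource amCapability() {
        // Memory in MB plus one virtual core, equivalent to the setters above.
        return Resource.newInstance(512, 1);
    }
}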

Aggregations

Resource (org.apache.hadoop.yarn.api.records.Resource): 500
Test (org.junit.Test): 190
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 89
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 82
Priority (org.apache.hadoop.yarn.api.records.Priority): 80
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 67
HashMap (java.util.HashMap): 62
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 57
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 55
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 53
ArrayList (java.util.ArrayList): 49
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 48
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 45
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 43
Container (org.apache.hadoop.yarn.api.records.Container): 42
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 42
Configuration (org.apache.hadoop.conf.Configuration): 34
IOException (java.io.IOException): 33
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 33
Map (java.util.Map): 29