Example 1 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project flink by apache.

The class AbstractYarnClusterDescriptor, method getClusterDescription:

@Override
public String getClusterDescription() {
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream ps = new PrintStream(baos);
        YarnClient yarnClient = getYarnClient();
        // query cluster-wide metrics, then per-node reports for all RUNNING nodes
        YarnClusterMetrics metrics = yarnClient.getYarnClusterMetrics();
        ps.append("NodeManagers in the ClusterClient " + metrics.getNumNodeManagers());
        List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
        final String format = "|%-16s |%-16s %n";
        ps.printf("|Property         |Value          %n");
        ps.println("+---------------------------------------+");
        int totalMemory = 0;
        int totalCores = 0;
        for (NodeReport rep : nodes) {
            final Resource res = rep.getCapability();
            totalMemory += res.getMemory();
            totalCores += res.getVirtualCores();
            ps.format(format, "NodeID", rep.getNodeId());
            ps.format(format, "Memory", res.getMemory() + " MB");
            ps.format(format, "vCores", res.getVirtualCores());
            ps.format(format, "HealthReport", rep.getHealthReport());
            ps.format(format, "Containers", rep.getNumContainers());
            ps.println("+---------------------------------------+");
        }
        ps.println("Summary: totalMemory " + totalMemory + " totalCores " + totalCores);
        List<QueueInfo> qInfo = yarnClient.getAllQueues();
        for (QueueInfo q : qInfo) {
            ps.println("Queue: " + q.getQueueName() + ", Current Capacity: " + q.getCurrentCapacity() + " Max Capacity: " + q.getMaximumCapacity() + " Applications: " + q.getApplications().size());
        }
        yarnClient.stop();
        return baos.toString();
    } catch (Exception e) {
        throw new RuntimeException("Couldn't get cluster description", e);
    }
}
Also used: QueueInfo (org.apache.hadoop.yarn.api.records.QueueInfo), PrintStream (java.io.PrintStream), YarnClusterMetrics (org.apache.hadoop.yarn.api.records.YarnClusterMetrics), Resource (org.apache.hadoop.yarn.api.records.Resource), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), ByteArrayOutputStream (java.io.ByteArrayOutputStream), YarnClient (org.apache.hadoop.yarn.client.api.YarnClient), URISyntaxException (java.net.URISyntaxException), InvocationTargetException (java.lang.reflect.InvocationTargetException), IllegalConfigurationException (org.apache.flink.configuration.IllegalConfigurationException), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), IOException (java.io.IOException), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport).
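
For orientation, the pattern above can be reduced to a small standalone driver. The following is a sketch, not Flink code: the class name ListRunningNodes is invented here, and it assumes a YARN configuration on the classpath and a reachable ResourceManager.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class ListRunningNodes {
    public static void main(String[] args) throws IOException, YarnException {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();
        try {
            // Only nodes currently in the RUNNING state are returned.
            List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
            for (NodeReport node : nodes) {
                System.out.println(node.getNodeId() + " capability=" + node.getCapability());
            }
        } finally {
            yarnClient.stop();
        }
    }
}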

Example 2 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project hadoop by apache.

The class TestJobImpl, method testUnusableNodeTransition:

@Test(timeout = 20000)
public void testUnusableNodeTransition() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    // add a special task event handler to put the task back to running in case
    // of task rescheduling/killing
    EventHandler<TaskAttemptEvent> taskAttemptEventHandler = new EventHandler<TaskAttemptEvent>() {

        @Override
        public void handle(TaskAttemptEvent event) {
            if (event.getType() == TaskAttemptEventType.TA_KILL) {
                job.decrementSucceededMapperCount();
            }
        }
    };
    dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler);
    // replace the tasks with spied versions to return the right attempts
    Map<TaskId, Task> spiedTasks = new HashMap<TaskId, Task>();
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    Map<NodeReport, TaskId> nodeReportsToTaskIds = new HashMap<NodeReport, TaskId>();
    for (Map.Entry<TaskId, Task> e : job.tasks.entrySet()) {
        TaskId taskId = e.getKey();
        Task task = e.getValue();
        if (taskId.getTaskType() == TaskType.MAP) {
            // add an attempt to the task to simulate nodes
            NodeId nodeId = mock(NodeId.class);
            TaskAttempt attempt = mock(TaskAttempt.class);
            when(attempt.getNodeId()).thenReturn(nodeId);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            when(attempt.getID()).thenReturn(attemptId);
            // create a spied task
            Task spied = spy(task);
            doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class));
            spiedTasks.put(taskId, spied);
            // create a NodeReport based on the node id
            NodeReport report = mock(NodeReport.class);
            when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
            when(report.getNodeId()).thenReturn(nodeId);
            nodeReports.add(report);
            nodeReportsToTaskIds.put(report, taskId);
        }
    }
    // replace the tasks with the spied tasks
    job.tasks.putAll(spiedTasks);
    // complete all mappers first
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.MAP) {
            // generate a task attempt completed event first to populate the
            // nodes-to-succeeded-attempts map
            TaskAttemptCompletionEvent tce = Records.newRecord(TaskAttemptCompletionEvent.class);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            tce.setAttemptId(attemptId);
            tce.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
            job.handle(new JobTaskAttemptCompletedEvent(tce));
            // complete the task itself
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
            Assert.assertEquals(JobState.RUNNING, job.getState());
        }
    }
    // add an event for a node transition
    NodeReport firstMapperNodeReport = nodeReports.get(0);
    NodeReport secondMapperNodeReport = nodeReports.get(1);
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(firstMapperNodeReport)));
    dispatcher.await();
    // complete the reducer
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.REDUCE) {
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
        }
    }
    // add another event for a node transition for the other mapper
    // this should not trigger rescheduling
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(secondMapperNodeReport)));
    // complete the first mapper that was rescheduled
    TaskId firstMapper = nodeReportsToTaskIds.get(firstMapperNodeReport);
    job.handle(new JobTaskEvent(firstMapper, TaskState.SUCCEEDED));
    // verify the state is moving to committing
    assertJobState(job, JobStateInternal.COMMITTING);
    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);
    dispatcher.stop();
    commitHandler.stop();
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler), EventHandler (org.apache.hadoop.yarn.event.EventHandler), TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent), JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), CyclicBarrier (java.util.concurrent.CyclicBarrier), JobTaskAttemptCompletedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Map (java.util.Map), JobUpdatedNodesEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test).
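
The mocking pattern the test relies on fits in a small helper. Below is a sketch using Mockito; the helper name unhealthyNodeReport is illustrative and not part of the test.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;

// Builds a NodeReport stub that reports the given node as UNHEALTHY,
// mirroring the reports the test feeds into JobUpdatedNodesEvent.
static NodeReport unhealthyNodeReport(String host, int port) {
    NodeId nodeId = NodeId.newInstance(host, port);
    NodeReport report = mock(NodeReport.class);
    when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
    when(report.getNodeId()).thenReturn(nodeId);
    return report;
}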

Example 3 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project hadoop by apache.

The class BaseAMRMProxyE2ETest, method createAllocateRequest:

protected AllocateRequest createAllocateRequest(List<NodeReport> listNode) {
    // The test needs AMRMClient to create a real allocate request
    AMRMClientImpl<AMRMClient.ContainerRequest> amClient = new AMRMClientImpl<>();
    Resource capability = Resource.newInstance(1024, 2);
    Priority priority = Priority.newInstance(1);
    List<NodeReport> nodeReports = listNode;
    String node = nodeReports.get(0).getNodeId().getHost();
    String[] nodes = new String[] { node };
    AMRMClient.ContainerRequest storedContainer1 = new AMRMClient.ContainerRequest(capability, nodes, null, priority);
    amClient.addContainerRequest(storedContainer1);
    amClient.addContainerRequest(storedContainer1);
    List<ResourceRequest> resourceAsk = new ArrayList<>();
    for (ResourceRequest rr : amClient.ask) {
        resourceAsk.add(rr);
    }
    ResourceBlacklistRequest resourceBlacklistRequest = ResourceBlacklistRequest.newInstance(new ArrayList<>(), new ArrayList<>());
    int responseId = 1;
    return AllocateRequest.newInstance(responseId, 0, resourceAsk, new ArrayList<>(), resourceBlacklistRequest);
}
Also used: AMRMClient (org.apache.hadoop.yarn.client.api.AMRMClient), ResourceBlacklistRequest (org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), ArrayList (java.util.ArrayList), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport).
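
The final newInstance call packs five positional arguments; an annotated restatement of the same call may help (the comments are editorial, based on the AllocateRequest API):

return AllocateRequest.newInstance(
    responseId,               // id echoed from the previous allocate response
    0,                        // progress reported by the AM, 0.0f to 1.0f
    resourceAsk,              // outstanding ResourceRequests
    new ArrayList<>(),        // ContainerIds the AM wants released
    resourceBlacklistRequest  // blacklist additions and removals
);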

Example 4 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project hadoop by apache.

The class TestDistributedScheduling, method testAMOpportunistic:

/**
 * Check if an AM can ask for opportunistic containers and get them.
 * @throws Exception
 */
@Test
public void testAMOpportunistic() throws Exception {
    // Basic container to request
    Resource capability = Resource.newInstance(1024, 1);
    Priority priority = Priority.newInstance(1);
    // Get the cluster topology
    List<NodeReport> nodeReports = rmClient.getNodeReports(NodeState.RUNNING);
    String node = nodeReports.get(0).getNodeId().getHost();
    String rack = nodeReports.get(0).getRackName();
    String[] nodes = new String[] { node };
    String[] racks = new String[] { rack };
    // Create an AM to request resources
    AMRMClient<AMRMClient.ContainerRequest> amClient = null;
    try {
        amClient = new AMRMClientImpl<AMRMClient.ContainerRequest>(client);
        amClient.init(yarnConf);
        amClient.start();
        amClient.registerApplicationMaster(NetUtils.getHostname(), 1024, "");
        // AM requests an opportunistic container
        ExecutionTypeRequest execTypeRequest = ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true);
        ContainerRequest containerRequest = new AMRMClient.ContainerRequest(capability, nodes, racks, priority, 0, true, null, execTypeRequest);
        amClient.addContainerRequest(containerRequest);
        // Wait until the container is allocated
        ContainerId opportunisticContainerId = null;
        for (int i = 0; i < 10 && opportunisticContainerId == null; i++) {
            AllocateResponse allocResponse = amClient.allocate(0.1f);
            List<Container> allocatedContainers = allocResponse.getAllocatedContainers();
            for (Container allocatedContainer : allocatedContainers) {
                // Check that this is the container we requested
                assertEquals(ExecutionType.OPPORTUNISTIC, allocatedContainer.getExecutionType());
                opportunisticContainerId = allocatedContainer.getId();
            }
            sleep(100);
        }
        assertNotNull(opportunisticContainerId);
        // The RM sees the container as OPPORTUNISTIC
        ResourceScheduler scheduler = cluster.getResourceManager().getResourceScheduler();
        RMContainer rmContainer = scheduler.getRMContainer(opportunisticContainerId);
        assertEquals(ExecutionType.OPPORTUNISTIC, rmContainer.getExecutionType());
        // Release the opportunistic container
        amClient.releaseAssignedContainer(opportunisticContainerId);
        // Wait for the released container to appear
        boolean released = false;
        for (int i = 0; i < 10 && !released; i++) {
            AllocateResponse allocResponse = amClient.allocate(0.1f);
            List<ContainerStatus> completedContainers = allocResponse.getCompletedContainersStatuses();
            for (ContainerStatus completedContainer : completedContainers) {
                ContainerId completedContainerId = completedContainer.getContainerId();
                assertEquals(completedContainerId, opportunisticContainerId);
                released = true;
            }
            if (!released) {
                sleep(100);
            }
        }
        assertTrue(released);
        // The RM shouldn't see the container anymore
        rmContainer = scheduler.getRMContainer(opportunisticContainerId);
        assertNull(rmContainer);
        // Clean the AM
        amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
    } finally {
        if (amClient != null && amClient.getServiceState() == Service.STATE.STARTED) {
            amClient.close();
        }
    }
}
Also used: ExecutionTypeRequest (org.apache.hadoop.yarn.api.records.ExecutionTypeRequest), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse), Container (org.apache.hadoop.yarn.api.records.Container), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ContainerRequest (org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest), ResourceScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test).
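
Stripped of the polling and assertions, the opportunistic request itself is only a few lines. The sketch below mirrors the constructor arguments used in the test and assumes an amClient that is already registered:

// Request one OPPORTUNISTIC container; the boolean asks the scheduler to
// enforce the execution type rather than treat it as a hint.
ExecutionTypeRequest execType =
    ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true);
ContainerRequest request = new ContainerRequest(
    Resource.newInstance(1024, 1),  // 1024 MB, 1 vCore
    null, null,                     // no node or rack constraints
    Priority.newInstance(1),
    0, true, null,                  // same trailing arguments as in the test above
    execType);
amClient.addContainerRequest(request);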

Example 5 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project hadoop by apache.

The class TestDistributedScheduling, method testAMRMClient:

/**
 * Validates if AMRMClient can be used with Distributed Scheduling turned on.
 *
 * @throws Exception
 */
@Test(timeout = 120000)
@SuppressWarnings("unchecked")
public void testAMRMClient() throws Exception {
    AMRMClientImpl<AMRMClient.ContainerRequest> amClient = null;
    try {
        Priority priority = Priority.newInstance(1);
        Priority priority2 = Priority.newInstance(2);
        Resource capability = Resource.newInstance(1024, 1);
        List<NodeReport> nodeReports = rmClient.getNodeReports(NodeState.RUNNING);
        String node = nodeReports.get(0).getNodeId().getHost();
        String rack = nodeReports.get(0).getRackName();
        String[] nodes = new String[] { node };
        String[] racks = new String[] { rack };
        // start am rm client
        amClient = new AMRMClientImpl(client);
        amClient.init(yarnConf);
        amClient.start();
        amClient.registerApplicationMaster(NetUtils.getHostname(), 1024, "");
        assertEquals(0, amClient.ask.size());
        assertEquals(0, amClient.release.size());
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
        amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
        RemoteRequestsTable<ContainerRequest> remoteRequestsTable = amClient.getTable(0);
        int containersRequestedNode = remoteRequestsTable.get(priority, node, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
        int containersRequestedRack = remoteRequestsTable.get(priority, rack, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
        int containersRequestedAny = remoteRequestsTable.get(priority, ResourceRequest.ANY, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
        int oppContainersRequestedAny = remoteRequestsTable.get(priority2, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest.getNumContainers();
        assertEquals(2, containersRequestedNode);
        assertEquals(2, containersRequestedRack);
        assertEquals(2, containersRequestedAny);
        assertEquals(1, oppContainersRequestedAny);
        assertEquals(4, amClient.ask.size());
        assertEquals(0, amClient.release.size());
        // RM should allocate container within 2 calls to allocate()
        int allocatedContainerCount = 0;
        int iterationsLeft = 10;
        Set<ContainerId> releases = new TreeSet<>();
        amClient.getNMTokenCache().clearCache();
        Assert.assertEquals(0, amClient.getNMTokenCache().numberOfTokensInCache());
        HashMap<String, Token> receivedNMTokens = new HashMap<>();
        while (allocatedContainerCount < (containersRequestedAny + oppContainersRequestedAny) && iterationsLeft-- > 0) {
            AllocateResponse allocResponse = amClient.allocate(0.1f);
            assertEquals(0, amClient.ask.size());
            assertEquals(0, amClient.release.size());
            allocatedContainerCount += allocResponse.getAllocatedContainers().size();
            for (Container container : allocResponse.getAllocatedContainers()) {
                ContainerId rejectContainerId = container.getId();
                releases.add(rejectContainerId);
            }
            for (NMToken token : allocResponse.getNMTokens()) {
                String nodeID = token.getNodeId().toString();
                receivedNMTokens.put(nodeID, token.getToken());
            }
            if (allocatedContainerCount < containersRequestedAny) {
                // sleep to let the NMs heartbeat to the RM and trigger allocations
                sleep(100);
            }
        }
        assertEquals(allocatedContainerCount, containersRequestedAny + oppContainersRequestedAny);
        for (ContainerId rejectContainerId : releases) {
            amClient.releaseAssignedContainer(rejectContainerId);
        }
        assertEquals(3, amClient.release.size());
        assertEquals(0, amClient.ask.size());
        // need to tell the AMRMClient that we don't need these resources anymore
        amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
        assertEquals(4, amClient.ask.size());
        // test RPC exception handling
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
        amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
        final AMRMClient amc = amClient;
        ApplicationMasterProtocol realRM = amClient.rmClient;
        try {
            ApplicationMasterProtocol mockRM = mock(ApplicationMasterProtocol.class);
            when(mockRM.allocate(any(AllocateRequest.class))).thenAnswer(new Answer<AllocateResponse>() {

                public AllocateResponse answer(InvocationOnMock invocation) throws Exception {
                    amc.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
                    amc.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
                    amc.removeContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
                    throw new Exception();
                }
            });
            amClient.rmClient = mockRM;
            amClient.allocate(0.1f);
        } catch (Exception ioe) {
            // expected: the mocked allocate() above always throws
        } finally {
            amClient.rmClient = realRM;
        }
        assertEquals(3, amClient.release.size());
        assertEquals(6, amClient.ask.size());
        iterationsLeft = 3;
        // do a few iterations to ensure the RM is not going to send new containers
        while (iterationsLeft-- > 0) {
            // inform RM of rejection
            AllocateResponse allocResponse = amClient.allocate(0.1f);
            // RM did not send new containers because AM does not need any
            assertEquals(0, allocResponse.getAllocatedContainers().size());
            if (allocResponse.getCompletedContainersStatuses().size() > 0) {
                for (ContainerStatus cStatus : allocResponse.getCompletedContainersStatuses()) {
                    if (releases.contains(cStatus.getContainerId())) {
                        assertEquals(cStatus.getState(), ContainerState.COMPLETE);
                        assertEquals(-100, cStatus.getExitStatus());
                        releases.remove(cStatus.getContainerId());
                    }
                }
            }
            if (iterationsLeft > 0) {
                // sleep to give the NMs time to heartbeat to the RM
                sleep(100);
            }
        }
        assertEquals(0, amClient.ask.size());
        assertEquals(0, amClient.release.size());
        amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
    } finally {
        if (amClient != null && amClient.getServiceState() == Service.STATE.STARTED) {
            amClient.stop();
        }
    }
}
Also used: AMRMClient (org.apache.hadoop.yarn.client.api.AMRMClient), HashMap (java.util.HashMap), AllocateRequest (org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest), NMToken (org.apache.hadoop.yarn.api.records.NMToken), Token (org.apache.hadoop.yarn.api.records.Token), ApplicationMasterProtocol (org.apache.hadoop.yarn.api.ApplicationMasterProtocol), AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), Container (org.apache.hadoop.yarn.api.records.Container), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), TreeSet (java.util.TreeSet), ContainerRequest (org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), InvocationOnMock (org.mockito.invocation.InvocationOnMock), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test).
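
Both tests follow the standard AMRMClient lifecycle. Here is a minimal sketch of that shape with the assertions removed; the class name AmLifecycleSketch is invented, and the code assumes it runs inside a launched application attempt with a valid YarnConfiguration.

import java.io.IOException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class AmLifecycleSketch {
    public static void main(String[] args) throws IOException, YarnException {
        AMRMClient<ContainerRequest> amClient = AMRMClient.createAMRMClient();
        amClient.init(new YarnConfiguration());
        amClient.start();
        try {
            // register, heartbeat via allocate(), then unregister
            amClient.registerApplicationMaster(NetUtils.getHostname(), 0, "");
            AllocateResponse response = amClient.allocate(0.0f);
            System.out.println("allocated: " + response.getAllocatedContainers().size());
            amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
        } finally {
            amClient.stop();
        }
    }
}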

Aggregations

NodeReport (org.apache.hadoop.yarn.api.records.NodeReport): 47 usages
Test (org.junit.Test): 18 usages
ArrayList (java.util.ArrayList): 17 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 13 usages
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 10 usages
LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan): 10 usages
ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest): 9 usages
MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent): 9 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 9 usages
File (java.io.File): 8 usages
NodeState (org.apache.hadoop.yarn.api.records.NodeState): 7 usages
HashMap (java.util.HashMap): 6 usages
HashSet (java.util.HashSet): 6 usages
Map (java.util.Map): 5 usages
Container (org.apache.hadoop.yarn.api.records.Container): 5 usages
IOException (java.io.IOException): 4 usages
PrintWriter (java.io.PrintWriter): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
Priority (org.apache.hadoop.yarn.api.records.Priority): 4 usages
YarnClient (org.apache.hadoop.yarn.client.api.YarnClient): 4 usages