Example 86 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

The class TestContainerLauncher, method testPoolLimits.

@Test(timeout = 5000)
public void testPoolLimits() throws InterruptedException {
    ApplicationId appId = ApplicationId.newInstance(12345, 67);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 3);
    JobId jobId = MRBuilderUtils.newJobId(appId, 8);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);
    AppContext context = mock(AppContext.class);
    CustomContainerLauncher containerLauncher = new CustomContainerLauncher(context);
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
    containerLauncher.init(conf);
    containerLauncher.start();
    ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
    // 10 different hosts
    containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
    for (int i = 0; i < 10; i++) {
        containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId, "host" + i + ":1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
    }
    waitForEvents(containerLauncher, 10);
    Assert.assertEquals(10, threadPool.getPoolSize());
    Assert.assertNull(containerLauncher.foundErrors);
    // 4 more different hosts (host11 through host14); the thread pool size should be capped at 12
    containerLauncher.expectedCorePoolSize = 12;
    for (int i = 1; i <= 4; i++) {
        containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId, "host1" + i + ":1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
    }
    waitForEvents(containerLauncher, 12);
    Assert.assertEquals(12, threadPool.getPoolSize());
    Assert.assertNull(containerLauncher.foundErrors);
    // Make some threads idle so that the remaining events are also processed.
    containerLauncher.finishEventHandling = true;
    waitForEvents(containerLauncher, 14);
    Assert.assertEquals(12, threadPool.getPoolSize());
    Assert.assertNull(containerLauncher.foundErrors);
    containerLauncher.stop();
}
Also used: TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
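
What the test exercises: ContainerLauncherImpl grows its launcher thread pool by one for each previously unseen host, up to the cap set by MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT (12 here), which is why the pool sits at 10 after ten distinct hosts and at 12 after four more. A minimal standalone sketch of that policy, with illustrative names rather than the Hadoop implementation:

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch only: grow the core pool by one per new host, never beyond the limit.
class HostCappedPool {
    private final ThreadPoolExecutor pool;
    private final Set<String> seenHosts = new HashSet<>();
    private final int limit;

    HostCappedPool(int initialPoolSize, int limit) {
        this.limit = limit;
        this.pool = new ThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE,
                1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
    }

    void onLaunchEvent(String host, Runnable work) {
        if (seenHosts.add(host)) {
            int core = pool.getCorePoolSize();
            if (core < limit) {
                pool.setCorePoolSize(core + 1); // one more thread for a new host
            }
        }
        pool.execute(work);
    }
}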

Example 87 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

The class TestContainerLauncherImpl, method testHandle.

@Test(timeout = 5000)
public void testHandle() throws Exception {
    LOG.info("STARTING testHandle");
    AppContext mockContext = mock(AppContext.class);
    @SuppressWarnings("unchecked") EventHandler<Event> mockEventHandler = mock(EventHandler.class);
    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
    String cmAddress = "127.0.0.1:8000";
    ContainerManagementProtocolClient mockCM = mock(ContainerManagementProtocolClient.class);
    ContainerLauncherImplUnderTest ut = new ContainerLauncherImplUnderTest(mockContext, mockCM);
    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
        ContainerId contId = makeContainerId(0L, 0, 0, 1);
        TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0);
        StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class);
        startResp.setAllServicesMetaData(serviceResponse);
        LOG.info("inserting launch event");
        ContainerRemoteLaunchEvent mockLaunchEvent = mock(ContainerRemoteLaunchEvent.class);
        when(mockLaunchEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
        when(mockLaunchEvent.getContainerID()).thenReturn(contId);
        when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
        when(mockLaunchEvent.getContainerToken()).thenReturn(createNewContainerToken(contId, cmAddress));
        ut.handle(mockLaunchEvent);
        ut.waitForPoolToIdle();
        verify(mockCM).startContainers(any(StartContainersRequest.class));
        LOG.info("inserting cleanup event");
        ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
        when(mockCleanupEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
        when(mockCleanupEvent.getContainerID()).thenReturn(contId);
        when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        ut.handle(mockCleanupEvent);
        ut.waitForPoolToIdle();
        verify(mockCM).stopContainers(any(StopContainersRequest.class));
    } finally {
        ut.stop();
    }
}
Also used: StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), Configuration (org.apache.hadoop.conf.Configuration), StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Event (org.apache.hadoop.yarn.event.Event), StopContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest), Test (org.junit.Test)
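
The test is the standard Mockito stub-then-verify cycle: stub the container manager's responses with when(...).thenReturn(...), feed events to the launcher, then verify(...) that the expected protocol calls were made. A stripped-down, self-contained sketch of that cycle; the Greeter interface is hypothetical, standing in for ContainerManagementProtocolClient:

import static org.mockito.Mockito.*;

class MockitoPatternDemo {
    // Hypothetical collaborator, not a Hadoop type.
    interface Greeter {
        String greet(String name);
    }

    static void demo() {
        Greeter mockGreeter = mock(Greeter.class);          // create the mock
        when(mockGreeter.greet("world")).thenReturn("hi");  // stub the call
        mockGreeter.greet("world");                         // exercise the code under test
        verify(mockGreeter).greet("world");                 // assert the interaction happened
    }
}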

Example 88 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

The class TestContainerLauncherImpl, method testMyShutdown.

@Test(timeout = 5000)
public void testMyShutdown() throws Exception {
    LOG.info("in test Shutdown");
    AppContext mockContext = mock(AppContext.class);
    @SuppressWarnings("unchecked") EventHandler<Event> mockEventHandler = mock(EventHandler.class);
    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
    ContainerManagementProtocolClient mockCM = mock(ContainerManagementProtocolClient.class);
    ContainerLauncherImplUnderTest ut = new ContainerLauncherImplUnderTest(mockContext, mockCM);
    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
        ContainerId contId = makeContainerId(0L, 0, 0, 1);
        TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0);
        String cmAddress = "127.0.0.1:8000";
        StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class);
        startResp.setAllServicesMetaData(serviceResponse);
        LOG.info("inserting launch event");
        ContainerRemoteLaunchEvent mockLaunchEvent = mock(ContainerRemoteLaunchEvent.class);
        when(mockLaunchEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
        when(mockLaunchEvent.getContainerID()).thenReturn(contId);
        when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
        when(mockLaunchEvent.getContainerToken()).thenReturn(createNewContainerToken(contId, cmAddress));
        ut.handle(mockLaunchEvent);
        ut.waitForPoolToIdle();
        verify(mockCM).startContainers(any(StartContainersRequest.class));
    // skip cleanup and make sure stop kills the container
    } finally {
        ut.stop();
        verify(mockCM).stopContainers(any(StopContainersRequest.class));
    }
}
Also used: StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), Configuration (org.apache.hadoop.conf.Configuration), StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Event (org.apache.hadoop.yarn.event.Event), StopContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest), Test (org.junit.Test)
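
Unlike testHandle, no cleanup event is sent before stop(), so the final verify asserts that the shutdown path itself stops any container that was launched but never cleaned up. A minimal sketch of that contract, with illustrative names rather than the Hadoop implementation:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Sketch only: a launcher whose stop() issues a stop request for every
// container that was launched but not yet cleaned up.
class LauncherShutdownSketch {
    private final Set<String> liveContainers = ConcurrentHashMap.newKeySet();

    void launch(String containerId) {
        liveContainers.add(containerId);
    }

    void cleanup(String containerId) {
        liveContainers.remove(containerId);
        // ... send the stop request to the container manager here ...
    }

    void stop() {
        // This is the contract the test checks with verify(mockCM).stopContainers(...):
        // shutdown must clean up anything still running.
        for (String id : liveContainers) {
            cleanup(id);
        }
    }
}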

Example 89 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

The class TestAMWebServicesJobs, method testJobIdXML.

@Test
public void testJobIdXML() throws Exception {
    WebResource r = resource();
    Map<JobId, Job> jobsMap = appContext.getAllJobs();
    for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE + "; " + JettyUtils.UTF_8, response.getType().toString());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList job = dom.getElementsByTagName("job");
        verifyAMJobXML(job, appContext);
    }
}
Also used: ClientResponse (com.sun.jersey.api.client.ClientResponse), InputSource (org.xml.sax.InputSource), DocumentBuilderFactory (javax.xml.parsers.DocumentBuilderFactory), DocumentBuilder (javax.xml.parsers.DocumentBuilder), NodeList (org.w3c.dom.NodeList), StringReader (java.io.StringReader), WebResource (com.sun.jersey.api.client.WebResource), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Document (org.w3c.dom.Document), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
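
The Jersey WebResource chain amounts to GET /ws/v1/mapreduce/jobs/{jobId} against the AM web services with an XML Accept header, after which the body is parsed with a standard DOM DocumentBuilder. For orientation, a sketch of the equivalent plain-JDK request; the host/port argument is a placeholder, and readAllBytes needs Java 9 or later:

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

class AmJobXmlFetchSketch {
    // amHostPort is illustrative; the test uses Jersey's embedded test client instead.
    static String fetchJobXml(String amHostPort, String jobId) throws Exception {
        URL url = new URL("http://" + amHostPort + "/ws/v1/mapreduce/jobs/" + jobId);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/xml"); // ask for XML, as the test does
        try (InputStream in = conn.getInputStream()) {
            return new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }
    }
}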

Example 90 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

The class TestBlocks, method testSingleCounterBlock.

@Test
public void testSingleCounterBlock() {
    AppContext appCtx = mock(AppContext.class);
    View.ViewContext ctx = mock(View.ViewContext.class);
    JobId jobId = new JobIdPBImpl();
    jobId.setId(0);
    jobId.setAppId(ApplicationIdPBImpl.newInstance(0, 1));
    TaskId mapTaskId = new TaskIdPBImpl();
    mapTaskId.setId(0);
    mapTaskId.setTaskType(TaskType.MAP);
    mapTaskId.setJobId(jobId);
    Task mapTask = mock(Task.class);
    when(mapTask.getID()).thenReturn(mapTaskId);
    TaskReport mapReport = mock(TaskReport.class);
    when(mapTask.getReport()).thenReturn(mapReport);
    when(mapTask.getType()).thenReturn(TaskType.MAP);
    TaskId reduceTaskId = new TaskIdPBImpl();
    reduceTaskId.setId(0);
    reduceTaskId.setTaskType(TaskType.REDUCE);
    reduceTaskId.setJobId(jobId);
    Task reduceTask = mock(Task.class);
    when(reduceTask.getID()).thenReturn(reduceTaskId);
    TaskReport reduceReport = mock(TaskReport.class);
    when(reduceTask.getReport()).thenReturn(reduceReport);
    when(reduceTask.getType()).thenReturn(TaskType.REDUCE);
    Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
    tasks.put(mapTaskId, mapTask);
    tasks.put(reduceTaskId, reduceTask);
    Job job = mock(Job.class);
    when(job.getTasks()).thenReturn(tasks);
    when(appCtx.getJob(any(JobId.class))).thenReturn(job);
    // SingleCounter for map task
    SingleCounterBlockForMapTest blockForMapTest = spy(new SingleCounterBlockForMapTest(appCtx, ctx));
    PrintWriter pWriterForMapTest = new PrintWriter(data);
    Block htmlForMapTest = new BlockForTest(new HtmlBlockForTest(), pWriterForMapTest, 0, false);
    blockForMapTest.render(htmlForMapTest);
    pWriterForMapTest.flush();
    assertTrue(data.toString().contains("task_0_0001_m_000000"));
    assertFalse(data.toString().contains("task_0_0001_r_000000"));
    data.reset();
    // SingleCounter for reduce task
    SingleCounterBlockForReduceTest blockForReduceTest = spy(new SingleCounterBlockForReduceTest(appCtx, ctx));
    PrintWriter pWriterForReduceTest = new PrintWriter(data);
    Block htmlForReduceTest = new BlockForTest(new HtmlBlockForTest(), pWriterForReduceTest, 0, false);
    blockForReduceTest.render(htmlForReduceTest);
    pWriterForReduceTest.flush();
    System.out.println(data.toString());
    assertFalse(data.toString().contains("task_0_0001_m_000000"));
    assertTrue(data.toString().contains("task_0_0001_r_000000"));
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), TaskReport (org.apache.hadoop.mapreduce.v2.api.records.TaskReport), TaskIdPBImpl (org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl), HashMap (java.util.HashMap), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), View (org.apache.hadoop.yarn.webapp.View), BlockForTest (org.apache.hadoop.yarn.webapp.view.BlockForTest), JobIdPBImpl (org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl), HtmlBlock (org.apache.hadoop.yarn.webapp.view.HtmlBlock), Block (org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block), FewAttemptsBlock (org.apache.hadoop.mapreduce.v2.app.webapp.AttemptsPage.FewAttemptsBlock), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), PrintWriter (java.io.PrintWriter), Test (org.junit.Test)
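
The asserted strings follow the MapReduce v2 task ID layout, task_<clusterTimestamp>_<appId padded to 4 digits>_<m|r>_<taskId padded to 6 digits>; with ApplicationIdPBImpl.newInstance(0, 1) the prefix is task_0_0001. A small sketch (not a Hadoop API) that reproduces the two IDs the test checks for:

class TaskIdFormatSketch {
    // Illustrative helper, not part of Hadoop: compose a task ID string
    // matching the format rendered by the counter blocks above.
    static String taskIdString(long clusterTimestamp, int appId, boolean isMap, int taskNum) {
        return String.format("task_%d_%04d_%s_%06d",
                clusterTimestamp, appId, isMap ? "m" : "r", taskNum);
    }

    public static void main(String[] args) {
        System.out.println(taskIdString(0, 1, true, 0));  // task_0_0001_m_000000
        System.out.println(taskIdString(0, 1, false, 0)); // task_0_0001_r_000000
    }
}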

Aggregations

Test (org.junit.Test): 74
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 73
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 47
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 32
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 32
Configuration (org.apache.hadoop.conf.Configuration): 31
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 26
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 22
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 21
Path (org.apache.hadoop.fs.Path): 18
MockAppContext (org.apache.hadoop.mapreduce.v2.app.MockAppContext): 18
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 18
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 17
Container (org.apache.hadoop.yarn.api.records.Container): 14
JobConf (org.apache.hadoop.mapred.JobConf): 13
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo): 13
InetSocketAddress (java.net.InetSocketAddress): 12
ClusterInfo (org.apache.hadoop.mapreduce.v2.app.ClusterInfo): 12
TaskAttemptListener (org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener): 12
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 12