
Example 81 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class TestLogAggregationService method testLogAggregationAbsentContainer.

@Test(timeout = 50000)
public void testLogAggregationAbsentContainer() throws Exception {
    ApplicationId appId = createApplication();
    LogAggregationService logAggregationService = createLogAggregationService(appId, FailedOrKilledContainerLogAggregationPolicy.class, null);
    ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId1, 2L);
    try {
        logAggregationService.handle(new LogHandlerContainerFinishedEvent(containerId, 100));
        assertTrue("Should skip when null containerID", true);
    } catch (Exception e) {
        Assert.assertFalse("Exception not expected should skip null containerid", true);
    }
}
Also used : ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) LogHandlerContainerFinishedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) MultiException(org.eclipse.jetty.util.MultiException) IOException(java.io.IOException) YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) UnsupportedFileSystemException(org.apache.hadoop.fs.UnsupportedFileSystemException) EOFException(java.io.EOFException) FileNotFoundException(java.io.FileNotFoundException) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
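For context, the ContainerId used above can also be created and round-tripped without BuilderUtils, using only the public records API; a minimal sketch (the identifiers below are illustrative, not part of the Hadoop test):

    // build the ApplicationId -> ApplicationAttemptId -> ContainerId chain directly
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 2L);
    // the string form (e.g. "container_<timestamp>_0001_01_000002") parses back to an equal id
    ContainerId parsed = ContainerId.fromString(containerId.toString());
    assertEquals(containerId, parsed);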

Example 82 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class TestLogAggregationService method verifyLocalFileDeletion.

private void verifyLocalFileDeletion(LogAggregationService logAggregationService) throws Exception {
    logAggregationService.init(this.conf);
    logAggregationService.start();
    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    // AppLogDir should be created
    File app1LogDir = new File(localLogDir, application1.toString());
    app1LogDir.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, this.acls));
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(application1, 1);
    ContainerId container11 = createContainer(appAttemptId, 1, ContainerType.APPLICATION_MASTER);
    // Simulate log-file creation
    writeContainerLogs(app1LogDir, container11, new String[] { "stdout", "stderr", "syslog" });
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11, 0));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
    // ensure filesystems were closed
    verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));
    verify(delSrvc).delete(eq(user), eq((Path) null), eq(new Path(app1LogDir.getAbsolutePath())));
    String containerIdStr = container11.toString();
    File containerLogDir = new File(app1LogDir, containerIdStr);
    int count = 0;
    int maxAttempts = 50;
    for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
        File f = new File(containerLogDir, fileType);
        count = 0;
        while ((f.exists()) && (count < maxAttempts)) {
            count++;
            Thread.sleep(100);
        }
        Assert.assertFalse("File [" + f + "] was not deleted", f.exists());
    }
    count = 0;
    while ((app1LogDir.exists()) && (count < maxAttempts)) {
        count++;
        Thread.sleep(100);
    }
    Assert.assertFalse("Directory [" + app1LogDir + "] was not deleted", app1LogDir.exists());
    Path logFilePath = logAggregationService.getRemoteNodeLogFileForApp(application1, this.user);
    Assert.assertTrue("Log file [" + logFilePath + "] not found", new File(logFilePath.toUri().getPath()).exists());
    dispatcher.await();
    ApplicationEvent[] expectedEvents = new ApplicationEvent[] { new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };
    checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
}
Also used : Path(org.apache.hadoop.fs.Path) ApplicationEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent) LogHandlerContainerFinishedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LogHandlerAppStartedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) LogHandlerAppFinishedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
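The two polling loops in verifyLocalFileDeletion repeat the same wait-until-deleted pattern; a small helper along these lines could replace both (a sketch; the method name is illustrative and not part of the Hadoop test):

    // poll up to maxAttempts * 100 ms for a file or directory to disappear, then assert it is gone
    private void waitForDeletion(File path, int maxAttempts) throws InterruptedException {
        int attempts = 0;
        while (path.exists() && attempts < maxAttempts) {
            attempts++;
            Thread.sleep(100);
        }
        Assert.assertFalse("Path [" + path + "] was not deleted", path.exists());
    }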

Example 83 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class TestLogAggregationService method testLogAggregationServiceWithPatterns.

@Test(timeout = 50000)
@SuppressWarnings("unchecked")
public void testLogAggregationServiceWithPatterns() throws Exception {
    LogAggregationContext logAggregationContextWithIncludePatterns = Records.newRecord(LogAggregationContext.class);
    String includePattern = "stdout|syslog";
    logAggregationContextWithIncludePatterns.setIncludePattern(includePattern);
    LogAggregationContext logAggregationContextWithExcludePatterns = Records.newRecord(LogAggregationContext.class);
    String excludePattern = "stdout|syslog";
    logAggregationContextWithExcludePatterns.setExcludePattern(excludePattern);
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath());
    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
    ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
    ApplicationId application4 = BuilderUtils.newApplicationId(1234, 4);
    Application mockApp = mock(Application.class);
    when(mockApp.getContainers()).thenReturn(new HashMap<ContainerId, Container>());
    this.context.getApplications().put(application1, mockApp);
    this.context.getApplications().put(application2, mockApp);
    this.context.getApplications().put(application3, mockApp);
    this.context.getApplications().put(application4, mockApp);
    LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, this.delSrvc, super.dirsHandler);
    logAggregationService.init(this.conf);
    logAggregationService.start();
    // LogContext for application1 has an includePattern that matches
    // stdout and syslog.
    // After log aggregation finishes, we expect application1's aggregated
    // logs to contain only stdout and syslog
    // AppLogDir should be created
    File appLogDir1 = new File(localLogDir, application1.toString());
    appLogDir1.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, this.acls, logAggregationContextWithIncludePatterns));
    ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(application1, 1);
    ContainerId container1 = createContainer(appAttemptId1, 1, ContainerType.APPLICATION_MASTER);
    // Simulate log-file creation
    writeContainerLogs(appLogDir1, container1, new String[] { "stdout", "stderr", "syslog" });
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(container1, 0));
    // LogContext for application2 has an excludePattern that matches
    // stdout and syslog.
    // After log aggregation finishes, we expect application2's aggregated
    // logs to contain only stderr
    ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(application2, 1);
    File app2LogDir = new File(localLogDir, application2.toString());
    app2LogDir.mkdir();
    logAggregationContextWithExcludePatterns.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
    logAggregationService.handle(new LogHandlerAppStartedEvent(application2, this.user, null, this.acls, logAggregationContextWithExcludePatterns));
    ContainerId container2 = createContainer(appAttemptId2, 1, ContainerType.APPLICATION_MASTER);
    writeContainerLogs(app2LogDir, container2, new String[] { "stdout", "stderr", "syslog" });
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(container2, 0));
    // LogContext for application3 has an includePattern of .*.log and
    // an excludePattern that matches sys.log and std.log.
    // After log aggregation finishes, we expect application3's aggregated
    // logs to contain every file ending in .log except sys.log and std.log
    LogAggregationContext context1 = Records.newRecord(LogAggregationContext.class);
    context1.setIncludePattern(".*.log");
    context1.setExcludePattern("sys.log|std.log");
    ApplicationAttemptId appAttemptId3 = BuilderUtils.newApplicationAttemptId(application3, 1);
    File app3LogDir = new File(localLogDir, application3.toString());
    app3LogDir.mkdir();
    context1.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
    logAggregationService.handle(new LogHandlerAppStartedEvent(application3, this.user, null, this.acls, context1));
    ContainerId container3 = createContainer(appAttemptId3, 1, ContainerType.APPLICATION_MASTER);
    writeContainerLogs(app3LogDir, container3, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" });
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(container3, 0));
    // LogContext for application4 has an includePattern that matches
    // sys.log and std.log and an excludePattern that matches std.log.
    // After log aggregation finishes, we expect application4's aggregated
    // logs to contain only sys.log
    LogAggregationContext context2 = Records.newRecord(LogAggregationContext.class);
    context2.setIncludePattern("sys.log|std.log");
    context2.setExcludePattern("std.log");
    ApplicationAttemptId appAttemptId4 = BuilderUtils.newApplicationAttemptId(application4, 1);
    File app4LogDir = new File(localLogDir, application4.toString());
    app4LogDir.mkdir();
    context2.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
    logAggregationService.handle(new LogHandlerAppStartedEvent(application4, this.user, null, this.acls, context2));
    ContainerId container4 = createContainer(appAttemptId4, 1, ContainerType.APPLICATION_MASTER);
    writeContainerLogs(app4LogDir, container4, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" });
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(container4, 0));
    dispatcher.await();
    ApplicationEvent[] expectedInitEvents = new ApplicationEvent[] { new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED) };
    checkEvents(appEventHandler, expectedInitEvents, false, "getType", "getApplicationID");
    reset(appEventHandler);
    logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(application2));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(application3));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(application4));
    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
    String[] logFiles = new String[] { "stdout", "syslog" };
    verifyContainerLogs(logAggregationService, application1, new ContainerId[] { container1 }, logFiles, 2, false);
    logFiles = new String[] { "stderr" };
    verifyContainerLogs(logAggregationService, application2, new ContainerId[] { container2 }, logFiles, 1, false);
    logFiles = new String[] { "out.log", "err.log" };
    verifyContainerLogs(logAggregationService, application3, new ContainerId[] { container3 }, logFiles, 2, false);
    logFiles = new String[] { "sys.log" };
    verifyContainerLogs(logAggregationService, application4, new ContainerId[] { container4 }, logFiles, 1, false);
    dispatcher.await();
    ApplicationEvent[] expectedFinishedEvents = new ApplicationEvent[] { new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED), new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED), new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED), new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };
    checkEvents(appEventHandler, expectedFinishedEvents, false, "getType", "getApplicationID");
}
Also used : ApplicationEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent) LogHandlerContainerFinishedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LogHandlerAppStartedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) LogHandlerAppFinishedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Application(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application) File(java.io.File) LogAggregationContext(org.apache.hadoop.yarn.api.records.LogAggregationContext) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
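LogAggregationContext also exposes static newInstance factory methods, so the Records.newRecord plus setter sequences above can usually be written more compactly; a sketch, assuming the two-argument overload taking an include and an exclude pattern is available in this Hadoop version:

    // equivalent to Records.newRecord(LogAggregationContext.class) followed by the two pattern setters
    LogAggregationContext context = LogAggregationContext.newInstance(".*.log", "sys.log|std.log");
    context.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());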

Example 84 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class TestNMContainerTokenSecretManager method testRecovery.

@Test
public void testRecovery() throws IOException {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
    final NodeId nodeId = NodeId.newInstance("somehost", 1234);
    final ContainerId cid1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    final ContainerId cid2 = BuilderUtils.newContainerId(2, 2, 2, 2);
    ContainerTokenKeyGeneratorForTest keygen = new ContainerTokenKeyGeneratorForTest(conf);
    NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
    stateStore.init(conf);
    stateStore.start();
    NMContainerTokenSecretManager secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
    secretMgr.setNodeId(nodeId);
    MasterKey currentKey = keygen.generateKey();
    secretMgr.setMasterKey(currentKey);
    ContainerTokenIdentifier tokenId1 = createContainerTokenId(cid1, nodeId, "user1", secretMgr);
    ContainerTokenIdentifier tokenId2 = createContainerTokenId(cid2, nodeId, "user2", secretMgr);
    assertNotNull(secretMgr.retrievePassword(tokenId1));
    assertNotNull(secretMgr.retrievePassword(tokenId2));
    // restart and verify tokens still valid
    secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
    secretMgr.setNodeId(nodeId);
    secretMgr.recover();
    assertEquals(currentKey, secretMgr.getCurrentKey());
    assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
    assertTrue(secretMgr.isValidStartContainerRequest(tokenId2));
    assertNotNull(secretMgr.retrievePassword(tokenId1));
    assertNotNull(secretMgr.retrievePassword(tokenId2));
    // roll master key and start a container
    secretMgr.startContainerSuccessful(tokenId2);
    currentKey = keygen.generateKey();
    secretMgr.setMasterKey(currentKey);
    // restart and verify the tokens are still valid because the previous master key was persisted
    secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
    secretMgr.setNodeId(nodeId);
    secretMgr.recover();
    assertEquals(currentKey, secretMgr.getCurrentKey());
    assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
    assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
    assertNotNull(secretMgr.retrievePassword(tokenId1));
    assertNotNull(secretMgr.retrievePassword(tokenId2));
    // roll master key again, restart, and verify keys no longer valid
    currentKey = keygen.generateKey();
    secretMgr.setMasterKey(currentKey);
    secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
    secretMgr.setNodeId(nodeId);
    secretMgr.recover();
    assertEquals(currentKey, secretMgr.getCurrentKey());
    assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
    assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
    try {
        secretMgr.retrievePassword(tokenId1);
        fail("token should not be valid");
    } catch (InvalidToken e) {
    // expected
    }
    try {
        secretMgr.retrievePassword(tokenId2);
        fail("token should not be valid");
    } catch (InvalidToken e) {
    // expected
    }
    stateStore.close();
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) MasterKey(org.apache.hadoop.yarn.server.api.records.MasterKey) NodeId(org.apache.hadoop.yarn.api.records.NodeId) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) NMMemoryStateStoreService(org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) Test(org.junit.Test)
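With JUnit 4.13 or newer on the classpath, the two try/catch/fail blocks at the end of testRecovery can be collapsed with Assert.assertThrows; a sketch, not how the Hadoop test is actually written:

    // secretMgr is reassigned above, so copy it into an effectively final local for the lambdas
    final NMContainerTokenSecretManager recovered = secretMgr;
    Assert.assertThrows(InvalidToken.class, () -> recovered.retrievePassword(tokenId1));
    Assert.assertThrows(InvalidToken.class, () -> recovered.retrievePassword(tokenId2));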

Example 85 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class TestCgroupsLCEResourcesHandler method testContainerLimits.

@Test
public void testContainerLimits() throws IOException {
    LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
    CustomCgroupsLCEResourceHandler handler = new CustomCgroupsLCEResourceHandler();
    handler.generateLimitsMode = true;
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NM_DISK_RESOURCE_ENABLED, true);
    final int numProcessors = 4;
    ResourceCalculatorPlugin plugin = Mockito.mock(ResourceCalculatorPlugin.class);
    Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
    Mockito.doReturn(numProcessors).when(plugin).getNumCores();
    handler.setConf(conf);
    handler.initConfig();
    // create mock cgroup
    File cpuCgroupMountDir = TestCGroupsHandlerImpl.createMockCgroupMount(cgroupDir, "cpu");
    // create mock mtab
    File mockMtab = TestCGroupsHandlerImpl.createMockMTab(cgroupDir);
    // setup our handler and call init()
    handler.setMtabFile(mockMtab.getAbsolutePath());
    handler.init(mockLCE, plugin);
    // check the controller paths map isn't empty
    ContainerId id = ContainerId.fromString("container_1_1_1_1");
    handler.preExecute(id, Resource.newInstance(1024, 1));
    Assert.assertNotNull(handler.getControllerPaths());
    // check values
    // default case - files shouldn't exist, strict mode off by default
    File containerCpuDir = new File(cpuCgroupMountDir, id.toString());
    Assert.assertTrue(containerCpuDir.exists());
    Assert.assertTrue(containerCpuDir.isDirectory());
    File periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
    File quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
    Assert.assertFalse(periodFile.exists());
    Assert.assertFalse(quotaFile.exists());
    // no limit files are created because the container is given all of the node's vcores
    FileUtils.deleteQuietly(containerCpuDir);
    conf.setBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
    handler.initConfig();
    handler.preExecute(id, Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES));
    Assert.assertTrue(containerCpuDir.exists());
    Assert.assertTrue(containerCpuDir.isDirectory());
    periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
    quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
    Assert.assertFalse(periodFile.exists());
    Assert.assertFalse(quotaFile.exists());
    // 50% of CPU
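    // expected: the container holds half of the node's vcores, so it is entitled to half of the
    // 4 mocked processors, i.e. 2 cores: cpu.cfs_quota_us / cpu.cfs_period_us = 1,000,000 / 500,000 = 2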
    FileUtils.deleteQuietly(containerCpuDir);
    conf.setBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
    handler.initConfig();
    handler.preExecute(id, Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
    Assert.assertTrue(containerCpuDir.exists());
    Assert.assertTrue(containerCpuDir.isDirectory());
    periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
    quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
    Assert.assertTrue(periodFile.exists());
    Assert.assertTrue(quotaFile.exists());
    Assert.assertEquals(500 * 1000, readIntFromFile(periodFile));
    Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));
    // CGroups set to 50% of CPU, container set to 50% of YARN CPU
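    // expected: YARN itself is capped to 50% of the 4 processors (2 cores) and the container again
    // gets half of the node's vcores, i.e. 1 core: cpu.cfs_quota_us / cpu.cfs_period_us = 1,000,000 / 1,000,000 = 1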
    FileUtils.deleteQuietly(containerCpuDir);
    conf.setBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
    conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 50);
    handler.initConfig();
    handler.init(mockLCE, plugin);
    handler.preExecute(id, Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
    Assert.assertTrue(containerCpuDir.exists());
    Assert.assertTrue(containerCpuDir.isDirectory());
    periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
    quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
    Assert.assertTrue(periodFile.exists());
    Assert.assertTrue(quotaFile.exists());
    Assert.assertEquals(1000 * 1000, readIntFromFile(periodFile));
    Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));
    FileUtils.deleteQuietly(cgroupDir);
}
Also used : LinuxContainerExecutor(org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor) ResourceCalculatorPlugin(org.apache.hadoop.yarn.util.ResourceCalculatorPlugin) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) Test(org.junit.Test)

Aggregations

ContainerId (org.apache.hadoop.yarn.api.records.ContainerId) 590
Test (org.junit.Test) 339
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 173
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 169
ArrayList (java.util.ArrayList) 161
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) 119
Container (org.apache.hadoop.yarn.api.records.Container) 104
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 94
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM) 79
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM) 78
Path (org.apache.hadoop.fs.Path) 77
MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM) 77
HashMap (java.util.HashMap) 75
Configuration (org.apache.hadoop.conf.Configuration) 74
IOException (java.io.IOException) 73
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) 68
Resource (org.apache.hadoop.yarn.api.records.Resource) 67
ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus) 66
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest) 61
NodeId (org.apache.hadoop.yarn.api.records.NodeId) 59