Example 81 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From class TestContainerLogsPage, method testContainerLogPageAccess.

@Test(timeout = 10000)
public void testContainerLogPageAccess() throws IOException {
    // SecureIOUtils requires native IO to be enabled, so this test runs
    // only when it is available.
    assumeTrue(NativeIO.isAvailable());
    String user = "randomUser" + System.currentTimeMillis();
    File absLogDir = null, appDir = null, containerDir = null, syslog = null;
    try {
        // target log directory
        absLogDir = new File("target", TestContainerLogsPage.class.getSimpleName() + "LogDir").getAbsoluteFile();
        absLogDir.mkdir();
        Configuration conf = new Configuration();
        conf.set(YarnConfiguration.NM_LOG_DIRS, absLogDir.toURI().toString());
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
        healthChecker.init(conf);
        LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
        // Add an application and the corresponding containers
        RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
        long clusterTimeStamp = 1234;
        ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
        Application app = mock(Application.class);
        when(app.getAppId()).thenReturn(appId);
        // Make sure the application returns a random user; this is required
        // for SecureIOUtils' file-owner check.
        when(app.getUser()).thenReturn(user);
        ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
        ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
        // Test secure read access to log files: create the application and
        // container directories and a syslog file.
        appDir = new File(absLogDir, appId.toString());
        appDir.mkdir();
        containerDir = new File(appDir, container1.toString());
        containerDir.mkdir();
        syslog = new File(containerDir, "syslog");
        syslog.createNewFile();
        BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(syslog));
        out.write("Log file Content".getBytes());
        out.close();
        Context context = mock(Context.class);
        ConcurrentMap<ApplicationId, Application> appMap = new ConcurrentHashMap<ApplicationId, Application>();
        appMap.put(appId, app);
        when(context.getApplications()).thenReturn(appMap);
        ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>();
        when(context.getContainers()).thenReturn(containers);
        when(context.getLocalDirsHandler()).thenReturn(dirsHandler);
        MockContainer container = new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
        container.setState(ContainerState.RUNNING);
        context.getContainers().put(container1, container);
        ContainersLogsBlock cLogsBlock = new ContainersLogsBlock(context);
        Map<String, String> params = new HashMap<String, String>();
        params.put(YarnWebParams.CONTAINER_ID, container1.toString());
        params.put(YarnWebParams.CONTAINER_LOG_TYPE, "syslog");
        Injector injector = WebAppTests.testPage(ContainerLogsPage.class, ContainersLogsBlock.class, cLogsBlock, params, (Module[]) null);
        PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
        verify(spyPw).write("Exception reading log file. Application submitted by '" + user + "' doesn't own requested log file : syslog");
    } finally {
        if (syslog != null) {
            syslog.delete();
        }
        if (containerDir != null) {
            containerDir.delete();
        }
        if (appDir != null) {
            appDir.delete();
        }
        if (absLogDir != null) {
            absLogDir.delete();
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) NodeHealthCheckerService(org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) Injector(com.google.inject.Injector) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BufferedOutputStream(java.io.BufferedOutputStream) PrintWriter(java.io.PrintWriter) NMContext(org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext) Context(org.apache.hadoop.yarn.server.nodemanager.Context) ContainersLogsBlock(org.apache.hadoop.yarn.server.nodemanager.webapp.ContainerLogsPage.ContainersLogsBlock) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) RecordFactory(org.apache.hadoop.yarn.factories.RecordFactory) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) FileOutputStream(java.io.FileOutputStream) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Module(com.google.inject.Module) File(java.io.File) Application(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application) Test(org.junit.Test)
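
The test above builds its identifiers through BuilderUtils and a RecordFactory. For reference, the same IDs can be created directly with the records' static factory methods, as Example 84 below does; a minimal sketch with illustrative values:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

// A cluster timestamp plus a sequence number uniquely identify an application.
ApplicationId appId = ApplicationId.newInstance(1234L, 1);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
// appId.toString() yields "application_1234_0001", which is the directory name
// the test above creates under the NM log dir.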

Example 82 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From class TestContainerLogsPage, method testContainerLogDirs.

@Test(timeout = 30000)
public void testContainerLogDirs() throws IOException, YarnException {
    File absLogDir = new File("target", TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
    String logdirwithFile = absLogDir.toURI().toString();
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
    NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
    healthChecker.init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
    NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
    // Add an application and the corresponding containers
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
    String user = "nobody";
    long clusterTimeStamp = 1234;
    ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
    Application app = mock(Application.class);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
    nmContext.getApplications().put(appId, app);
    MockContainer container = new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
    container.setState(ContainerState.RUNNING);
    nmContext.getContainers().put(container1, container);
    List<File> files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    Assert.assertFalse(files.get(0).toString().contains("file:"));
    // After container is completed, it is removed from nmContext
    nmContext.getContainers().remove(container1);
    Assert.assertNull(nmContext.getContainers().get(container1));
    files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    Assert.assertFalse(files.get(0).toString().contains("file:"));
    // Create a new context to check if correct container log dirs are fetched
    // on full disk.
    LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
    // Good log dirs are empty and the NM log dir is in the full-disk dir list.
    when(dirsHandlerForFullDisk.getLogDirs()).thenReturn(new ArrayList<String>());
    when(dirsHandlerForFullDisk.getLogDirsForRead()).thenReturn(Arrays.asList(absLogDir.getAbsolutePath()));
    nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
    nmContext.getApplications().put(appId, app);
    container.setState(ContainerState.RUNNING);
    nmContext.getContainers().put(container1, container);
    List<File> dirs = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    File containerLogDir = new File(absLogDir, appId + "/" + container1);
    Assert.assertTrue(dirs.contains(containerLogDir));
}
Also used : NMContext(org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) NodeHealthCheckerService(org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) NMNullStateStoreService(org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService) NodeManager(org.apache.hadoop.yarn.server.nodemanager.NodeManager) ApplicationACLsManager(org.apache.hadoop.yarn.server.security.ApplicationACLsManager) RecordFactory(org.apache.hadoop.yarn.factories.RecordFactory) NMContext(org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) Application(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application) Test(org.junit.Test)

Example 83 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From class TestContainerLogsPage, method testContainerLogFile.

@Test(timeout = 30000)
public void testContainerLogFile() throws IOException, YarnException {
    File absLogDir = new File("target", TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
    String logdirwithFile = absLogDir.toURI().toString();
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
    conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 0.0f);
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);
    NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
    // Add an application and the corresponding containers
    String user = "nobody";
    long clusterTimeStamp = 1234;
    ApplicationId appId = BuilderUtils.newApplicationId(clusterTimeStamp, 1);
    Application app = mock(Application.class);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
    nmContext.getApplications().put(appId, app);
    MockContainer container = new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
    container.setState(ContainerState.RUNNING);
    nmContext.getContainers().put(containerId, container);
    File containerLogDir = new File(absLogDir, ContainerLaunch.getRelativeContainerLogDir(appId.toString(), containerId.toString()));
    containerLogDir.mkdirs();
    String fileName = "fileName";
    File containerLogFile = new File(containerLogDir, fileName);
    containerLogFile.createNewFile();
    File file = ContainerLogsUtils.getContainerLogFile(containerId, fileName, user, nmContext);
    Assert.assertEquals(containerLogFile.toURI().toString(), file.toURI().toString());
    FileUtil.fullyDelete(absLogDir);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) NMNullStateStoreService(org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService) ApplicationACLsManager(org.apache.hadoop.yarn.server.security.ApplicationACLsManager) NMContext(org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) Application(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application) Test(org.junit.Test)
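
The assertion in this test relies on the on-disk layout: ContainerLaunch.getRelativeContainerLogDir() joins the application and container ID strings, giving the <log-dir>/<application-id>/<container-id>/<file> shape. A sketch of the resulting path, with illustrative directory names:

import java.io.File;

File logRoot = new File("/tmp/nm-logs"); // assumed NM log dir, for illustration only
String appIdStr = "application_1234_0001"; // ApplicationId.toString()
String containerIdStr = "container_1234_0001_01_000001"; // ContainerId.toString()
// Same shape as new File(logRoot, getRelativeContainerLogDir(appIdStr, containerIdStr)).
File syslog = new File(new File(new File(logRoot, appIdStr), containerIdStr), "syslog");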

Example 84 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From class TestContainersMonitor, method testContainerKillOnMemoryOverflow.

@Test
public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
    if (!ProcfsBasedProcessTree.isAvailable()) {
        return;
    }
    containerManager.start();
    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
    // umask 0 so that the start file is readable by the test.
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nsleep 15");
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // Construct the ContainerId
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);
    Resource r = BuilderUtils.newResource(0, 0);
    ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0);
    Token containerToken = BuilderUtils.newContainerToken(context.getNodeId(), containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier), containerIdentifier);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertNull(reader.readLine());
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE, 60);
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM, containerStatus.getExitStatus());
    String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running beyond virtual memory limits. Current usage: " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. " + "Killing container.\nDump of the process-tree for " + cId + " :\n";
    Pattern pat = Pattern.compile(expectedMsgPattern);
    Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: " + containerStatus.getDiagnostics(), true, pat.matcher(containerStatus.getDiagnostics()).find());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", exec.signalContainer(new ContainerSignalContext.Builder().setUser(user).setPid(pid).setSignal(Signal.NULL).build()));
}
Also used : HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.yarn.api.records.Token) URL(org.apache.hadoop.yarn.api.records.URL) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileReader(java.io.FileReader) PrintWriter(java.io.PrintWriter) Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) Pattern(java.util.regex.Pattern) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) BufferedReader(java.io.BufferedReader) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
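
The kill in this test only happens when the ContainersMonitor actually enforces virtual memory limits. A hedged sketch of the configuration involved (the test's base class wires up its own configuration; the values here are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

Configuration conf = new YarnConfiguration();
// Virtual memory checking must be enabled for the monitor to kill containers
// with ContainerExitStatus.KILLED_EXCEEDED_VMEM, as asserted above.
conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
// Allowed virtual memory, expressed as a multiple of the container's
// physical memory allocation.
conf.setFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, 2.1f);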

Example 85 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From class TestLogAggregationService, method testInvalidPolicyClassName.

// The application specifies an invalid policy class name, so the NM should
// fall back to the default policy, which aggregates all containers.
@Test(timeout = 50000)
@SuppressWarnings("unchecked")
public void testInvalidPolicyClassName() throws Exception {
    ApplicationId appId = createApplication();
    LogAggregationService logAggregationService = createLogAggregationService(appId, "foo", null, true);
    verifyDefaultPolicy(appId, logAggregationService);
}
Also used : ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
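
In production, the policy class name that the createLogAggregationService() helper injects here ("foo") would arrive through the application's submission context. A hedged sketch of how an application sets it (names illustrative, not taken from the test helper):

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.util.Records;

LogAggregationContext logCtx = Records.newRecord(LogAggregationContext.class);
// "foo" is not a loadable policy class, so the NM falls back to the default
// policy and aggregates logs from all containers, as the test verifies.
logCtx.setLogAggregationPolicyClassName("foo");
ApplicationSubmissionContext submission = Records.newRecord(ApplicationSubmissionContext.class);
submission.setLogAggregationContext(logCtx);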

Aggregations

ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)648 Test (org.junit.Test)338 ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)221 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)173 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)137 Configuration (org.apache.hadoop.conf.Configuration)127 IOException (java.io.IOException)119 Path (org.apache.hadoop.fs.Path)107 RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp)102 ArrayList (java.util.ArrayList)96 YarnException (org.apache.hadoop.yarn.exceptions.YarnException)71 HashMap (java.util.HashMap)65 ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport)61 Resource (org.apache.hadoop.yarn.api.records.Resource)57 UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)53 ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext)51 File (java.io.File)49 Credentials (org.apache.hadoop.security.Credentials)49 JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)47 ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext)47