
Example 11 with StartContainersRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in the Apache Hadoop project.

From the class TestContainersMonitor, method testContainerKillOnMemoryOverflow.

@Test
public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
    if (!ProcfsBasedProcessTree.isAvailable()) {
        return;
    }
    containerManager.start();
    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
    // umask 0 so that the start file is readable by the test
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nsleep 15");
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);
    Resource r = BuilderUtils.newResource(0, 0);
    ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0);
    Token containerToken = BuilderUtils.newContainerToken(context.getNodeId(), containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier), containerIdentifier);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE, 60);
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM, containerStatus.getExitStatus());
    String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running beyond virtual memory limits. Current usage: " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. " + "Killing container.\nDump of the process-tree for " + cId + " :\n";
    Pattern pat = Pattern.compile(expectedMsgPattern);
    Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: " + containerStatus.getDiagnostics(), true, pat.matcher(containerStatus.getDiagnostics()).find());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", exec.signalContainer(new ContainerSignalContext.Builder().setUser(user).setPid(pid).setSignal(Signal.NULL).build()));
}
Also used : HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.yarn.api.records.Token) URL(org.apache.hadoop.yarn.api.records.URL) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileReader(java.io.FileReader) PrintWriter(java.io.PrintWriter) Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) Pattern(java.util.regex.Pattern) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) BufferedReader(java.io.BufferedReader) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
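
Every snippet on this page follows the same construction pattern: build a ContainerLaunchContext, pair it with a container token in a StartContainerRequest, and batch one or more of those pairs into a StartContainersRequest before calling startContainers. A minimal sketch of that pattern against any ContainerManagementProtocol implementation follows; the class, method, and parameter names (StartContainersSketch, launch, cm, destName) are illustrative and not taken from the test.

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.util.Records;

public final class StartContainersSketch {

    /** Builds a launch context, pairs it with its token, and submits a batch of one. */
    public static StartContainersResponse launch(ContainerManagementProtocol cm,
            String destName, LocalResource script, List<String> commands,
            Token containerToken) throws Exception {
        ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
        // Localize the script under destName and tell the NM how to run it.
        ctx.setLocalResources(Collections.singletonMap(destName, script));
        ctx.setCommands(commands);

        // Each container start is a (context, token) pair; the batch request wraps a list of them.
        StartContainerRequest one = StartContainerRequest.newInstance(ctx, containerToken);
        StartContainersRequest batch =
            StartContainersRequest.newInstance(Collections.singletonList(one));
        return cm.startContainers(batch);
    }
}

In the tests above, the ContainerManagerImpl under test stands in for cm, and the test's record factory plays the role of Records.newRecord.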

Example 12 with StartContainersRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in the Apache Hadoop project.

From the class TestContainerLaunch, method testContainerEnvVariables.

/**
   * See if environment variables are forwarded using sanitizeEnv.
   * @throws Exception
   */
@Test(timeout = 60000)
public void testContainerEnvVariables() throws Exception {
    containerManager.start();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    Map<String, String> userSetEnv = new HashMap<String, String>();
    userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
    userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
    userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT");
    userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT");
    userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR");
    userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key());
    userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME");
    userSetEnv.put(Environment.PWD.name(), "user_set_PWD");
    userSetEnv.put(Environment.HOME.name(), "user_set_HOME");
    containerLaunchContext.setEnvironment(userSetEnv);
    File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "env_vars.txt").getAbsoluteFile();
    if (Shell.WINDOWS) {
        fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> " + processStartFile);
        fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.USER.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.PWD.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.HOME.$() + ">> " + processStartFile);
        for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
            fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + "%>> " + processStartFile);
        }
        fileWriter.println("@echo " + cId + ">> " + processStartFile);
        fileWriter.println("@ping -n 100 127.0.0.1 >nul");
    } else {
        // So that start file is readable by the test
        fileWriter.write("\numask 0");
        fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > " + processStartFile);
        fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.USER.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.PWD.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.HOME.name() + " >> " + processStartFile);
        for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
            fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + " >> " + processStartFile);
        }
        fileWriter.write("\necho $$ >> " + processStartFile);
        fileWriter.write("\nexec sleep 100");
    }
    fileWriter.close();
    // upload the script file so that the container can run it
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    // set up the rest of the container
    List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
    containerLaunchContext.setCommands(commands);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, Priority.newInstance(0), 0));
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    List<String> localDirs = dirsHandler.getLocalDirs();
    List<String> logDirs = dirsHandler.getLogDirs();
    List<Path> appDirs = new ArrayList<Path>(localDirs.size());
    for (String localDir : localDirs) {
        Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
        Path userdir = new Path(usersdir, user);
        Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
        appDirs.add(new Path(appsdir, appId.toString()));
    }
    List<String> containerLogDirs = new ArrayList<String>();
    String relativeContainerLogDir = ContainerLaunch.getRelativeContainerLogDir(appId.toString(), cId.toString());
    for (String logDir : logDirs) {
        containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir);
    }
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals(cId.toString(), reader.readLine());
    Assert.assertEquals(context.getNodeId().getHost(), reader.readLine());
    Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), reader.readLine());
    Assert.assertEquals(String.valueOf(HTTP_PORT), reader.readLine());
    Assert.assertEquals(StringUtils.join(",", appDirs), reader.readLine());
    Assert.assertEquals(user, reader.readLine());
    Assert.assertEquals(user, reader.readLine());
    String obtainedPWD = reader.readLine();
    boolean found = false;
    for (Path localDir : appDirs) {
        if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
            found = true;
            break;
        }
    }
    Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
    Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), reader.readLine());
    for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
        Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName), ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes())));
    }
    Assert.assertEquals(cId.toString(), containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name()));
    Assert.assertEquals(context.getNodeId().getHost(), containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name()));
    Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name()));
    Assert.assertEquals(String.valueOf(HTTP_PORT), containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name()));
    Assert.assertEquals(StringUtils.join(",", appDirs), containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name()));
    Assert.assertEquals(StringUtils.join(",", containerLogDirs), containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name()));
    Assert.assertEquals(user, containerLaunchContext.getEnvironment().get(Environment.USER.name()));
    Assert.assertEquals(user, containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name()));
    found = false;
    obtainedPWD = containerLaunchContext.getEnvironment().get(Environment.PWD.name());
    for (Path localDir : appDirs) {
        if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
            found = true;
            break;
        }
    }
    Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
    Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), containerLaunchContext.getEnvironment().get(Environment.HOME.name()));
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    // Assert that the process is alive
    Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
    // Once more
    Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
    // Now test the stop functionality.
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
    containerManager.stopContainers(stopRequest);
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
    Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid));
}
Also used : HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) URL(org.apache.hadoop.yarn.api.records.URL) NMContainerStatus(org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileReader(java.io.FileReader) PrintWriter(java.io.PrintWriter) StopContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest) Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) BufferedReader(java.io.BufferedReader) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JarFile(java.util.jar.JarFile) File(java.io.File) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
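
This test verifies the launch through a simple file handshake: the container script echoes each forwarded environment value, the aux-service data, and finally its own PID into a start file, and the test polls for that file and reads the lines back in the same order. A hedged sketch of that handshake in plain Java is shown below; the file and helper names (StartFileSketch, waitForFile, readLines) are illustrative.

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public final class StartFileSketch {

    /** Polls up to timeoutSecs for the start file the launched script creates. */
    public static boolean waitForFile(File startFile, int timeoutSecs) throws InterruptedException {
        int waited = 0;
        while (!startFile.exists() && waited++ < timeoutSecs) {
            Thread.sleep(1000);
        }
        return startFile.exists();
    }

    /** Reads every echoed line: the env values, aux-service data, and finally the PID. */
    public static List<String> readLines(File startFile) throws IOException {
        List<String> lines = new ArrayList<>();
        try (BufferedReader reader = new BufferedReader(new FileReader(startFile))) {
            String line;
            while ((line = reader.readLine()) != null) {
                lines.add(line);
            }
        }
        return lines;
    }
}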

Example 13 with StartContainersRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in the Apache Hadoop project.

From the class TestContainerManager, method testChangeContainerResource.

@Test
public void testChangeContainerResource() throws Exception {
    containerManager.start();
    File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    // Construct the Container-id
    ContainerId cId = createContainerId(0);
    if (Shell.WINDOWS) {
        fileWriter.println("@ping -n 100 127.0.0.1 >nul");
    } else {
        fileWriter.write("\numask 0");
        fileWriter.write("\nexec sleep 100");
    }
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
    containerLaunchContext.setCommands(commands);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
    List<StartContainerRequest> list = new ArrayList<>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    // Make sure the container reaches RUNNING state
    BaseContainerManagerTest.waitForNMContainerState(containerManager, cId, org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
    // Construct container resource increase request,
    List<Token> increaseTokens = new ArrayList<>();
    // Add increase request.
    Resource targetResource = Resource.newInstance(4096, 2);
    Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, targetResource, context.getContainerTokenSecretManager(), null);
    increaseTokens.add(containerToken);
    IncreaseContainersResourceRequest increaseRequest = IncreaseContainersResourceRequest.newInstance(increaseTokens);
    IncreaseContainersResourceResponse increaseResponse = containerManager.increaseContainersResource(increaseRequest);
    Assert.assertEquals(1, increaseResponse.getSuccessfullyIncreasedContainers().size());
    Assert.assertTrue(increaseResponse.getFailedRequests().isEmpty());
    // Check status
    List<ContainerId> containerIds = new ArrayList<>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    // Check status immediately as resource increase is blocking
    assertEquals(targetResource, containerStatus.getCapability());
    // Simulate a decrease request
    List<org.apache.hadoop.yarn.api.records.Container> containersToDecrease = new ArrayList<>();
    targetResource = Resource.newInstance(2048, 2);
    org.apache.hadoop.yarn.api.records.Container decreasedContainer = org.apache.hadoop.yarn.api.records.Container.newInstance(cId, null, null, targetResource, null, null);
    containersToDecrease.add(decreasedContainer);
    containerManager.handle(new CMgrDecreaseContainersResourceEvent(containersToDecrease));
    // Check status with retry
    containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    int retry = 0;
    while (!targetResource.equals(containerStatus.getCapability()) && (retry++ < 5)) {
        Thread.sleep(200);
        containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    }
    assertEquals(targetResource, containerStatus.getCapability());
}
Also used : HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.yarn.api.records.Token) URL(org.apache.hadoop.yarn.api.records.URL) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) IncreaseContainersResourceResponse(org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse) PrintWriter(java.io.PrintWriter) Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) CMgrDecreaseContainersResourceEvent(org.apache.hadoop.yarn.server.nodemanager.CMgrDecreaseContainersResourceEvent) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) IncreaseContainersResourceRequest(org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) File(java.io.File) Test(org.junit.Test)
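
Distilled from the test above, the resource-change flow has two steps: submit an updated container token through an IncreaseContainersResourceRequest, then poll getContainerStatuses until the reported capability matches the target, since a resize may only be reflected asynchronously. The sketch below shows that flow against a ContainerManagementProtocol; names such as cm, increaseAndVerify, and the five-retry budget are assumptions for illustration, not part of the test.

import java.util.Collections;

import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;

public final class ResourceChangeSketch {

    public static boolean increaseAndVerify(ContainerManagementProtocol cm, ContainerId cId,
            Token updatedToken, Resource target) throws Exception {
        // The request carries updated container tokens, one per container to resize.
        IncreaseContainersResourceRequest req =
            IncreaseContainersResourceRequest.newInstance(Collections.singletonList(updatedToken));
        IncreaseContainersResourceResponse resp = cm.increaseContainersResource(req);
        if (!resp.getFailedRequests().isEmpty()) {
            // Per-container failures come back as SerializedExceptions keyed by ContainerId.
            return false;
        }
        // Poll the reported capability until it matches the target resource.
        GetContainerStatusesRequest gcs =
            GetContainerStatusesRequest.newInstance(Collections.singletonList(cId));
        for (int retry = 0; retry < 5; retry++) {
            ContainerStatus status = cm.getContainerStatuses(gcs).getContainerStatuses().get(0);
            if (target.equals(status.getCapability())) {
                return true;
            }
            Thread.sleep(200);
        }
        return false;
    }
}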

Example 14 with StartContainersRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in the Apache Hadoop project.

From the class TestContainerManager, method testContainerLaunchAndStop.

@Test
public void testContainerLaunchAndStop() throws IOException, InterruptedException, YarnException {
    containerManager.start();
    File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
    // ////// Construct the Container-id
    ContainerId cId = createContainerId(0);
    if (Shell.WINDOWS) {
        fileWriter.println("@echo Hello World!> " + processStartFile);
        fileWriter.println("@echo " + cId + ">> " + processStartFile);
        fileWriter.println("@ping -n 100 127.0.0.1 >nul");
    } else {
        // So that start file is readable by the test
        fileWriter.write("\numask 0");
        fileWriter.write("\necho Hello World! > " + processStartFile);
        fileWriter.write("\necho $$ >> " + processStartFile);
        fileWriter.write("\nexec sleep 100");
    }
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
    containerLaunchContext.setCommands(commands);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
    List<StartContainerRequest> list = new ArrayList<>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    // Now test the stop functionality.
    // Assert that the process is alive
    Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
    // Once more
    Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
    List<ContainerId> containerIds = new ArrayList<>();
    containerIds.add(cId);
    StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
    containerManager.stopContainers(stopRequest);
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
    Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid));
}
Also used : Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) URL(org.apache.hadoop.yarn.api.records.URL) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader) File(java.io.File) PrintWriter(java.io.PrintWriter) StopContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest)
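
The stop half of this test reduces to a short sequence: send a StopContainersRequest, wait for the container to reach COMPLETE, and confirm the recorded exit status is KILLED_BY_APPMASTER. A minimal sketch of that sequence is below; the helper names are illustrative, and the wait-for-completion step is left to the caller because the test performs it with BaseContainerManagerTest.waitForContainerState.

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;

public final class StopContainerSketch {

    /** Stops a container and checks that the NM recorded the AM-initiated kill. */
    public static boolean stopAndCheck(ContainerManagementProtocol cm, ContainerId cId)
            throws Exception {
        List<ContainerId> ids = Collections.singletonList(cId);
        cm.stopContainers(StopContainersRequest.newInstance(ids));
        // The caller should wait for ContainerState.COMPLETE before reading the status.
        ContainerStatus status = cm
            .getContainerStatuses(GetContainerStatusesRequest.newInstance(ids))
            .getContainerStatuses().get(0);
        return status.getExitStatus() == ContainerExitStatus.KILLED_BY_APPMASTER;
    }
}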

Example 15 with StartContainersRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in the Apache Hadoop project.

From the class TestContainerManager, method testIncreaseContainerResourceWithInvalidResource.

@Test
public void testIncreaseContainerResourceWithInvalidResource() throws Exception {
    containerManager.start();
    File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    // Construct the Container-id
    ContainerId cId = createContainerId(0);
    if (Shell.WINDOWS) {
        fileWriter.println("@ping -n 100 127.0.0.1 >nul");
    } else {
        fileWriter.write("\numask 0");
        fileWriter.write("\nexec sleep 100");
    }
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
    containerLaunchContext.setCommands(commands);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
    List<StartContainerRequest> list = new ArrayList<>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    // Make sure the container reaches RUNNING state
    BaseContainerManagerTest.waitForNMContainerState(containerManager, cId, org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
    // Construct container resource increase request,
    List<Token> increaseTokens = new ArrayList<>();
    // Add increase request. The increase request should fail
    // as the current resource does not fit in the target resource
    Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, Resource.newInstance(512, 1), context.getContainerTokenSecretManager(), null);
    increaseTokens.add(containerToken);
    IncreaseContainersResourceRequest increaseRequest = IncreaseContainersResourceRequest.newInstance(increaseTokens);
    IncreaseContainersResourceResponse increaseResponse = containerManager.increaseContainersResource(increaseRequest);
    // Check response
    Assert.assertEquals(0, increaseResponse.getSuccessfullyIncreasedContainers().size());
    Assert.assertEquals(1, increaseResponse.getFailedRequests().size());
    for (Map.Entry<ContainerId, SerializedException> entry : increaseResponse.getFailedRequests().entrySet()) {
        if (cId.equals(entry.getKey())) {
            Assert.assertNotNull("Failed message", entry.getValue().getMessage());
            Assert.assertTrue(entry.getValue().getMessage().contains("The target resource " + Resource.newInstance(512, 1).toString() + " is smaller than the current resource " + Resource.newInstance(1024, 1)));
        } else {
            throw new YarnException("Received failed request from wrong" + " container: " + entry.getKey().toString());
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) HashMap(java.util.HashMap) SerializedException(org.apache.hadoop.yarn.api.records.SerializedException) ArrayList(java.util.ArrayList) IncreaseContainersResourceRequest(org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest) Token(org.apache.hadoop.yarn.api.records.Token) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) URL(org.apache.hadoop.yarn.api.records.URL) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) File(java.io.File) Map(java.util.Map) HashMap(java.util.HashMap) IncreaseContainersResourceResponse(org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse) PrintWriter(java.io.PrintWriter) Test(org.junit.Test)
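
A rejected increase does not surface as a thrown exception; each failure is returned in a map keyed by ContainerId whose value is a SerializedException carrying the reason, which is what the assertions above inspect. A short illustrative sketch of walking that map:

import java.util.Map;

import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.SerializedException;

public final class FailedRequestSketch {

    /** Prints every rejected resize; the NM encodes the reason as a SerializedException. */
    public static void reportFailures(IncreaseContainersResourceResponse response) {
        for (Map.Entry<ContainerId, SerializedException> entry
                : response.getFailedRequests().entrySet()) {
            System.out.println("Resize of " + entry.getKey()
                + " failed: " + entry.getValue().getMessage());
        }
    }
}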

Aggregations

StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest): 43
StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest): 40
ArrayList (java.util.ArrayList): 38
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 37
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 34
Test (org.junit.Test): 31
GetContainerStatusesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest): 21
HashMap (java.util.HashMap): 19
ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus): 18
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 16
Path (org.apache.hadoop.fs.Path): 15
BaseContainerManagerTest (org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest): 15
File (java.io.File): 14
URL (org.apache.hadoop.yarn.api.records.URL): 14
PrintWriter (java.io.PrintWriter): 13
Token (org.apache.hadoop.yarn.api.records.Token): 11
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 10
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 10
StopContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest): 9
ContainerManagementProtocol (org.apache.hadoop.yarn.api.ContainerManagementProtocol): 8