
Example 11 with StartContainerRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in the Apache Hadoop project.

From the class TestContainersMonitor, method testContainerMonitor.

// Test that even if VMEM_PMEM_CHECK is not enabled, the container monitor will run.
@Test
public void testContainerMonitor() throws Exception {
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    containerManager.start();
    ContainerLaunchContext context = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    context.setCommands(Arrays.asList("sleep 6"));
    ContainerId cId = createContainerId(1705);
    // start the container
    StartContainerRequest scRequest = StartContainerRequest.newInstance(context, createContainerToken(cId, DUMMY_RM_IDENTIFIER, this.context.getNodeId(), user, this.context.getContainerTokenSecretManager()));
    StartContainersRequest allRequests = StartContainersRequest.newInstance(Arrays.asList(scRequest));
    containerManager.startContainers(allRequests);
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.RUNNING);
    Thread.sleep(2000);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        public Boolean get() {
            try {
                return containerManager.getContainerStatuses(GetContainerStatusesRequest.newInstance(Arrays.asList(cId))).getContainerStatuses().get(0).getHost() != null;
            } catch (Exception e) {
                return false;
            }
        }
    }, 300, 10000);
}
Also used: StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), IOException (java.io.IOException), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest), BaseContainerManagerTest (org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest), Test (org.junit.Test)
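
GenericTestUtils.waitFor polls the supplied check (here every 300 ms, for up to 10 seconds) and fails the test if the condition never becomes true. On Java 8 the anonymous Supplier can be written as a lambda; a minimal sketch, assuming the same containerManager and cId are in scope:

    // Equivalent wait expressed as a lambda (sketch; behaviour matches the anonymous Supplier above).
    GenericTestUtils.waitFor(() -> {
        try {
            return containerManager.getContainerStatuses(
                    GetContainerStatusesRequest.newInstance(Arrays.asList(cId)))
                    .getContainerStatuses().get(0).getHost() != null;
        } catch (Exception e) {
            return false;
        }
    }, 300, 10000);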

Example 12 with StartContainerRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in the Apache Hadoop project.

From the class TestContainersMonitor, method testContainerKillOnMemoryOverflow.

@Test
public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
    if (!ProcfsBasedProcessTree.isAvailable()) {
        return;
    }
    containerManager.start();
    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
    // umask 0 so that the start file is readable by the test
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nsleep 15");
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);
    Resource r = BuilderUtils.newResource(0, 0);
    ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0);
    Token containerToken = BuilderUtils.newContainerToken(context.getNodeId(), containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier), containerIdentifier);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE, 60);
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM, containerStatus.getExitStatus());
    String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running beyond virtual memory limits. Current usage: " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. " + "Killing container.\nDump of the process-tree for " + cId + " :\n";
    Pattern pat = Pattern.compile(expectedMsgPattern);
    Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: " + containerStatus.getDiagnostics(), true, pat.matcher(containerStatus.getDiagnostics()).find());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", exec.signalContainer(new ContainerSignalContext.Builder().setUser(user).setPid(pid).setSignal(Signal.NULL).build()));
}
Also used: HashMap (java.util.HashMap), GetContainerStatusesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest), ArrayList (java.util.ArrayList), Token (org.apache.hadoop.yarn.api.records.Token), URL (org.apache.hadoop.yarn.api.records.URL), ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FileReader (java.io.FileReader), PrintWriter (java.io.PrintWriter), Path (org.apache.hadoop.fs.Path), StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), Pattern (java.util.regex.Pattern), Resource (org.apache.hadoop.yarn.api.records.Resource), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest), BufferedReader (java.io.BufferedReader), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), BaseContainerManagerTest (org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest), Test (org.junit.Test)
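
The container here requests a Resource of 0 MB and 0 vcores (BuilderUtils.newResource(0, 0)), so the virtual-memory check trips almost as soon as the monitoring thread samples the process tree, and the container exits with ContainerExitStatus.KILLED_EXCEEDED_VMEM. For reference, a sketch of the NodeManager settings that govern these checks; the property constants are real YarnConfiguration keys, while the values shown are the usual defaults rather than what this particular test class configures:

    // Sketch: NodeManager memory-enforcement knobs exercised by this test.
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);  // kill on virtual-memory overflow
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, true);  // kill on physical-memory overflow
    conf.setFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, 2.1f);       // vmem limit = ratio * pmem allocation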

Example 13 with StartContainerRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in the Apache Hadoop project.

From the class Application, method assign.

private synchronized void assign(SchedulerRequestKey schedulerKey, NodeType type, List<Container> containers) throws IOException, YarnException {
    for (Iterator<Container> i = containers.iterator(); i.hasNext(); ) {
        Container container = i.next();
        String host = container.getNodeId().toString();
        if (Resources.equals(requestSpec.get(schedulerKey), container.getResource())) {
            // See which task can use this container
            for (Iterator<Task> t = tasks.get(schedulerKey).iterator(); t.hasNext(); ) {
                Task task = t.next();
                if (task.getState() == State.PENDING && task.canSchedule(type, host)) {
                    NodeManager nodeManager = getNodeManager(host);
                    task.start(nodeManager, container.getId());
                    i.remove();
                    // Track application resource usage
                    Resources.addTo(used, container.getResource());
                    LOG.info("Assigned container (" + container + ") of type " + type + " to task " + task.getTaskId() + " at priority " + schedulerKey.getPriority() + " on node " + nodeManager.getHostName() + ", currently using " + used + " resources");
                    // Update resource requests
                    updateResourceRequests(requests.get(schedulerKey), type, task);
                    // Launch the container
                    StartContainerRequest scRequest = StartContainerRequest.newInstance(createCLC(), container.getContainerToken());
                    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
                    list.add(scRequest);
                    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
                    nodeManager.startContainers(allRequests);
                    break;
                }
            }
        }
    }
}
Also used: StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), Container (org.apache.hadoop.yarn.api.records.Container), ArrayList (java.util.ArrayList), StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest)
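
createCLC() is not shown in this excerpt. A minimal sketch of building a ContainerLaunchContext for such a request, with a purely illustrative command and empty resource/environment maps (an assumption, not the Application class's actual implementation):

    // Sketch: a bare-bones ContainerLaunchContext; command, resources and environment are placeholders.
    private ContainerLaunchContext createCLC() {
        ContainerLaunchContext clc = Records.newRecord(ContainerLaunchContext.class);
        clc.setCommands(Collections.singletonList("sleep 60"));                // illustrative command
        clc.setLocalResources(Collections.<String, LocalResource>emptyMap());  // nothing to localize
        clc.setEnvironment(Collections.<String, String>emptyMap());            // no extra environment
        return clc;
    }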

Example 14 with StartContainerRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in the Apache Hadoop project.

From the class NodeManager, method startContainers.

@Override
public synchronized StartContainersResponse startContainers(StartContainersRequest requests) throws YarnException {
    for (StartContainerRequest request : requests.getStartContainerRequests()) {
        Token containerToken = request.getContainerToken();
        ContainerTokenIdentifier tokenId = null;
        try {
            tokenId = BuilderUtils.newContainerTokenIdentifier(containerToken);
        } catch (IOException e) {
            throw RPCUtil.getRemoteException(e);
        }
        ContainerId containerID = tokenId.getContainerID();
        ApplicationId applicationId = containerID.getApplicationAttemptId().getApplicationId();
        List<Container> applicationContainers = containers.get(applicationId);
        if (applicationContainers == null) {
            applicationContainers = new ArrayList<Container>();
            containers.put(applicationId, applicationContainers);
        }
        // Sanity check
        for (Container container : applicationContainers) {
            if (container.getId().compareTo(containerID) == 0) {
                throw new IllegalStateException("Container " + containerID + " already setup on node " + containerManagerAddress);
            }
        }
        Container container = BuilderUtils.newContainer(containerID, this.nodeId, nodeHttpAddress, // DKDC - Doesn't matter
                tokenId.getResource(), // DKDC - Doesn't matter
                null, // DKDC - Doesn't matter
                null);
        ContainerStatus containerStatus = BuilderUtils.newContainerStatus(container.getId(), ContainerState.NEW, "", -1000, container.getResource());
        applicationContainers.add(container);
        containerStatusMap.put(container, containerStatus);
        Resources.subtractFrom(available, tokenId.getResource());
        Resources.addTo(used, tokenId.getResource());
        if (LOG.isDebugEnabled()) {
            LOG.debug("startContainer:" + " node=" + containerManagerAddress + " application=" + applicationId + " container=" + container + " available=" + available + " used=" + used);
        }
    }
    StartContainersResponse response = StartContainersResponse.newInstance(null, null, null);
    return response;
}
Also used: Container (org.apache.hadoop.yarn.api.records.Container), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Token (org.apache.hadoop.yarn.api.records.Token), IOException (java.io.IOException), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest), ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier)
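
This test stub returns an empty response (newInstance(null, null, null)). A sketch of a more realistic reply that reports the started containers back to the caller, assuming the newInstance argument order is (aux-service metadata, succeeded container ids, failed requests) and that succeededIds is collected inside the loop above:

    // Sketch: report every container accepted by the loop above as successfully started.
    List<ContainerId> succeededIds = new ArrayList<ContainerId>();
    // ... populated inside the for-loop with: succeededIds.add(containerID);
    StartContainersResponse response = StartContainersResponse.newInstance(
            new HashMap<String, ByteBuffer>(),                  // no auxiliary-service metadata
            succeededIds,                                       // containers reported as started
            new HashMap<ContainerId, SerializedException>());   // no failed requests
    return response;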

Example 15 with StartContainerRequest

Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in the Apache Hadoop project.

From the class StartContainersRequestPBImpl, method addLocalRequestsToProto.

private void addLocalRequestsToProto() {
    maybeInitBuilder();
    builder.clearStartContainerRequest();
    List<StartContainerRequestProto> protoList = new ArrayList<StartContainerRequestProto>();
    for (StartContainerRequest r : this.requests) {
        protoList.add(convertToProtoFormat(r));
    }
    builder.addAllStartContainerRequest(protoList);
}
Also used: StartContainerRequestProto (org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto), ArrayList (java.util.ArrayList), StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest)
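
addLocalRequestsToProto is one half of the lazy local/proto synchronization used throughout the YARN *PBImpl record classes: setters only update a Java-side list, and the list is flushed into the protobuf builder when the proto is actually needed. A rough sketch of how the surrounding class typically wires this up (method bodies simplified; treat it as an illustration of the pattern rather than the exact source):

    // Sketch of the lazy sync pattern around addLocalRequestsToProto (simplified).
    public void setStartContainerRequests(List<StartContainerRequest> requests) {
        maybeInitBuilder();
        if (requests == null) {
            builder.clearStartContainerRequest();  // nothing local to carry over to the proto
        }
        this.requests = requests;                  // proto is rebuilt lazily from this list
    }

    private void mergeLocalToBuilder() {
        if (this.requests != null) {
            addLocalRequestsToProto();             // flush the Java list into the builder
        }
    }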

Aggregations

StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest): 50
ArrayList (java.util.ArrayList): 42
StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest): 40
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 40
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 39
Test (org.junit.Test): 30
HashMap (java.util.HashMap): 22
GetContainerStatusesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest): 20
ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus): 19
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 17
Path (org.apache.hadoop.fs.Path): 15
URL (org.apache.hadoop.yarn.api.records.URL): 15
BaseContainerManagerTest (org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest): 15
File (java.io.File): 14
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 14
PrintWriter (java.io.PrintWriter): 13
Token (org.apache.hadoop.yarn.api.records.Token): 13
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 13
StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse): 10
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 10