
Example 6 with Token

Use of org.apache.hadoop.yarn.api.records.Token in project hadoop by apache.

From the class TestNMLeveldbStateStoreService, method createContainerRequest:

private StartContainerRequest createContainerRequest(ContainerId containerId) {
    LocalResource lrsrc = LocalResource.newInstance(
        URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"),
        LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
        123L, 1234567890L);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("rsrc", lrsrc);
    Map<String, String> env = new HashMap<String, String>();
    env.put("somevar", "someval");
    List<String> containerCmds = new ArrayList<String>();
    containerCmds.add("somecmd");
    containerCmds.add("somearg");
    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
    serviceData.put("someservice", ByteBuffer.wrap(new byte[] { 0x1, 0x2, 0x3 }));
    ByteBuffer containerTokens = ByteBuffer.wrap(new byte[] { 0x7, 0x8, 0x9, 0xa });
    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
    acls.put(ApplicationAccessType.VIEW_APP, "viewuser");
    acls.put(ApplicationAccessType.MODIFY_APP, "moduser");
    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
        localResources, env, containerCmds, serviceData, containerTokens, acls);
    Resource containerRsrc = Resource.newInstance(1357, 3);
    ContainerTokenIdentifier containerTokenId = new ContainerTokenIdentifier(
        containerId, "host", "user", containerRsrc, 9876543210L, 42, 2468,
        Priority.newInstance(7), 13579);
    Token containerToken = Token.newInstance(containerTokenId.getBytes(),
        ContainerTokenIdentifier.KIND.toString(), "password".getBytes(),
        "tokenservice");
    return StartContainerRequest.newInstance(clc, containerToken);
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) Token(org.apache.hadoop.yarn.api.records.Token) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ByteBuffer(java.nio.ByteBuffer) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType)
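The Token here wraps the serialized ContainerTokenIdentifier together with a password and a token service name. As a hedged usage sketch (not part of the test above, and assuming a containerManager like the one in the next example is in scope), such a request would be batched into a StartContainersRequest before being submitted:

private void startOneContainer(ContainerId containerId) throws Exception {
    // Hedged sketch: batch the single request and submit it to a
    // ContainerManagementProtocol implementation (containerManager is assumed).
    StartContainerRequest request = createContainerRequest(containerId);
    List<StartContainerRequest> requests = new ArrayList<StartContainerRequest>();
    requests.add(request);
    StartContainersRequest batch = StartContainersRequest.newInstance(requests);
    containerManager.startContainers(batch);
}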

Example 7 with Token

Use of org.apache.hadoop.yarn.api.records.Token in project hadoop by apache.

From the class TestContainersMonitor, method testContainerKillOnMemoryOverflow:

@Test
public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
    if (!ProcfsBasedProcessTree.isAvailable()) {
        return;
    }
    containerManager.start();
    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
    // So that start file is readable by the test.
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nsleep 15");
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);
    Resource r = BuilderUtils.newResource(0, 0);
    ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(
        cId, context.getNodeId().toString(), user, r,
        System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER,
        Priority.newInstance(0), 0);
    Token containerToken = BuilderUtils.newContainerToken(context.getNodeId(),
        containerManager.getContext().getContainerTokenSecretManager()
            .createPassword(containerIdentifier), containerIdentifier);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE, 60);
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM, containerStatus.getExitStatus());
    String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId
        + "\\] is running beyond virtual memory limits. Current usage: "
        + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "
        + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "
        + "Killing container.\nDump of the process-tree for " + cId + " :\n";
    Pattern pat = Pattern.compile(expectedMsgPattern);
    Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern
        + "\n\nObserved message is: " + containerStatus.getDiagnostics(), true,
        pat.matcher(containerStatus.getDiagnostics()).find());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", exec.signalContainer(new ContainerSignalContext.Builder().setUser(user).setPid(pid).setSignal(Signal.NULL).build()));
}
Also used : HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.yarn.api.records.Token) URL(org.apache.hadoop.yarn.api.records.URL) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileReader(java.io.FileReader) PrintWriter(java.io.PrintWriter) Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) Pattern(java.util.regex.Pattern) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) BufferedReader(java.io.BufferedReader) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
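The KILLED_EXCEEDED_VMEM exit status asserted above comes from the NodeManager's virtual-memory check. A hedged configuration sketch (not taken from the test; exact defaults vary by Hadoop version) of the settings that control that check:

private static Configuration vmemCheckedConfiguration() {
    // org.apache.hadoop.conf.Configuration / org.apache.hadoop.yarn.conf.YarnConfiguration
    Configuration conf = new YarnConfiguration();
    // Enable the virtual-memory check performed by the ContainersMonitor.
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
    // Allow roughly 2.1 bytes of virtual memory per byte of granted physical memory.
    conf.setFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, 2.1f);
    return conf;
}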

Example 8 with Token

Use of org.apache.hadoop.yarn.api.records.Token in project hadoop by apache.

From the class TestNMContainerTokenSecretManager, method createContainerTokenId:

private static ContainerTokenIdentifier createContainerTokenId(ContainerId cid,
        NodeId nodeId, String user, NMContainerTokenSecretManager secretMgr)
        throws IOException {
    long rmid = cid.getApplicationAttemptId().getApplicationId().getClusterTimestamp();
    ContainerTokenIdentifier ctid = new ContainerTokenIdentifier(cid,
        nodeId.toString(), user, BuilderUtils.newResource(1024, 1),
        System.currentTimeMillis() + 100000L, secretMgr.getCurrentKey().getKeyId(),
        rmid, Priority.newInstance(0), 0);
    Token token = BuilderUtils.newContainerToken(nodeId,
        secretMgr.createPassword(ctid), ctid);
    return BuilderUtils.newContainerTokenIdentifier(token);
}
Also used : Token(org.apache.hadoop.yarn.api.records.Token) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier)
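BuilderUtils.newContainerTokenIdentifier reverses the construction above by deserializing the token's identifier bytes. A hedged sketch of that decoding step (not part of the test), assuming the usual Writable layout of ContainerTokenIdentifier:

private static ContainerTokenIdentifier decodeIdentifier(Token containerToken) throws IOException {
    // Hedged sketch: copy the identifier bytes out of the record and read them
    // back with the Writable API (DataInputBuffer is org.apache.hadoop.io).
    ByteBuffer idBuffer = containerToken.getIdentifier().duplicate();
    byte[] identifierBytes = new byte[idBuffer.remaining()];
    idBuffer.get(identifierBytes);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(identifierBytes, identifierBytes.length);
    ContainerTokenIdentifier identifier = new ContainerTokenIdentifier();
    identifier.readFields(in);
    return identifier;
}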

Example 9 with Token

Use of org.apache.hadoop.yarn.api.records.Token in project hadoop by apache.

From the class TestOpportunisticContainerAllocation, method testMixedAllocationAndRelease:

@Test(timeout = 60000)
public void testMixedAllocationAndRelease() throws YarnException, IOException {
    // setup container request
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    int containersRequestedNode = amClient.getTable(0).get(priority, node, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    int containersRequestedRack = amClient.getTable(0).get(priority, rack, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    int containersRequestedAny = amClient.getTable(0).get(priority, ResourceRequest.ANY, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    int oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest.getNumContainers();
    assertEquals(4, containersRequestedNode);
    assertEquals(4, containersRequestedRack);
    assertEquals(4, containersRequestedAny);
    assertEquals(2, oppContainersRequestedAny);
    assertEquals(4, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    containersRequestedNode = amClient.getTable(0).get(priority, node, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    containersRequestedRack = amClient.getTable(0).get(priority, rack, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    containersRequestedAny = amClient.getTable(0).get(priority, ResourceRequest.ANY, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest.getNumContainers();
    assertEquals(2, containersRequestedNode);
    assertEquals(2, containersRequestedRack);
    assertEquals(2, containersRequestedAny);
    assertEquals(1, oppContainersRequestedAny);
    assertEquals(4, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    // RM should allocate container within 2 calls to allocate()
    int allocatedContainerCount = 0;
    int allocatedOpportContainerCount = 0;
    int iterationsLeft = 50;
    Set<ContainerId> releases = new TreeSet<>();
    amClient.getNMTokenCache().clearCache();
    Assert.assertEquals(0, amClient.getNMTokenCache().numberOfTokensInCache());
    HashMap<String, Token> receivedNMTokens = new HashMap<>();
    while (allocatedContainerCount < containersRequestedAny + oppContainersRequestedAny && iterationsLeft-- > 0) {
        AllocateResponse allocResponse = amClient.allocate(0.1f);
        assertEquals(0, amClient.ask.size());
        assertEquals(0, amClient.release.size());
        allocatedContainerCount += allocResponse.getAllocatedContainers().size();
        for (Container container : allocResponse.getAllocatedContainers()) {
            if (container.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
                allocatedOpportContainerCount++;
            }
            ContainerId rejectContainerId = container.getId();
            releases.add(rejectContainerId);
        }
        for (NMToken token : allocResponse.getNMTokens()) {
            String nodeID = token.getNodeId().toString();
            receivedNMTokens.put(nodeID, token.getToken());
        }
        if (allocatedContainerCount < containersRequestedAny) {
            // sleep to let NM's heartbeat to RM and trigger allocations
            sleep(100);
        }
    }
    assertEquals(containersRequestedAny + oppContainersRequestedAny, allocatedContainerCount);
    assertEquals(oppContainersRequestedAny, allocatedOpportContainerCount);
    for (ContainerId rejectContainerId : releases) {
        amClient.releaseAssignedContainer(rejectContainerId);
    }
    assertEquals(3, amClient.release.size());
    assertEquals(0, amClient.ask.size());
    // need to tell the AMRMClient that we don't need these resources anymore
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    assertEquals(4, amClient.ask.size());
    iterationsLeft = 3;
    // do a few iterations to ensure RM is not going to send new containers
    while (iterationsLeft-- > 0) {
        // inform RM of rejection
        AllocateResponse allocResponse = amClient.allocate(0.1f);
        // RM did not send new containers because AM does not need any
        assertEquals(0, allocResponse.getAllocatedContainers().size());
        if (allocResponse.getCompletedContainersStatuses().size() > 0) {
            for (ContainerStatus cStatus : allocResponse.getCompletedContainersStatuses()) {
                if (releases.contains(cStatus.getContainerId())) {
                    assertEquals(cStatus.getState(), ContainerState.COMPLETE);
                    assertEquals(-100, cStatus.getExitStatus());
                    releases.remove(cStatus.getContainerId());
                }
            }
        }
        if (iterationsLeft > 0) {
            // sleep to make sure NM's heartbeat
            sleep(100);
        }
    }
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
}
Also used : AMRMClient(org.apache.hadoop.yarn.client.api.AMRMClient) NMToken(org.apache.hadoop.yarn.api.records.NMToken) HashMap(java.util.HashMap) Token(org.apache.hadoop.yarn.api.records.Token) AllocateResponse(org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse) UpdatedContainer(org.apache.hadoop.yarn.api.records.UpdatedContainer) Container(org.apache.hadoop.yarn.api.records.Container) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) TreeSet(java.util.TreeSet) Test(org.junit.Test)
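The NM tokens collected into receivedNMTokens are what an application master later needs when talking to each node's ContainerManagementProtocol. A hedged sketch of feeding them into the client-side NMTokenCache (the AMRMClient used above normally does this itself during allocate(), which is why numberOfTokensInCache grows):

// Hedged sketch, not part of the test: register received NM tokens so that a
// subsequent NMClient call to the same node can authenticate.
for (NMToken nmToken : allocResponse.getNMTokens()) {
    amClient.getNMTokenCache().setToken(nmToken.getNodeId().toString(), nmToken.getToken());
}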

Example 10 with Token

Use of org.apache.hadoop.yarn.api.records.Token in project hadoop by apache.

From the class TestYarnApiClasses, method testRenewDelegationTokenRequestPBImpl:

/**
  * Test RenewDelegationTokenRequestPBImpl.
  * Test a transformation to prototype and back
  */
@Test
public void testRenewDelegationTokenRequestPBImpl() {
    Token token = getDelegationToken();
    RenewDelegationTokenRequestPBImpl original = new RenewDelegationTokenRequestPBImpl();
    original.setDelegationToken(token);
    RenewDelegationTokenRequestProto protoType = original.getProto();
    RenewDelegationTokenRequestPBImpl copy = new RenewDelegationTokenRequestPBImpl(protoType);
    assertNotNull(copy.getDelegationToken());
    //compare source and converted
    assertEquals(token, copy.getDelegationToken());
}
Also used : RenewDelegationTokenRequestPBImpl(org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl) RenewDelegationTokenRequestProto(org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto) Token(org.apache.hadoop.yarn.api.records.Token) Test(org.junit.Test)
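The getDelegationToken() helper is not shown here; a hypothetical stand-in (all field values are illustrative, not taken from the test) could build the record with Token.newInstance:

private Token getDelegationToken() {
    // Hypothetical helper: identifier, kind, password and service are made up.
    return Token.newInstance("identifier".getBytes(), "RM_DELEGATION_TOKEN",
        "password".getBytes(), "localhost:8032");
}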

Aggregations

Token (org.apache.hadoop.yarn.api.records.Token): 53
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 29
Test (org.junit.Test): 24
ArrayList (java.util.ArrayList): 19
HashMap (java.util.HashMap): 17
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 16
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 15
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 14
StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest): 13
ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier): 13
Resource (org.apache.hadoop.yarn.api.records.Resource): 12
StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest): 11
ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus): 11
InetSocketAddress (java.net.InetSocketAddress): 10
NMToken (org.apache.hadoop.yarn.api.records.NMToken): 10
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 10
Container (org.apache.hadoop.yarn.api.records.Container): 9
InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken): 8
AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse): 8
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 8