Use of org.apache.hadoop.yarn.security.ContainerTokenIdentifier in project hadoop by apache.
From the class TestContainerManagerSecurity, method testContainerTokenWithEpoch.
/**
* This tests whether a containerId is serialized/deserialized with epoch.
*
* @throws IOException
* @throws InterruptedException
* @throws YarnException
*/
private void testContainerTokenWithEpoch(Configuration conf) throws IOException, InterruptedException, YarnException {
LOG.info("Running test for serializing/deserializing containerIds");
NMTokenSecretManagerInRM nmTokenSecretManagerInRM = yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
NodeManager nm = yarnCluster.getNodeManager(0);
NMTokenSecretManagerInNM nmTokenSecretManagerInNM = nm.getNMContext().getNMTokenSecretManager();
String user = "test";
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
NodeId nodeId = nm.getNMContext().getNodeId();
// Both key ids should be equal.
Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(), nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
// Creating a normal Container Token
RMContainerTokenSecretManager containerTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
Resource r = Resource.newInstance(1230, 2);
Token containerToken = containerTokenSecretManager.createContainerToken(cId, 0, nodeId, user, r, Priority.newInstance(0), 0);
ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier();
byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
containerTokenIdentifier.readFields(dib);
Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
Assert.assertEquals(cId.toString(), containerTokenIdentifier.getContainerID().toString());
Token nmToken = nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
YarnRPC rpc = YarnRPC.create(conf);
testStartContainer(rpc, appAttemptId, nodeId, containerToken, nmToken, false);
List<ContainerId> containerIds = new LinkedList<ContainerId>();
containerIds.add(cId);
ContainerManagementProtocol proxy = getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
GetContainerStatusesResponse res = proxy.getContainerStatuses(GetContainerStatusesRequest.newInstance(containerIds));
Assert.assertNotNull(res.getContainerStatuses().get(0));
Assert.assertEquals(cId, res.getContainerStatuses().get(0).getContainerId());
Assert.assertEquals(cId.toString(), res.getContainerStatuses().get(0).getContainerId().toString());
}
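The assertions above boil down to a Writable round trip on the identifier itself: getBytes() on one ContainerTokenIdentifier, readFields() on a fresh one. A minimal standalone sketch of that round trip, outside the MiniYARNCluster, follows; the class name, hostname, user, resource, and key/RM-identifier values are placeholders for illustration, and the 9-argument ContainerTokenIdentifier constructor is the one used elsewhere on this page.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

public class ContainerTokenIdentifierRoundTrip {
    public static void main(String[] args) throws IOException {
        // Container id whose upper bits carry the epoch, as in the test above.
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
            ApplicationId.newInstance(1, 1), 0);
        ContainerId cId = ContainerId.newContainerId(attemptId, (5L << 40) | 3L);

        // Placeholder host/user/resource/key values; only the container id matters for this check.
        ContainerTokenIdentifier original = new ContainerTokenIdentifier(
            cId, "somehost:1234", "test", Resource.newInstance(1024, 1),
            System.currentTimeMillis() + 120000L, 1, 42L,
            Priority.newInstance(0), 0);

        // Serialize via the TokenIdentifier/Writable contract...
        byte[] bytes = original.getBytes();

        // ...and read the bytes back into a fresh identifier.
        ContainerTokenIdentifier copy = new ContainerTokenIdentifier();
        DataInputBuffer dib = new DataInputBuffer();
        dib.reset(bytes, bytes.length);
        copy.readFields(dib);

        // The epoch encoded in the container id must survive the round trip.
        if (!cId.equals(copy.getContainerID())) {
            throw new AssertionError("epoch lost: " + copy.getContainerID());
        }
        System.out.println("round-tripped " + copy.getContainerID());
    }
}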
Use of org.apache.hadoop.yarn.security.ContainerTokenIdentifier in project hadoop by apache.
From the class TestContainerAllocation, method getLogAggregationContextFromContainerToken.
private LogAggregationContext getLogAggregationContextFromContainerToken(MockRM rm1, MockNM nm1, LogAggregationContext logAggregationContext) throws Exception {
RMApp app2 = rm1.submitApp(200, logAggregationContext);
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
nm1.nodeHeartbeat(true);
// request a container.
am2.allocate("127.0.0.1", 512, 1, new ArrayList<ContainerId>());
ContainerId containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED);
// acquire the container.
List<Container> containers = am2.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>()).getAllocatedContainers();
Assert.assertEquals(containerId, containers.get(0).getId());
// container token is generated.
Assert.assertNotNull(containers.get(0).getContainerToken());
ContainerTokenIdentifier token = BuilderUtils.newContainerTokenIdentifier(containers.get(0).getContainerToken());
return token.getLogAggregationContext();
}
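BuilderUtils.newContainerTokenIdentifier is a server-side utility class; when it is not available, the same information can be recovered by hand from the token's identifier bytes using the readFields pattern from the first example. A minimal sketch, assuming a heap-backed identifier buffer as in the examples on this page (the class and method names here are made up for illustration):

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

public final class ContainerTokenDecoder {

    /**
     * Decodes the identifier bytes of a container token and returns the
     * LogAggregationContext carried inside it (may be null if none was set).
     */
    public static LogAggregationContext logAggregationContextOf(Token containerToken)
            throws IOException {
        byte[] identifierBytes = containerToken.getIdentifier().array();
        DataInputBuffer in = new DataInputBuffer();
        in.reset(identifierBytes, identifierBytes.length);

        ContainerTokenIdentifier identifier = new ContainerTokenIdentifier();
        identifier.readFields(in);
        return identifier.getLogAggregationContext();
    }

    private ContainerTokenDecoder() {
    }
}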
Use of org.apache.hadoop.yarn.security.ContainerTokenIdentifier in project hadoop by apache.
From the class TestApplicationMasterService, method testRMIdentifierOnContainerAllocation.
@Test(timeout = 3000000)
public void testRMIdentifierOnContainerAllocation() throws Exception {
MockRM rm = new MockRM(conf);
rm.start();
// Register node1
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
// Submit an application
RMApp app1 = rm.submitApp(2048);
// kick the scheduling
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.addRequests(new String[] { "127.0.0.1" }, GB, 1, 1);
// send the request
AllocateResponse alloc1Response = am1.schedule();
// kick the scheduler
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    sleep(1000);
    alloc1Response = am1.schedule();
}
// assert RMIdentifier is set properly in allocated containers
Container allocatedContainer = alloc1Response.getAllocatedContainers().get(0);
ContainerTokenIdentifier tokenId = BuilderUtils.newContainerTokenIdentifier(allocatedContainer.getContainerToken());
Assert.assertEquals(MockRM.getClusterTimeStamp(), tokenId.getRMIdentifier());
rm.stop();
}
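The RM identifier checked here is only one of several fields the RM embeds in the identifier when it creates the token. A small sketch of inspecting a decoded identifier through its public getters (the class name is illustrative; the decode step is the same readFields pattern shown in the first example):

import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

public final class ContainerTokenInspector {

    /** Prints the metadata embedded in an already-decoded container token identifier. */
    public static void describe(ContainerTokenIdentifier id) {
        System.out.println("container:    " + id.getContainerID());
        System.out.println("node:         " + id.getNmHostAddress());
        System.out.println("submitter:    " + id.getApplicationSubmitter());
        System.out.println("resource:     " + id.getResource());
        System.out.println("rmIdentifier: " + id.getRMIdentifier()); // cluster timestamp of the issuing RM
        System.out.println("masterKeyId:  " + id.getMasterKeyId());
        System.out.println("expires:      " + id.getExpiryTimeStamp());
        System.out.println("priority:     " + id.getPriority());
        System.out.println("created:      " + id.getCreationTime());
    }

    private ContainerTokenInspector() {
    }
}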
Use of org.apache.hadoop.yarn.security.ContainerTokenIdentifier in project hadoop by apache.
From the class TestNMLeveldbStateStoreService, method createContainerRequest.
private StartContainerRequest createContainerRequest(ContainerId containerId) {
LocalResource lrsrc = LocalResource.newInstance(URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L, 1234567890L);
Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
localResources.put("rsrc", lrsrc);
Map<String, String> env = new HashMap<String, String>();
env.put("somevar", "someval");
List<String> containerCmds = new ArrayList<String>();
containerCmds.add("somecmd");
containerCmds.add("somearg");
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
serviceData.put("someservice", ByteBuffer.wrap(new byte[] { 0x1, 0x2, 0x3 }));
ByteBuffer containerTokens = ByteBuffer.wrap(new byte[] { 0x7, 0x8, 0x9, 0xa });
Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
acls.put(ApplicationAccessType.VIEW_APP, "viewuser");
acls.put(ApplicationAccessType.MODIFY_APP, "moduser");
ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, env, containerCmds, serviceData, containerTokens, acls);
Resource containerRsrc = Resource.newInstance(1357, 3);
ContainerTokenIdentifier containerTokenId = new ContainerTokenIdentifier(containerId, "host", "user", containerRsrc, 9876543210L, 42, 2468, Priority.newInstance(7), 13579);
Token containerToken = Token.newInstance(containerTokenId.getBytes(), ContainerTokenIdentifier.KIND.toString(), "password".getBytes(), "tokenservice");
return StartContainerRequest.newInstance(clc, containerToken);
}
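The last two lines, wrapping the serialized identifier into a YARN Token and then into a StartContainerRequest, can be isolated as below. This is a sketch only: the password and service strings are placeholders, since outside a test the password is produced by the RM's container token secret manager and verified by the NM before the container is started.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

public final class StartRequestSketch {

    /**
     * Wraps an already-built ContainerTokenIdentifier into a YARN Token and a
     * StartContainerRequest.
     */
    public static StartContainerRequest wrap(ContainerLaunchContext clc,
            ContainerTokenIdentifier identifier) {
        Token containerToken = Token.newInstance(
            identifier.getBytes(),                                    // serialized identifier
            ContainerTokenIdentifier.KIND.toString(),                 // token kind ("ContainerToken")
            "placeholder-password".getBytes(StandardCharsets.UTF_8),  // placeholder only
            "placeholder-service");                                   // placeholder only
        return StartContainerRequest.newInstance(clc, containerToken);
    }

    private StartRequestSketch() {
    }
}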
Use of org.apache.hadoop.yarn.security.ContainerTokenIdentifier in project hadoop by apache.
From the class TestContainersMonitor, method testContainerKillOnMemoryOverflow.
@Test
public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
if (!ProcfsBasedProcessTree.isAvailable()) {
    return;
}
containerManager.start();
File scriptFile = new File(tmpDir, "scriptFile.sh");
PrintWriter fileWriter = new PrintWriter(scriptFile);
File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
// So that the start file is readable by the test.
fileWriter.write("\numask 0");
fileWriter.write("\necho Hello World! > " + processStartFile);
fileWriter.write("\necho $$ >> " + processStartFile);
fileWriter.write("\nsleep 15");
fileWriter.close();
ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
// Construct the container id.
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(scriptFile.lastModified());
String destinationFile = "dest_file";
Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
localResources.put(destinationFile, rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
List<String> commands = new ArrayList<String>();
commands.add("/bin/bash");
commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
Resource r = BuilderUtils.newResource(0, 0);
ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0);
Token containerToken = BuilderUtils.newContainerToken(context.getNodeId(), containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier), containerIdentifier);
StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
int timeoutSecs = 0;
while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
}
Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
// Now verify the contents of the file
BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
Assert.assertEquals("Hello World!", reader.readLine());
// Get the pid of the process
String pid = reader.readLine().trim();
// No more lines
Assert.assertEquals(null, reader.readLine());
BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE, 60);
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(cId);
GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM, containerStatus.getExitStatus());
String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running beyond virtual memory limits. Current usage: " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. " + "Killing container.\nDump of the process-tree for " + cId + " :\n";
Pattern pat = Pattern.compile(expectedMsgPattern);
Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: " + containerStatus.getDiagnostics(), true, pat.matcher(containerStatus.getDiagnostics()).find());
// Assert that the process is not alive anymore
Assert.assertFalse("Process is still alive!", exec.signalContainer(new ContainerSignalContext.Builder().setUser(user).setPid(pid).setSignal(Signal.NULL).build()));
}
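As far as ContainerTokenIdentifier is concerned, the interesting point of this test is that the Resource carried in the identifier is what the containers monitor uses to derive the memory limits; the zero-sized resource above is what forces the KILLED_EXCEEDED_VMEM exit. A minimal sketch of building such a token outside the test harness, assuming a placeholder password instead of one issued by the NM context's container token secret manager (the class and method names are illustrative):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;

public final class MonitoredContainerTokenSketch {

    /** Builds a container token whose identifier advertises the resource limit to enforce. */
    public static Token newToken(NodeId nodeId, String user, long rmIdentifier) {
        ContainerId cId = ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 1), 0);

        // A zero-sized Resource, as in the test above, so the monitor treats the
        // container as exceeding its virtual-memory limit.
        ContainerTokenIdentifier identifier = new ContainerTokenIdentifier(
            cId, nodeId.toString(), user, Resource.newInstance(0, 0),
            System.currentTimeMillis() + 120000L, 123, rmIdentifier,
            Priority.newInstance(0), 0);

        byte[] placeholderPassword = new byte[] { 0x1, 0x2, 0x3 };  // placeholder only
        return BuilderUtils.newContainerToken(nodeId, placeholderPassword, identifier);
    }

    private MonitoredContainerTokenSketch() {
    }
}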