Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in project hadoop by apache.
The class TestEventFlow, method testSuccessfulContainerLaunch.
@Test
public void testSuccessfulContainerLaunch() throws InterruptedException, IOException, YarnException {
  FileContext localFS = FileContext.getLocalFSFileContext();
  localFS.delete(new Path(localDir.getAbsolutePath()), true);
  localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
  localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
  localDir.mkdir();
  localLogDir.mkdir();
  remoteLogDir.mkdir();
  YarnConfiguration conf = new YarnConfiguration();
  Context context = new NMContext(new NMContainerTokenSecretManager(conf), new NMTokenSecretManagerInNM(), null, null, new NMNullStateStoreService(), false, conf) {
    @Override
    public int getHttpPort() {
      return 1234;
    }
  };
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:" + ServerSocketUtil.getPort(8040, 10));
  ContainerExecutor exec = new DefaultContainerExecutor();
  exec.setConf(conf);
  DeletionService del = new DeletionService(exec);
  Dispatcher dispatcher = new AsyncDispatcher();
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
  healthChecker.init(conf);
  NodeManagerMetrics metrics = NodeManagerMetrics.create();
  NodeStatusUpdater nodeStatusUpdater = new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics) {
    @Override
    protected ResourceTracker getRMClient() {
      return new LocalRMInterface();
    }

    @Override
    protected void stopRMProxy() {
      return;
    }

    @Override
    protected void startStatusUpdater() {
      // Don't start any updating thread.
      return;
    }

    @Override
    public long getRMIdentifier() {
      return SIMULATED_RM_IDENTIFIER;
    }
  };
  DummyContainerManager containerManager = new DummyContainerManager(context, exec, del, nodeStatusUpdater, metrics, dirsHandler);
  nodeStatusUpdater.init(conf);
  ((NMContext) context).setContainerManager(containerManager);
  nodeStatusUpdater.start();
  ((NMContext) context).setNodeStatusUpdater(nodeStatusUpdater);
  containerManager.init(conf);
  containerManager.start();
  ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  ApplicationId applicationId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 0);
  ContainerId cID = ContainerId.newContainerId(applicationAttemptId, 0);
  String user = "testing";
  StartContainerRequest scRequest = StartContainerRequest.newInstance(launchContext, TestContainerManager.createContainerToken(cID, SIMULATED_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  BaseContainerManagerTest.waitForContainerState(containerManager, cID, Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED), 20);
  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(cID);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.COMPLETE);
  containerManager.stop();
}
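Distilled from the test above, the core client-side pattern is: build a ContainerLaunchContext, pair it with a container token in a StartContainerRequest, batch one or more of those into a StartContainersRequest, and hand the batch to the container manager; stopping goes through a StopContainersRequest keyed by ContainerId. The sketch below is a condensed outline of that round trip, not the test's exact code; the ContainerManagementProtocol handle, launch context, token, and container id are assumed to be supplied by the caller, and imports from org.apache.hadoop.yarn.api.records and org.apache.hadoop.yarn.api.protocolrecords are elided to match the snippets above.

// A condensed sketch of the start/stop round trip exercised above.
// cm, launchContext, containerToken, and containerId are assumed inputs.
static void startAndStop(ContainerManagementProtocol cm,
    ContainerLaunchContext launchContext, Token containerToken,
    ContainerId containerId) throws YarnException, IOException {
  StartContainerRequest start =
      StartContainerRequest.newInstance(launchContext, containerToken);
  StartContainersRequest startBatch =
      StartContainersRequest.newInstance(Collections.singletonList(start));
  cm.startContainers(startBatch);
  // ... wait for the container to reach RUNNING, then stop it by id.
  StopContainersRequest stopBatch =
      StopContainersRequest.newInstance(Collections.singletonList(containerId));
  cm.stopContainers(stopBatch);
}

Both the start and stop calls are batched, which is why even a single container is wrapped in a one-element list in the tests.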
Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in project hadoop by apache.
The class TestContainerLaunch, method testContainerEnvVariables.
/**
 * See if environment variable is forwarded using sanitizeEnv.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testContainerEnvVariables() throws Exception {
  containerManager.start();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // ////// Construct the Container-id
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
  Map<String, String> userSetEnv = new HashMap<String, String>();
  userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
  userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
  userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT");
  userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT");
  userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR");
  userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key());
  userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME");
  userSetEnv.put(Environment.PWD.name(), "user_set_PWD");
  userSetEnv.put(Environment.HOME.name(), "user_set_HOME");
  containerLaunchContext.setEnvironment(userSetEnv);
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile = new File(tmpDir, "env_vars.txt").getAbsoluteFile();
  if (Shell.WINDOWS) {
    fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> " + processStartFile);
    fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> " + processStartFile);
    fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> " + processStartFile);
    fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> " + processStartFile);
    fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> " + processStartFile);
    fileWriter.println("@echo " + Environment.USER.$() + ">> " + processStartFile);
    fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> " + processStartFile);
    fileWriter.println("@echo " + Environment.PWD.$() + ">> " + processStartFile);
    fileWriter.println("@echo " + Environment.HOME.$() + ">> " + processStartFile);
    for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
      fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + "%>> " + processStartFile);
    }
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    // So that start file is readable by the test
    fileWriter.write("\numask 0");
    fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > " + processStartFile);
    fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> " + processStartFile);
    fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> " + processStartFile);
    fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> " + processStartFile);
    fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> " + processStartFile);
    fileWriter.write("\necho $" + Environment.USER.name() + " >> " + processStartFile);
    fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> " + processStartFile);
    fileWriter.write("\necho $" + Environment.PWD.name() + " >> " + processStartFile);
    fileWriter.write("\necho $" + Environment.HOME.name() + " >> " + processStartFile);
    for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
      fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + " >> " + processStartFile);
    }
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  // upload the script file so that the container can run it
  URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  // set up the rest of the container
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, Priority.newInstance(0), 0));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  int timeoutSecs = 0;
  while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
  }
  Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
  // Now verify the contents of the file
  List<String> localDirs = dirsHandler.getLocalDirs();
  List<String> logDirs = dirsHandler.getLogDirs();
  List<Path> appDirs = new ArrayList<Path>(localDirs.size());
  for (String localDir : localDirs) {
    Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
    Path userdir = new Path(usersdir, user);
    Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
    appDirs.add(new Path(appsdir, appId.toString()));
  }
  List<String> containerLogDirs = new ArrayList<String>();
  String relativeContainerLogDir = ContainerLaunch.getRelativeContainerLogDir(appId.toString(), cId.toString());
  for (String logDir : logDirs) {
    containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir);
  }
  BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
  Assert.assertEquals(cId.toString(), reader.readLine());
  Assert.assertEquals(context.getNodeId().getHost(), reader.readLine());
  Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), reader.readLine());
  Assert.assertEquals(String.valueOf(HTTP_PORT), reader.readLine());
  Assert.assertEquals(StringUtils.join(",", appDirs), reader.readLine());
  Assert.assertEquals(user, reader.readLine());
  Assert.assertEquals(user, reader.readLine());
  String obtainedPWD = reader.readLine();
  boolean found = false;
  for (Path localDir : appDirs) {
    if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
      found = true;
      break;
    }
  }
  Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
  Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), reader.readLine());
  for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
    Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName), ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes())));
  }
  Assert.assertEquals(cId.toString(), containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name()));
  Assert.assertEquals(context.getNodeId().getHost(), containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name()));
  Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name()));
  Assert.assertEquals(String.valueOf(HTTP_PORT), containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name()));
  Assert.assertEquals(StringUtils.join(",", appDirs), containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name()));
  Assert.assertEquals(StringUtils.join(",", containerLogDirs), containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name()));
  Assert.assertEquals(user, containerLaunchContext.getEnvironment().get(Environment.USER.name()));
  Assert.assertEquals(user, containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name()));
  found = false;
  obtainedPWD = containerLaunchContext.getEnvironment().get(Environment.PWD.name());
  for (Path localDir : appDirs) {
    if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
      found = true;
      break;
    }
  }
  Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
  Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), containerLaunchContext.getEnvironment().get(Environment.HOME.name()));
  // Get the pid of the process
  String pid = reader.readLine().trim();
  // No more lines
  Assert.assertEquals(null, reader.readLine());
  // Now test the stop functionality.
  // Assert that the process is alive
  Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
  // Once more
  Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
  // Now test the stop functionality.
  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(cId);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);
  GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
  Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
  // Assert that the process is not alive anymore
  Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid));
}
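On the consumer side of what this test verifies, a process running inside the container can read the NodeManager-provided variables through the ApplicationConstants.Environment constants instead of hard-coded strings. The class below is an illustrative probe, not part of the test; the class name is made up for the example.

// Read the environment that the NodeManager forwards into every container.
// Illustrative only; the class name is hypothetical.
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

public class ContainerEnvProbe {
  public static void main(String[] args) {
    String containerId = System.getenv(Environment.CONTAINER_ID.name());
    String nmHost = System.getenv(Environment.NM_HOST.name());
    String nmPort = System.getenv(Environment.NM_PORT.name());
    String localDirs = System.getenv(Environment.LOCAL_DIRS.name());
    String logDirs = System.getenv(Environment.LOG_DIRS.name());
    System.out.println("Running as container " + containerId
        + " on " + nmHost + ":" + nmPort);
    System.out.println("Local dirs: " + localDirs + ", log dirs: " + logDirs);
  }
}

Aux-service metadata (the Base64-encoded values the test checks above) is typically decoded with AuxiliaryServiceHelper rather than by hand.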
Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in project hadoop by apache.
The class TestContainerManager, method testChangeContainerResource.
@Test
public void testChangeContainerResource() throws Exception {
  containerManager.start();
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  // Construct the Container-id
  ContainerId cId = createContainerId(0);
  if (Shell.WINDOWS) {
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    fileWriter.write("\numask 0");
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  // Make sure the container reaches RUNNING state
  BaseContainerManagerTest.waitForNMContainerState(containerManager, cId, org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
  // Construct container resource increase request,
  List<Token> increaseTokens = new ArrayList<>();
  // Add increase request.
  Resource targetResource = Resource.newInstance(4096, 2);
  Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, targetResource, context.getContainerTokenSecretManager(), null);
  increaseTokens.add(containerToken);
  IncreaseContainersResourceRequest increaseRequest = IncreaseContainersResourceRequest.newInstance(increaseTokens);
  IncreaseContainersResourceResponse increaseResponse = containerManager.increaseContainersResource(increaseRequest);
  Assert.assertEquals(1, increaseResponse.getSuccessfullyIncreasedContainers().size());
  Assert.assertTrue(increaseResponse.getFailedRequests().isEmpty());
  // Check status
  List<ContainerId> containerIds = new ArrayList<>();
  containerIds.add(cId);
  GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  // Check status immediately as resource increase is blocking
  assertEquals(targetResource, containerStatus.getCapability());
  // Simulate a decrease request
  List<org.apache.hadoop.yarn.api.records.Container> containersToDecrease = new ArrayList<>();
  targetResource = Resource.newInstance(2048, 2);
  org.apache.hadoop.yarn.api.records.Container decreasedContainer = org.apache.hadoop.yarn.api.records.Container.newInstance(cId, null, null, targetResource, null, null);
  containersToDecrease.add(decreasedContainer);
  containerManager.handle(new CMgrDecreaseContainersResourceEvent(containersToDecrease));
  // Check status with retry
  containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int retry = 0;
  while (!targetResource.equals(containerStatus.getCapability()) && (retry++ < 5)) {
    Thread.sleep(200);
    containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  }
  assertEquals(targetResource, containerStatus.getCapability());
}
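The decrease path is asynchronous, which is why the test polls getContainerStatuses until the reported capability matches the target. That retry loop can be factored into a small helper; the sketch below is only an outline of the polling pattern used above, with the ContainerManagementProtocol handle assumed to be supplied by the caller.

// Poll the container's reported capability until it matches the target
// resource or the retry budget runs out. Sketch only.
static boolean waitForCapability(ContainerManagementProtocol cm,
    ContainerId containerId, Resource target, int maxRetries)
    throws YarnException, IOException, InterruptedException {
  GetContainerStatusesRequest request =
      GetContainerStatusesRequest.newInstance(Collections.singletonList(containerId));
  for (int retry = 0; retry < maxRetries; retry++) {
    ContainerStatus status =
        cm.getContainerStatuses(request).getContainerStatuses().get(0);
    if (target.equals(status.getCapability())) {
      return true;
    }
    Thread.sleep(200);
  }
  return false;
}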
Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in project hadoop by apache.
The class TestContainerManager, method testContainerLaunchAndStop.
//@Test
public void testContainerLaunchAndStop() throws IOException, InterruptedException, YarnException {
  containerManager.start();
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
  // ////// Construct the Container-id
  ContainerId cId = createContainerId(0);
  if (Shell.WINDOWS) {
    fileWriter.println("@echo Hello World!> " + processStartFile);
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    // So that start file is readable by the test
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  int timeoutSecs = 0;
  while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
  }
  Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
  // Now verify the contents of the file
  BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
  Assert.assertEquals("Hello World!", reader.readLine());
  // Get the pid of the process
  String pid = reader.readLine().trim();
  // No more lines
  Assert.assertEquals(null, reader.readLine());
  // Now test the stop functionality.
  // Assert that the process is alive
  Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
  // Once more
  Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
  List<ContainerId> containerIds = new ArrayList<>();
  containerIds.add(cId);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);
  GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
  Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
  // Assert that the process is not alive anymore
  Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid));
}
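As with startContainers, the stop call is batched, so a client would normally inspect the response for per-container failures before checking the final exit status. The sketch below assumes the stop response exposes the same failed-request map pattern as the increase response shown elsewhere on this page; it is an outline, not code from the test.

// Stop a container and surface any per-container failure from the batched
// response. Sketch only; cm and containerId are assumed inputs.
static void stopAndReport(ContainerManagementProtocol cm, ContainerId containerId)
    throws YarnException, IOException {
  StopContainersRequest stopRequest =
      StopContainersRequest.newInstance(Collections.singletonList(containerId));
  StopContainersResponse stopResponse = cm.stopContainers(stopRequest);
  for (Map.Entry<ContainerId, SerializedException> entry :
      stopResponse.getFailedRequests().entrySet()) {
    System.err.println("Stop failed for " + entry.getKey()
        + ": " + entry.getValue().getMessage());
  }
}

A container stopped this way reports ContainerExitStatus.KILLED_BY_APPMASTER, which is exactly what the test asserts.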
Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest in project hadoop by apache.
The class TestContainerManager, method testIncreaseContainerResourceWithInvalidResource.
@Test
public void testIncreaseContainerResourceWithInvalidResource() throws Exception {
  containerManager.start();
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  // Construct the Container-id
  ContainerId cId = createContainerId(0);
  if (Shell.WINDOWS) {
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    fileWriter.write("\numask 0");
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  // Make sure the container reaches RUNNING state
  BaseContainerManagerTest.waitForNMContainerState(containerManager, cId, org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
  // Construct container resource increase request,
  List<Token> increaseTokens = new ArrayList<>();
  // Add increase request. The increase request should fail
  // as the current resource does not fit in the target resource
  Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, Resource.newInstance(512, 1), context.getContainerTokenSecretManager(), null);
  increaseTokens.add(containerToken);
  IncreaseContainersResourceRequest increaseRequest = IncreaseContainersResourceRequest.newInstance(increaseTokens);
  IncreaseContainersResourceResponse increaseResponse = containerManager.increaseContainersResource(increaseRequest);
  // Check response
  Assert.assertEquals(0, increaseResponse.getSuccessfullyIncreasedContainers().size());
  Assert.assertEquals(1, increaseResponse.getFailedRequests().size());
  for (Map.Entry<ContainerId, SerializedException> entry : increaseResponse.getFailedRequests().entrySet()) {
    if (cId.equals(entry.getKey())) {
      Assert.assertNotNull("Failed message", entry.getValue().getMessage());
      Assert.assertTrue(entry.getValue().getMessage().contains("The target resource " + Resource.newInstance(512, 1).toString() + " is smaller than the current resource " + Resource.newInstance(1024, 1)));
    } else {
      throw new YarnException("Received failed request from wrong" + " container: " + entry.getKey().toString());
    }
  }
}
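The failure above comes from the NodeManager rejecting a target that is smaller than the container's current allocation. A client can guard against that before sending the request; the fragment below is a sketch written against the test's own context (cId, context, user, containerManager, and the createContainerToken helper used throughout these snippets), and it assumes Resources.fitsIn is an appropriate way to check that the target covers the current allocation in every dimension.

// Only send an increase request when the target covers the current allocation.
// currentResource and targetResource are assumed to be known to the caller.
if (!Resources.fitsIn(currentResource, targetResource)) {
  throw new IllegalArgumentException("Target " + targetResource
      + " is smaller than the current resource " + currentResource);
}
Token increaseToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER,
    context.getNodeId(), user, targetResource,
    context.getContainerTokenSecretManager(), null);
IncreaseContainersResourceRequest increaseRequest =
    IncreaseContainersResourceRequest.newInstance(
        Collections.singletonList(increaseToken));
IncreaseContainersResourceResponse increaseResponse =
    containerManager.increaseContainersResource(increaseRequest);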