Use of org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor in project hadoop by apache.
The class TestResourceLocalizationService, method testDirectoryCleanupOnNewlyCreatedStateStore: the test mocks a recoverable NM state store that reports itself as newly created and verifies that ResourceLocalizationService.init() renames the leftover usercache, filecache, and nmPrivate directories aside for deletion and recreates them with the expected permissions.
@Test
public void testDirectoryCleanupOnNewlyCreatedStateStore()
    throws IOException, URISyntaxException {
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(new Configuration());
  ContainerExecutor exec = mock(ContainerExecutor.class);
  DeletionService delService = spy(new DeletionService(exec));
  delService.init(conf);
  delService.start();
  List<Path> localDirs = new ArrayList<Path>();
  String[] sDirs = new String[4];
  for (int i = 0; i < 4; ++i) {
    localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
    sDirs[i] = localDirs.get(i).toString();
  }
  conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
  LocalDirsHandlerService diskhandler = new LocalDirsHandlerService();
  diskhandler.init(conf);
  // A recoverable state store that reports itself as newly created, which
  // should trigger cleanup of any leftover local directories.
  NMStateStoreService nmStateStoreService = mock(NMStateStoreService.class);
  when(nmStateStoreService.canRecover()).thenReturn(true);
  when(nmStateStoreService.isNewlyCreated()).thenReturn(true);
  ResourceLocalizationService locService = spy(new ResourceLocalizationService(
      dispatcher, exec, delService, diskhandler, nmContext));
  doReturn(lfs).when(locService).getLocalFileContext(isA(Configuration.class));
  try {
    dispatcher.start();
    // initialize ResourceLocalizationService
    locService.init(conf);
    final FsPermission defaultPerm = new FsPermission((short) 0755);
    // verify directory creation: each cache directory is renamed aside
    // (for background deletion) and then recreated
    for (Path p : localDirs) {
      p = new Path((new URI(p.toString())).getPath());
      Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
      verify(spylfs).rename(eq(usercache), any(Path.class),
          any(Options.Rename.class));
      verify(spylfs).mkdir(eq(usercache), eq(defaultPerm), eq(true));
      Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
      verify(spylfs).rename(eq(publicCache), any(Path.class),
          any(Options.Rename.class));
      verify(spylfs).mkdir(eq(publicCache), eq(defaultPerm), eq(true));
      Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR);
      verify(spylfs).rename(eq(nmPriv), any(Path.class),
          any(Options.Rename.class));
      verify(spylfs).mkdir(eq(nmPriv),
          eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true));
    }
  } finally {
    dispatcher.stop();
    delService.stop();
  }
}
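The fields conf, basedir, lfs, spylfs, and nmContext above are fixtures defined elsewhere in TestResourceLocalizationService. The following is a minimal sketch of the kind of setup the assertions assume: the local AbstractFileSystem is spied so that rename() and mkdir() calls can be verified, and lfs is a FileContext backed by that spy. The mocked NMContext is a simplification for this sketch, not the upstream code.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.junit.Before;

private Configuration conf;
private AbstractFileSystem spylfs; // spied so rename()/mkdir() can be verified
private FileContext lfs;           // FileContext backed by the spy
private NMContext nmContext;       // upstream builds a real NMContext; mocked here
private final Path basedir =
    new Path("target", TestResourceLocalizationService.class.getSimpleName());

@Before
public void setup() throws IOException {
  conf = new Configuration();
  // Spy on the raw local file system so the test can observe the
  // rename-then-recreate cleanup performed during service init.
  spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
  lfs = FileContext.getFileContext(spylfs, conf);
  nmContext = mock(NMContext.class); // simplification for this sketch
}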
Use of org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor in project hadoop by apache.
The class TestContainerSchedulerQueuing, method createContainerManager: the override builds a ContainerManagerImpl with a stubbed remote UGI and a ContainersMonitorImpl that reports a fixed capacity of 2 GB of physical memory, ratio-derived virtual memory, and 4 vcores, giving queuing tests deterministic node resources.
@Override
protected ContainerManagerImpl createContainerManager(DeletionService delSrvc) {
  return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
      metrics, dirsHandler) {

    @Override
    public void setBlockNewContainerRequests(boolean blockNewContainerRequests) {
      // do nothing
    }

    @Override
    protected UserGroupInformation getRemoteUgi() throws YarnException {
      ApplicationId appId = ApplicationId.newInstance(0, 0);
      ApplicationAttemptId appAttemptId =
          ApplicationAttemptId.newInstance(appId, 1);
      UserGroupInformation ugi =
          UserGroupInformation.createRemoteUser(appAttemptId.toString());
      ugi.addTokenIdentifier(new NMTokenIdentifier(appAttemptId,
          context.getNodeId(), user,
          context.getNMTokenSecretManager().getCurrentKey().getKeyId()));
      return ugi;
    }

    @Override
    protected ContainersMonitor createContainersMonitor(ContainerExecutor exec) {
      return new ContainersMonitorImpl(exec, dispatcher, this.context) {

        // Define resources available for containers to be executed.
        @Override
        public long getPmemAllocatedForContainers() {
          return 2048 * 1024 * 1024L;
        }

        @Override
        public long getVmemAllocatedForContainers() {
          float pmemRatio = getConfig().getFloat(
              YarnConfiguration.NM_VMEM_PMEM_RATIO,
              YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
          return (long) (pmemRatio * getPmemAllocatedForContainers());
        }

        @Override
        public long getVCoresAllocatedForContainers() {
          return 4;
        }
      };
    }
  };
}
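With the monitor overrides above, the node capacity seen by the container scheduler is pinned, so a queuing test can overcommit it deterministically. The sketch below shows what that fixed capacity looks like from a test body; sketchFixedNodeCapacity is a hypothetical method, containerManager stands in for an instance produced by the factory above, and the vmem value assumes the default NM_VMEM_PMEM_RATIO of 2.1 is not overridden by the test configuration.

@Test
public void sketchFixedNodeCapacity() throws Exception {
  // Hypothetical test body; containerManager is assumed to come from
  // createContainerManager(...) above.
  ContainersMonitor monitor = containerManager.getContainersMonitor();
  assertEquals(2048L * 1024 * 1024, monitor.getPmemAllocatedForContainers());
  assertEquals(4, monitor.getVCoresAllocatedForContainers());
  // vmem follows from the pmem ratio; 2.1 is the YARN default, assuming the
  // test configuration does not override NM_VMEM_PMEM_RATIO.
  assertEquals((long) (2.1f * 2048L * 1024 * 1024),
      monitor.getVmemAllocatedForContainers());
}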