Use of org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor in project hadoop by apache.
From the class TestResourceLocalizationService, method testLocalizerRunnerException.
@Test(timeout = 10000)
// mocked generics
@SuppressWarnings("unchecked")
public void testLocalizerRunnerException() throws Exception {
  DrainDispatcher dispatcher = new DrainDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
  dispatcher.register(ApplicationEventType.class, applicationBus);
  EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
  dispatcher.register(ContainerEventType.class, containerBus);
  ContainerExecutor exec = mock(ContainerExecutor.class);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  LocalDirsHandlerService dirsHandlerSpy = spy(dirsHandler);
  dirsHandlerSpy.init(conf);
  DeletionService delServiceReal = new DeletionService(exec);
  DeletionService delService = spy(delServiceReal);
  delService.init(new Configuration());
  delService.start();
  ResourceLocalizationService rawService =
      new ResourceLocalizationService(dispatcher, exec, delService, dirsHandlerSpy, nmContext);
  ResourceLocalizationService spyService = spy(rawService);
  doReturn(mockServer).when(spyService).createServer();
  try {
    spyService.init(conf);
    spyService.start();
    // init application
    final Application app = mock(Application.class);
    final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3);
    when(app.getUser()).thenReturn("user0");
    when(app.getAppId()).thenReturn(appId);
    spyService.handle(new ApplicationLocalizationEvent(
        LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
    dispatcher.await();
    Random r = new Random();
    long seed = r.nextLong();
    System.out.println("SEED: " + seed);
    r.setSeed(seed);
    final Container c = getMockContainer(appId, 42, "user0");
    final LocalResource resource1 = getPrivateMockedResource(r);
    System.out.println("Here 4");
    final LocalResourceRequest req1 = new LocalResourceRequest(resource1);
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs =
        new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
    List<LocalResourceRequest> privateResourceList = new ArrayList<LocalResourceRequest>();
    privateResourceList.add(req1);
    rsrcs.put(LocalResourceVisibility.PRIVATE, privateResourceList);
    final Constructor<?>[] constructors = FSError.class.getDeclaredConstructors();
    constructors[0].setAccessible(true);
    FSError fsError = (FSError) constructors[0].newInstance(new IOException("Disk Error"));
    Mockito.doThrow(fsError).when(dirsHandlerSpy).getLocalPathForWrite(isA(String.class));
    spyService.handle(new ContainerLocalizationRequestEvent(c, rsrcs));
    Thread.sleep(1000);
    dispatcher.await();
    // Verify if ContainerResourceFailedEvent is invoked on FSError
    verify(containerBus).handle(isA(ContainerResourceFailedEvent.class));
  } finally {
    spyService.stop();
    dispatcher.stop();
    delService.stop();
  }
}
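The notable move in this test is that org.apache.hadoop.fs.FSError has no public constructor, so the test builds one reflectively and has Mockito throw it from the spied LocalDirsHandlerService. Below is a minimal, standalone sketch of that reflection-plus-doThrow pattern, assuming only Mockito and the Hadoop classes already used above; the main wrapper and the path argument are illustrative, not part of the original test.

import java.io.IOException;
import java.lang.reflect.Constructor;

import org.apache.hadoop.fs.FSError;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;

import static org.mockito.Mockito.*;

public class FsErrorStubbingSketch {
  public static void main(String[] args) throws Exception {
    // FSError's constructors are not public, so make one accessible via reflection.
    Constructor<?>[] ctors = FSError.class.getDeclaredConstructors();
    ctors[0].setAccessible(true);
    FSError fsError = (FSError) ctors[0].newInstance(new IOException("Disk Error"));

    // Spy the real handler and force getLocalPathForWrite(String) to throw the error.
    LocalDirsHandlerService handler = spy(new LocalDirsHandlerService());
    doThrow(fsError).when(handler).getLocalPathForWrite(isA(String.class));

    try {
      handler.getLocalPathForWrite("/any/write/path"); // illustrative argument
    } catch (FSError expected) {
      System.out.println("caught: " + expected);
    }
  }
}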
Use of org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor in project hadoop by apache.
From the class TestResourceLocalizationService, method testFailedPublicResource.
@Test(timeout = 20000)
// mocked generics
@SuppressWarnings("unchecked")
public void testFailedPublicResource() throws Exception {
  List<Path> localDirs = new ArrayList<Path>();
  String[] sDirs = new String[4];
  for (int i = 0; i < 4; ++i) {
    localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
    sDirs[i] = localDirs.get(i).toString();
  }
  conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
  DrainDispatcher dispatcher = new DrainDispatcher();
  EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
  dispatcher.register(ApplicationEventType.class, applicationBus);
  EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
  dispatcher.register(ContainerEventType.class, containerBus);
  ContainerExecutor exec = mock(ContainerExecutor.class);
  DeletionService delService = mock(DeletionService.class);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  dispatcher.init(conf);
  dispatcher.start();
  try {
    ResourceLocalizationService rawService =
        new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, nmContext);
    ResourceLocalizationService spyService = spy(rawService);
    doReturn(mockServer).when(spyService).createServer();
    doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
    spyService.init(conf);
    spyService.start();
    final String user = "user0";
    // init application
    final Application app = mock(Application.class);
    final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    spyService.handle(new ApplicationLocalizationEvent(
        LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
    dispatcher.await();
    // init container.
    final Container c = getMockContainer(appId, 42, user);
    // init resources
    Random r = new Random();
    long seed = r.nextLong();
    System.out.println("SEED: " + seed);
    r.setSeed(seed);
    // cause chmod to fail after a delay
    final CyclicBarrier barrier = new CyclicBarrier(2);
    doAnswer(new Answer<Void>() {
      public Void answer(InvocationOnMock invocation) throws IOException {
        try {
          barrier.await();
        } catch (InterruptedException e) {
        } catch (BrokenBarrierException e) {
        }
        throw new IOException("forced failure");
      }
    }).when(spylfs).setPermission(isA(Path.class), isA(FsPermission.class));
    // Queue up two localization requests for the same public resource
    final LocalResource pubResource = getPublicMockedResource(r);
    final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource);
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
        new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
    req.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq));
    Set<LocalResourceRequest> pubRsrcs = new HashSet<LocalResourceRequest>();
    pubRsrcs.add(pubReq);
    spyService.handle(new ContainerLocalizationRequestEvent(c, req));
    spyService.handle(new ContainerLocalizationRequestEvent(c, req));
    dispatcher.await();
    // allow the chmod to fail now that both requests have been queued
    barrier.await();
    verify(containerBus, timeout(5000).times(2)).handle(isA(ContainerResourceFailedEvent.class));
  } finally {
    dispatcher.stop();
  }
}
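The interesting piece here is the CyclicBarrier inside the Mockito Answer: the stubbed setPermission call blocks until the test has queued both localization requests, and only then fails. Below is a self-contained sketch of the same doAnswer-plus-barrier idea against a plain mocked interface; the Worker interface and item names are hypothetical, not Hadoop APIs.

import java.io.IOException;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import static org.mockito.Mockito.*;

public class BarrierGatedFailureSketch {
  // Hypothetical collaborator whose failure we want to time precisely.
  interface Worker {
    void doWork(String item) throws IOException;
  }

  public static void main(String[] args) throws Exception {
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final Worker worker = mock(Worker.class);

    // The stub blocks on the barrier, then fails, mirroring the chmod stub above.
    doAnswer(new Answer<Void>() {
      public Void answer(InvocationOnMock invocation) throws IOException {
        try {
          barrier.await();
        } catch (InterruptedException | BrokenBarrierException ignored) {
        }
        throw new IOException("forced failure");
      }
    }).when(worker).doWork(anyString());

    // Drive the call from another thread; it parks on the barrier.
    Thread caller = new Thread(() -> {
      try {
        worker.doWork("item-1");
      } catch (IOException expected) {
        System.out.println("caught: " + expected.getMessage());
      }
    });
    caller.start();

    // Arrange any other preconditions here, then release the failure.
    barrier.await();
    caller.join();
    verify(worker).doWork("item-1");
  }
}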
Use of org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor in project hadoop by apache.
From the class TestResourceLocalizationService, method testLocalizationInit.
@Test
public void testLocalizationInit() throws Exception {
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(new Configuration());
  ContainerExecutor exec = mock(ContainerExecutor.class);
  DeletionService delService = spy(new DeletionService(exec));
  delService.init(conf);
  delService.start();
  List<Path> localDirs = new ArrayList<Path>();
  String[] sDirs = new String[4];
  for (int i = 0; i < 4; ++i) {
    localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
    sDirs[i] = localDirs.get(i).toString();
  }
  conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
  LocalDirsHandlerService diskhandler = new LocalDirsHandlerService();
  diskhandler.init(conf);
  ResourceLocalizationService locService =
      spy(new ResourceLocalizationService(dispatcher, exec, delService, diskhandler, nmContext));
  doReturn(lfs).when(locService).getLocalFileContext(isA(Configuration.class));
  try {
    dispatcher.start();
    // initialize ResourceLocalizationService
    locService.init(conf);
    final FsPermission defaultPerm = new FsPermission((short) 0755);
    // verify directory creation
    for (Path p : localDirs) {
      p = new Path((new URI(p.toString())).getPath());
      Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
      verify(spylfs).mkdir(eq(usercache), eq(defaultPerm), eq(true));
      Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
      verify(spylfs).mkdir(eq(publicCache), eq(defaultPerm), eq(true));
      Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR);
      verify(spylfs).mkdir(eq(nmPriv), eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true));
    }
  } finally {
    dispatcher.stop();
    delService.stop();
  }
}
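The verifications above rely on spylfs, a Mockito spy of the local AbstractFileSystem that the spied service is handed through getLocalFileContext. Below is a small runnable sketch of that wiring and of verifying a permissioned mkdir through it; the /tmp path is illustrative, and this is a simplified stand-in for the test class's actual fixture setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

import static org.mockito.Mockito.*;

public class SpiedLocalFsSketch {
  public static void main(String[] args) throws Exception {
    // Spy the local AbstractFileSystem and wrap it in a FileContext.
    AbstractFileSystem spylfs =
        spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
    FileContext lfs = FileContext.getFileContext(spylfs, new Configuration());

    FsPermission perm = new FsPermission((short) 0755);
    Path dir = lfs.makeQualified(new Path("/tmp/spied-local-fs/usercache")); // illustrative path

    // The FileContext delegates to the spied AbstractFileSystem, so the call is observable.
    lfs.mkdir(dir, perm, true);
    verify(spylfs).mkdir(eq(dir), eq(perm), eq(true));

    lfs.delete(new Path("/tmp/spied-local-fs"), true);
  }
}

In the test itself, this lfs is what doReturn(lfs).when(locService).getLocalFileContext(isA(Configuration.class)) hands to the service, so locService.init(conf) performs its directory creation against the spy.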
Use of org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor in project hadoop by apache.
From the class TestContainerManagerRecovery, method createContainerManager.
private ContainerManagerImpl createContainerManager(Context context) {
  final LogHandler logHandler = mock(LogHandler.class);
  final ResourceLocalizationService rsrcSrv =
      new ResourceLocalizationService(null, null, null, null, context) {
    @Override
    public void serviceInit(Configuration conf) throws Exception {
    }

    @Override
    public void serviceStart() throws Exception {
      // do nothing
    }

    @Override
    public void serviceStop() throws Exception {
      // do nothing
    }

    @Override
    public void handle(LocalizationEvent event) {
      // do nothing
    }
  };
  final ContainersLauncher launcher = new ContainersLauncher(context, null, null, null, null) {
    @Override
    public void handle(ContainersLauncherEvent event) {
      // do nothing
    }
  };
  return new ContainerManagerImpl(context, mock(ContainerExecutor.class),
      mock(DeletionService.class), mock(NodeStatusUpdater.class), metrics, null) {
    @Override
    protected LogHandler createLogHandler(Configuration conf, Context context,
        DeletionService deletionService) {
      return logHandler;
    }

    @Override
    protected ResourceLocalizationService createResourceLocalizationService(
        ContainerExecutor exec, DeletionService deletionContext, Context context) {
      return rsrcSrv;
    }

    @Override
    protected ContainersLauncher createContainersLauncher(Context context, ContainerExecutor exec) {
      return launcher;
    }

    @Override
    public void setBlockNewContainerRequests(boolean blockNewContainerRequests) {
      // do nothing
    }

    @Override
    public NMTimelinePublisher createNMTimelinePublisher(Context context) {
      return null;
    }
  };
}
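The pattern worth noting here is that ContainerManagerImpl exposes protected factory methods, so the recovery test subclasses it anonymously and returns no-op stubs instead of real services. Below is a tiny self-contained illustration of that factory-override pattern; the Service and Worker classes are hypothetical, not Hadoop classes.

public class FactoryOverridePatternSketch {

  static class Worker {
    void start() {
      System.out.println("real worker started");
    }
  }

  static class Service {
    private Worker worker;

    // Protected factory hook, analogous to createResourceLocalizationService(...).
    protected Worker createWorker() {
      return new Worker();
    }

    void init() {
      worker = createWorker();
      worker.start();
    }
  }

  public static void main(String[] args) {
    // Test-side override: the anonymous subclass returns a stub so init() has no side effects.
    Service testService = new Service() {
      @Override
      protected Worker createWorker() {
        return new Worker() {
          @Override
          void start() {
            // do nothing
          }
        };
      }
    };
    testService.init();
    System.out.println("service initialized with stubbed worker");
  }
}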
Use of org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor in project hadoop by apache.
From the class TestContainerLaunch, method testContainerLaunchStdoutAndStderrDiagnostics.
@Test(timeout = 20000)
public void testContainerLaunchStdoutAndStderrDiagnostics() throws IOException {
  File shellFile = null;
  try {
    shellFile = Shell.appendScriptExtension(tmpDir, "hello");
    // echo "hello" to stdout, echo "error" to stderr, and exit with code 2
    String command = Shell.WINDOWS
        ? "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2"
        : "echo \"hello\"; echo \"error\" 1>&2; exit 2;";
    PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
    FileUtil.setExecutable(shellFile, true);
    writer.println(command);
    writer.close();
    Map<Path, List<String>> resources = new HashMap<Path, List<String>>();
    FileOutputStream fos = new FileOutputStream(shellFile, true);
    Map<String, String> env = new HashMap<String, String>();
    List<String> commands = new ArrayList<String>();
    commands.add(command);
    ContainerExecutor exec = new DefaultContainerExecutor();
    exec.setConf(new YarnConfiguration());
    exec.writeLaunchEnv(fos, env, resources, commands, new Path(localLogDir.getAbsolutePath()), "user");
    fos.flush();
    fos.close();
    Shell.ShellCommandExecutor shexc =
        new Shell.ShellCommandExecutor(new String[] { shellFile.getAbsolutePath() }, tmpDir);
    String diagnostics = null;
    try {
      shexc.execute();
      Assert.fail("Should catch exception");
    } catch (ExitCodeException e) {
      diagnostics = e.getMessage();
    }
    // test stderr
    Assert.assertTrue(diagnostics.contains("error"));
    // test stdout
    Assert.assertTrue(shexc.getOutput().contains("hello"));
    Assert.assertTrue(shexc.getExitCode() == 2);
  } finally {
    // cleanup
    if (shellFile != null && shellFile.exists()) {
      shellFile.delete();
    }
  }
}
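The diagnostics checks hinge on Shell.ShellCommandExecutor: stdout is available via getOutput(), the exit code via getExitCode(), and stderr arrives as the ExitCodeException message. Below is a standalone sketch of that pattern without the ContainerExecutor launch-script plumbing; the script body and temp-file handling are illustrative, and only the Unix branch is shown.

import java.io.File;
import java.io.PrintWriter;

import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;

public class ScriptDiagnosticsSketch {
  public static void main(String[] args) throws Exception {
    // Write a throwaway script that prints to stdout, prints to stderr, and exits with 2.
    File script = File.createTempFile("diag", ".sh");
    try (PrintWriter writer = new PrintWriter(script)) {
      writer.println("echo \"hello\"; echo \"error\" 1>&2; exit 2;");
    }

    ShellCommandExecutor shexc =
        new ShellCommandExecutor(new String[] { "bash", script.getAbsolutePath() });
    try {
      shexc.execute();
    } catch (ExitCodeException e) {
      System.out.println("exit code : " + shexc.getExitCode()); // 2
      System.out.println("stdout    : " + shexc.getOutput());   // hello
      System.out.println("stderr    : " + e.getMessage());      // error
    } finally {
      script.delete();
    }
  }
}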