Use of org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext in project hadoop by apache.
From the class TestContainerLogsPage, method testContainerLogDirs.
@Test(timeout = 30000)
public void testContainerLogDirs() throws IOException, YarnException {
  File absLogDir = new File("target",
      TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId =
      BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId container1 =
      BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
  nmContext.getApplications().put(appId, app);
  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> files = null;
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));
  // After container is completed, it is removed from nmContext
  nmContext.getContainers().remove(container1);
  Assert.assertNull(nmContext.getContainers().get(container1));
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));
  // Create a new context to check if correct container log dirs are fetched
  // on full disk.
  LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
  // good log dirs are empty and nm log dir is in the full log dir list.
  when(dirsHandlerForFullDisk.getLogDirs()).thenReturn(new ArrayList<String>());
  when(dirsHandlerForFullDisk.getLogDirsForRead())
      .thenReturn(Arrays.asList(new String[] { absLogDir.getAbsolutePath() }));
  nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
  nmContext.getApplications().put(appId, app);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> dirs = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  File containerLogDir = new File(absLogDir, appId + "/" + container1);
  Assert.assertTrue(dirs.contains(containerLogDir));
}
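Every example on this page builds an NMContext directly instead of starting a full NodeManager. The helper below is a minimal sketch of that construction and is not part of Hadoop: it mirrors the seven-argument NMContext constructor used above, leaving both token secret managers null and backing the context with the no-op NMNullStateStoreService, exactly as these tests do.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;

// Hypothetical test helper (not in Hadoop): builds a bare-bones NMContext
// the way the snippets above do.
class NMContextSketch {

  static NMContext newTestContext(File logDir) {
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toURI().toString());
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);
    return new NodeManager.NMContext(
        null,                              // NMContainerTokenSecretManager, unused in these tests
        null,                              // NMTokenSecretManagerInNM, unused in these tests
        dirsHandler,
        new ApplicationACLsManager(conf),
        new NMNullStateStoreService(),     // no-op state store, no recovery
        false,                             // distributed-scheduling flag, as in the tests above
        conf);
  }
}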
Use of org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext in project hadoop by apache.
From the class TestContainerLogsPage, method testContainerLogFile.
@Test(timeout = 30000)
public void testContainerLogFile() throws IOException, YarnException {
  File absLogDir = new File("target",
      TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 0.0f);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
  // Add an application and the corresponding containers
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  nmContext.getApplications().put(appId, app);
  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(containerId, container);
  File containerLogDir = new File(absLogDir,
      ContainerLaunch.getRelativeContainerLogDir(appId.toString(), containerId.toString()));
  containerLogDir.mkdirs();
  String fileName = "fileName";
  File containerLogFile = new File(containerLogDir, fileName);
  containerLogFile.createNewFile();
  File file = ContainerLogsUtils.getContainerLogFile(containerId, fileName, user, nmContext);
  Assert.assertEquals(containerLogFile.toURI().toString(), file.toURI().toString());
  FileUtil.fullyDelete(absLogDir);
}
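The final assertion pins down the on-disk layout these helpers assume: container logs live under <NM log dir>/<application id>/<container id>/<file name>. The lines below are a minimal sketch of resolving one such file, reusing only the calls shown above and a hypothetical "syslog" file name; they assume appId, containerId, user, nmContext and absLogDir are set up as in the test.

// "<application id>/<container id>" relative to the configured NM log dir
String relativeDir = ContainerLaunch.getRelativeContainerLogDir(
    appId.toString(), containerId.toString());
File expected = new File(new File(absLogDir, relativeDir), "syslog");  // hypothetical file name
File resolved = ContainerLogsUtils.getContainerLogFile(containerId, "syslog", user, nmContext);
// expected and resolved should point at the same path when the file exists
// under a configured NM log dir.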
Use of org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext in project hadoop by apache.
From the class TestLogAggregationService, method setup.
@Override
@SuppressWarnings("unchecked")
public void setup() throws IOException {
  super.setup();
  NodeId nodeId = NodeId.newInstance("0.0.0.0", 5555);
  ((NMContext) context).setNodeId(nodeId);
  dispatcher = createDispatcher();
  appEventHandler = mock(EventHandler.class);
  dispatcher.register(ApplicationEventType.class, appEventHandler);
  UserGroupInformation.setConfiguration(conf);
}
Use of org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext in project hadoop by apache.
From the class TestEventFlow, method testSuccessfulContainerLaunch.
@Test
public void testSuccessfulContainerLaunch()
    throws InterruptedException, IOException, YarnException {
  FileContext localFS = FileContext.getLocalFSFileContext();
  localFS.delete(new Path(localDir.getAbsolutePath()), true);
  localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
  localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
  localDir.mkdir();
  localLogDir.mkdir();
  remoteLogDir.mkdir();
  YarnConfiguration conf = new YarnConfiguration();
  Context context = new NMContext(new NMContainerTokenSecretManager(conf),
      new NMTokenSecretManagerInNM(), null, null, new NMNullStateStoreService(),
      false, conf) {
    @Override
    public int getHttpPort() {
      return 1234;
    }
  };
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS,
      "0.0.0.0:" + ServerSocketUtil.getPort(8040, 10));
  ContainerExecutor exec = new DefaultContainerExecutor();
  exec.setConf(conf);
  DeletionService del = new DeletionService(exec);
  Dispatcher dispatcher = new AsyncDispatcher();
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  NodeHealthCheckerService healthChecker =
      new NodeHealthCheckerService(NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
  healthChecker.init(conf);
  NodeManagerMetrics metrics = NodeManagerMetrics.create();
  NodeStatusUpdater nodeStatusUpdater =
      new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics) {
    @Override
    protected ResourceTracker getRMClient() {
      return new LocalRMInterface();
    }

    @Override
    protected void stopRMProxy() {
      return;
    }

    @Override
    protected void startStatusUpdater() {
      // Don't start any updating thread.
      return;
    }

    @Override
    public long getRMIdentifier() {
      return SIMULATED_RM_IDENTIFIER;
    }
  };
  DummyContainerManager containerManager =
      new DummyContainerManager(context, exec, del, nodeStatusUpdater, metrics, dirsHandler);
  nodeStatusUpdater.init(conf);
  ((NMContext) context).setContainerManager(containerManager);
  nodeStatusUpdater.start();
  ((NMContext) context).setNodeStatusUpdater(nodeStatusUpdater);
  containerManager.init(conf);
  containerManager.start();
  ContainerLaunchContext launchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  ApplicationId applicationId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 0);
  ContainerId cID = ContainerId.newContainerId(applicationAttemptId, 0);
  String user = "testing";
  StartContainerRequest scRequest = StartContainerRequest.newInstance(launchContext,
      TestContainerManager.createContainerToken(cID, SIMULATED_RM_IDENTIFIER,
          context.getNodeId(), user, context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  BaseContainerManagerTest.waitForContainerState(containerManager, cID,
      Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED), 20);
  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(cID);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.COMPLETE);
  containerManager.stop();
}
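Both this test and the setup() snippet above reach NMContext's mutable state by casting down from the Context interface: setNodeId, setContainerManager and setNodeStatusUpdater let a test wire the pieces together by hand instead of going through full NodeManager startup. The lines below are a condensed sketch of that wiring, assuming context, containerManager and nodeStatusUpdater are built as in the test above.

// Cast from the Context interface to the concrete NMContext to reach its setters.
NMContext nmContext = (NMContext) context;
nmContext.setNodeId(NodeId.newInstance("0.0.0.0", 5555));  // address borrowed from the setup() example
nmContext.setContainerManager(containerManager);
nmContext.setNodeStatusUpdater(nodeStatusUpdater);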
Use of org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext in project hadoop by apache.
From the class TestResourceLocalizationService, method testPublicResourceInitializesLocalDir.
@Test
@SuppressWarnings("unchecked")
public void testPublicResourceInitializesLocalDir() throws Exception {
  // Setup state to simulate restart NM with existing state meaning no
  // directory creation during initialization
  NMStateStoreService spyStateStore = spy(nmContext.getNMStateStore());
  when(spyStateStore.canRecover()).thenReturn(true);
  NMContext spyContext = spy(nmContext);
  when(spyContext.getNMStateStore()).thenReturn(spyStateStore);
  List<Path> localDirs = new ArrayList<Path>();
  String[] sDirs = new String[4];
  for (int i = 0; i < 4; ++i) {
    localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
    sDirs[i] = localDirs.get(i).toString();
  }
  conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
  DrainDispatcher dispatcher = new DrainDispatcher();
  EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
  dispatcher.register(ApplicationEventType.class, applicationBus);
  EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
  dispatcher.register(ContainerEventType.class, containerBus);
  ContainerExecutor exec = mock(ContainerExecutor.class);
  DeletionService delService = mock(DeletionService.class);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  dispatcher.init(conf);
  dispatcher.start();
  try {
    ResourceLocalizationService rawService =
        new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, spyContext);
    ResourceLocalizationService spyService = spy(rawService);
    doReturn(mockServer).when(spyService).createServer();
    doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
    spyService.init(conf);
    final FsPermission defaultPerm = new FsPermission((short) 0755);
    // verify directory is not created at initialization
    for (Path p : localDirs) {
      p = new Path((new URI(p.toString())).getPath());
      Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
      verify(spylfs, never()).mkdir(eq(publicCache), eq(defaultPerm), eq(true));
    }
    spyService.start();
    final String user = "user0";
    // init application
    final Application app = mock(Application.class);
    final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    spyService.handle(new ApplicationLocalizationEvent(
        LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
    dispatcher.await();
    // init container.
    final Container c = getMockContainer(appId, 42, user);
    // init resources
    Random r = new Random();
    long seed = r.nextLong();
    System.out.println("SEED: " + seed);
    r.setSeed(seed);
    // Queue up public resource localization
    final LocalResource pubResource1 = getPublicMockedResource(r);
    final LocalResourceRequest pubReq1 = new LocalResourceRequest(pubResource1);
    LocalResource pubResource2 = null;
    do {
      pubResource2 = getPublicMockedResource(r);
    } while (pubResource2 == null || pubResource2.equals(pubResource1));
    // above call to make sure we don't get identical resources.
    final LocalResourceRequest pubReq2 = new LocalResourceRequest(pubResource2);
    Set<LocalResourceRequest> pubRsrcs = new HashSet<LocalResourceRequest>();
    pubRsrcs.add(pubReq1);
    pubRsrcs.add(pubReq2);
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
        new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
    req.put(LocalResourceVisibility.PUBLIC, pubRsrcs);
    spyService.handle(new ContainerLocalizationRequestEvent(c, req));
    dispatcher.await();
    verify(spyService, times(1)).checkAndInitializeLocalDirs();
    // verify directory creation
    for (Path p : localDirs) {
      p = new Path((new URI(p.toString())).getPath());
      Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
      verify(spylfs).mkdir(eq(publicCache), eq(defaultPerm), eq(true));
    }
  } finally {
    dispatcher.stop();
  }
}
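The Mockito spies at the top of this test are what turn it into a restart scenario: the spied state store claims it can recover, and the spied NMContext hands that store to ResourceLocalizationService, so the service skips creating local directories during init and only initializes the public cache once the first resource request arrives. The lines below are a condensed sketch of that arrangement, assuming an existing nmContext as in the test and Mockito's static spy/when imports.

// Pretend the NM restarted with recoverable state.
NMStateStoreService spyStateStore = spy(nmContext.getNMStateStore());
when(spyStateStore.canRecover()).thenReturn(true);
NMContext spyContext = spy(nmContext);
when(spyContext.getNMStateStore()).thenReturn(spyStateStore);
// Any service constructed with spyContext now sees a state store whose
// canRecover() returns true and behaves as if it were recovering.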