use of org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService in project hadoop by apache.
the class TestLocalResourcesTrackerImpl method testReleaseWhileDownloading.
@Test
@SuppressWarnings("unchecked")
public void testReleaseWhileDownloading() throws Exception {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
    EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    ContainerId cId = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc = new LocalizerContext(user, cId, null);
    LocalResourceRequest req =
        createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
    LocalizedResource lr = createLocalizedResource(req, dispatcher);
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    localrsrc.put(req, lr);
    LocalResourcesTracker tracker =
        new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc, false, conf,
            new NMNullStateStoreService(), null);
    // request the resource
    ResourceEvent reqEvent =
        new ResourceRequestEvent(req, LocalResourceVisibility.PUBLIC, lc);
    tracker.handle(reqEvent);
    // release the resource
    ResourceEvent relEvent = new ResourceReleaseEvent(req, cId);
    tracker.handle(relEvent);
    // download completing after release
    ResourceLocalizedEvent rle =
        new ResourceLocalizedEvent(req, new Path("file:///tmp/r1"), 1);
    tracker.handle(rle);
    dispatcher.await();
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
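Every snippet on this page passes NMNullStateStoreService wherever an NMStateStoreService is expected, since none of these tests exercise NodeManager recovery. Below is a minimal standalone sketch of that pattern, assuming only the standard Hadoop service lifecycle (init/start/stop) and that the null store reports canRecover() as false; it is an illustration, not code from the Hadoop test sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;

public class NullStateStoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Drop-in state store that persists nothing; handy when a test only
    // needs to satisfy a constructor parameter.
    NMStateStoreService stateStore = new NMNullStateStoreService();
    stateStore.init(conf);
    stateStore.start();
    // Assumption: the null store reports that it cannot recover state,
    // so callers skip their recovery paths.
    System.out.println("canRecover = " + stateStore.canRecover());
    stateStore.stop();
  }
}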
use of org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService in project hadoop by apache.
the class TestNonAggregatingLogHandler method testFailedDirLogDeletion.
/*
 * Test to ensure that we handle the cleanup of directories that may not have
 * the application log dirs we're trying to delete or may have other problems.
 * Test creates 7 log dirs, and fails the directory check for 4 of them and
 * then checks to ensure we tried to delete only the ones that passed the
 * check.
 */
@Test
public void testFailedDirLogDeletion() throws Exception {
  File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 7);
  final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length);
  for (int i = 0; i < localLogDirs.length; i++) {
    localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
  }
  String localLogDirsString = StringUtils.join(localLogDirPaths, ",");
  conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
  conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0L);
  LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);
  NonAggregatingLogHandler rawLogHandler =
      new NonAggregatingLogHandler(dispatcher, mockDelService, mockDirsHandler,
          new NMNullStateStoreService());
  NonAggregatingLogHandler logHandler = spy(rawLogHandler);
  AbstractFileSystem spylfs =
      spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
  FileContext lfs = FileContext.getFileContext(spylfs, conf);
  doReturn(lfs).when(logHandler).getLocalFileContext(isA(Configuration.class));
  logHandler.init(conf);
  logHandler.start();
  runMockedFailedDirs(logHandler, appId, user, mockDelService, mockDirsHandler, conf,
      spylfs, lfs, localLogDirs);
  logHandler.close();
}
use of org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService in project hadoop by apache.
the class TestNMWebServer method testNMWebApp.
@Test
public void testNMWebApp() throws IOException, YarnException {
  Configuration conf = new Configuration();
  Context nmContext = new NodeManager.NMContext(null, null, null, null, null, false, conf);
  ResourceView resourceView = new ResourceView() {

    @Override
    public long getVmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getPmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getVCoresAllocatedForContainers() {
      return 0;
    }

    @Override
    public boolean isVmemCheckEnabled() {
      return true;
    }

    @Override
    public boolean isPmemCheckEnabled() {
      return true;
    }
  };
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  WebServer server =
      new WebServer(nmContext, resourceView, new ApplicationACLsManager(conf), dirsHandler);
  server.init(conf);
  server.start();
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  Dispatcher dispatcher = new AsyncDispatcher();
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId =
      BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  nmContext.getApplications().put(appId, app);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId container1 =
      BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
  ContainerId container2 =
      BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 1);
  NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
  NMStateStoreService stateStore = new NMNullStateStoreService();
  for (ContainerId containerId : new ContainerId[] { container1, container2 }) {
    // TODO: Use builder utils
    ContainerLaunchContext launchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    long currentTime = System.currentTimeMillis();
    Token containerToken =
        BuilderUtils.newContainerToken(containerId, 0, "127.0.0.1", 1234, user,
            BuilderUtils.newResource(1024, 1), currentTime + 10000L, 123,
            "password".getBytes(), currentTime);
    Context context = mock(Context.class);
    Container container =
        new ContainerImpl(conf, dispatcher, launchContext, null, metrics,
            BuilderUtils.newContainerTokenIdentifier(containerToken), context) {

          @Override
          public ContainerState getContainerState() {
            return ContainerState.RUNNING;
          }
        };
    nmContext.getContainers().put(containerId, container);
    // TODO: Gross hack. Fix in code.
    ApplicationId applicationId =
        containerId.getApplicationAttemptId().getApplicationId();
    nmContext.getApplications().get(applicationId).getContainers()
        .put(containerId, container);
    writeContainerLogs(nmContext, containerId, dirsHandler);
  }
  // TODO: Pull logs and test contents.
  // Thread.sleep(1000000);
}
use of org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService in project hadoop by apache.
the class TestNMAppsPage method testNMAppsPage.
@Test
public void testNMAppsPage() {
  Configuration conf = new Configuration();
  final NMContext nmcontext =
      new NMContext(new NMContainerTokenSecretManager(conf), new NMTokenSecretManagerInNM(),
          null, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
  Injector injector =
      WebAppTests.createMockInjector(NMContext.class, nmcontext, new Module() {

        @Override
        public void configure(Binder binder) {
          NodeManager nm = TestNMAppsPage.mocknm(nmcontext);
          binder.bind(NodeManager.class).toInstance(nm);
          binder.bind(Context.class).toInstance(nmcontext);
        }
      });
  ApplicationBlock instance = injector.getInstance(ApplicationBlock.class);
  instance.set(YarnWebParams.APPLICATION_ID, applicationid);
  instance.render();
}
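By contrast, NodeManager recovery tests in the same module typically substitute a store that actually retains state. The sketch below is an assumption-laden illustration, not taken from the snippets above: it presumes that NMMemoryStateStoreService follows the same NMStateStoreService lifecycle and that YarnConfiguration.NM_RECOVERY_ENABLED is the relevant switch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;

public class RecoverableStoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: recovery-oriented tests flip this flag and pair it with a
    // store that keeps state, unlike NMNullStateStoreService.
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
    NMStateStoreService stateStore = new NMMemoryStateStoreService();
    stateStore.init(conf);
    stateStore.start();
    System.out.println("canRecover = " + stateStore.canRecover());
    stateStore.stop();
  }
}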