use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
the class BaseAMRMProxyTest method setUp.
@Before
public void setUp() {
  this.conf = new YarnConfiguration();
  this.conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
  String mockPassThroughInterceptorClass =
      PassThroughRequestInterceptor.class.getName();
  // Create a request interceptor pipeline for testing. The last one in the
  // chain will call the mock resource manager. The others in the chain will
  // simply forward the request to the next one in the chain.
  this.conf.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
      mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass
          + "," + mockPassThroughInterceptorClass + ","
          + MockRequestInterceptor.class.getName());
  this.dispatcher = new AsyncDispatcher();
  this.dispatcher.init(conf);
  this.dispatcher.start();
  this.amrmProxyService = createAndStartAMRMProxyService();
}
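For reference, a minimal sketch of the AsyncDispatcher lifecycle these tests depend on: init with a Configuration, register one handler per event-type enum class, start, then post events through getEventHandler(). The DemoEventType enum and DemoEvent class are invented here purely for illustration; the dispatcher calls themselves are the real org.apache.hadoop.yarn.event API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

// Hypothetical event type and event, for the sketch only.
enum DemoEventType { PING }

class DemoEvent extends AbstractEvent<DemoEventType> {
  DemoEvent() {
    super(DemoEventType.PING);
  }
}

public class DispatcherDemo {
  public static void main(String[] args) {
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(new Configuration());
    // One handler is registered per event-type enum class.
    dispatcher.register(DemoEventType.class,
        (EventHandler<DemoEvent>) event ->
            System.out.println("handled " + event.getType()));
    dispatcher.start();
    // handle() only enqueues; the dispatcher's own thread runs the handler.
    dispatcher.getEventHandler().handle(new DemoEvent());
    dispatcher.stop();
  }
}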
use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
the class TestResourceLocalizationService method testDirectoryCleanupOnNewlyCreatedStateStore.
@Test
public void testDirectoryCleanupOnNewlyCreatedStateStore()
    throws IOException, URISyntaxException {
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(new Configuration());
  ContainerExecutor exec = mock(ContainerExecutor.class);
  DeletionService delService = spy(new DeletionService(exec));
  delService.init(conf);
  delService.start();
  List<Path> localDirs = new ArrayList<Path>();
  String[] sDirs = new String[4];
  for (int i = 0; i < 4; ++i) {
    localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
    sDirs[i] = localDirs.get(i).toString();
  }
  conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
  LocalDirsHandlerService diskhandler = new LocalDirsHandlerService();
  diskhandler.init(conf);
  NMStateStoreService nmStateStoreService = mock(NMStateStoreService.class);
  when(nmStateStoreService.canRecover()).thenReturn(true);
  when(nmStateStoreService.isNewlyCreated()).thenReturn(true);
  ResourceLocalizationService locService = spy(new ResourceLocalizationService(
      dispatcher, exec, delService, diskhandler, nmContext));
  doReturn(lfs).when(locService).getLocalFileContext(isA(Configuration.class));
  try {
    dispatcher.start();
    // initialize ResourceLocalizationService
    locService.init(conf);
    final FsPermission defaultPerm = new FsPermission((short) 0755);
    // verify directory creation
    for (Path p : localDirs) {
      p = new Path((new URI(p.toString())).getPath());
      Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
      verify(spylfs).rename(eq(usercache), any(Path.class),
          any(Options.Rename.class));
      verify(spylfs).mkdir(eq(usercache), eq(defaultPerm), eq(true));
      Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
      verify(spylfs).rename(eq(publicCache), any(Path.class),
          any(Options.Rename.class));
      verify(spylfs).mkdir(eq(publicCache), eq(defaultPerm), eq(true));
      Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR);
      verify(spylfs).rename(eq(nmPriv), any(Path.class),
          any(Options.Rename.class));
      verify(spylfs).mkdir(eq(nmPriv),
          eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true));
    }
  } finally {
    dispatcher.stop();
    delService.stop();
  }
}
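Note the try/finally: the dispatcher and the DeletionService are stopped even if a verification fails. By default, stopping the dispatcher may leave queued events unprocessed; when a test (or a production service, as in the RMStateStore example at the end of this page) needs the queue fully drained first, AsyncDispatcher offers setDrainEventsOnStop(). A minimal sketch:

AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(new Configuration());
// Ask stop() to drain the event queue before shutting the thread down.
dispatcher.setDrainEventsOnStop();
dispatcher.start();
try {
  // ... exercise code that posts events via dispatcher.getEventHandler() ...
} finally {
  dispatcher.stop();
}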
use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
the class TestNMWebServicesApps method addAppContainers.
private HashMap<String, String> addAppContainers(Application app)
    throws IOException {
  Dispatcher dispatcher = new AsyncDispatcher();
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(app.getAppId(), 1);
  Container container1 = new MockContainer(appAttemptId, dispatcher, conf,
      app.getUser(), app.getAppId(), 1);
  Container container2 = new MockContainer(appAttemptId, dispatcher, conf,
      app.getUser(), app.getAppId(), 2);
  nmContext.getContainers().put(container1.getContainerId(), container1);
  nmContext.getContainers().put(container2.getContainerId(), container2);
  app.getContainers().put(container1.getContainerId(), container1);
  app.getContainers().put(container2.getContainerId(), container2);
  HashMap<String, String> hash = new HashMap<String, String>();
  hash.put(container1.getContainerId().toString(),
      container1.getContainerId().toString());
  hash.put(container2.getContainerId().toString(),
      container2.getContainerId().toString());
  return hash;
}
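A hedged sketch of how a test might consume this helper; app, conf, and nmContext are set up as in the surrounding test class, and the JSON/XML response checks these web-services tests actually perform are elided.

// Hypothetical caller inside a test method.
HashMap<String, String> containerIds = addAppContainers(app);
assertEquals(2, containerIds.size());
// Every container registered with the NM context should appear in the map.
for (Container c : nmContext.getContainers().values()) {
  assertTrue(containerIds.containsKey(c.getContainerId().toString()));
}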
use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
the class TestNMWebServer method testNMWebApp.
@Test
public void testNMWebApp() throws IOException, YarnException {
  Configuration conf = new Configuration();
  Context nmContext = new NodeManager.NMContext(null, null, null, null, null,
      false, conf);
  ResourceView resourceView = new ResourceView() {

    @Override
    public long getVmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getPmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getVCoresAllocatedForContainers() {
      return 0;
    }

    @Override
    public boolean isVmemCheckEnabled() {
      return true;
    }

    @Override
    public boolean isPmemCheckEnabled() {
      return true;
    }
  };
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  WebServer server = new WebServer(nmContext, resourceView,
      new ApplicationACLsManager(conf), dirsHandler);
  server.init(conf);
  server.start();
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  Dispatcher dispatcher = new AsyncDispatcher();
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId =
      BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  nmContext.getApplications().put(appId, app);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId container1 =
      BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
  ContainerId container2 =
      BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 1);
  NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
  NMStateStoreService stateStore = new NMNullStateStoreService();
  for (ContainerId containerId : new ContainerId[] { container1,
      container2 }) {
    // TODO: Use builder utils
    ContainerLaunchContext launchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    long currentTime = System.currentTimeMillis();
    Token containerToken = BuilderUtils.newContainerToken(containerId, 0,
        "127.0.0.1", 1234, user, BuilderUtils.newResource(1024, 1),
        currentTime + 10000L, 123, "password".getBytes(), currentTime);
    Context context = mock(Context.class);
    Container container = new ContainerImpl(conf, dispatcher, launchContext,
        null, metrics, BuilderUtils.newContainerTokenIdentifier(containerToken),
        context) {
      @Override
      public ContainerState getContainerState() {
        return ContainerState.RUNNING;
      }
    };
    nmContext.getContainers().put(containerId, container);
    // TODO: Gross hack. Fix in code.
    ApplicationId applicationId =
        containerId.getApplicationAttemptId().getApplicationId();
    nmContext.getApplications().get(applicationId).getContainers()
        .put(containerId, container);
    writeContainerLogs(nmContext, containerId, dirsHandler);
  }
  // TODO: Pull logs and test contents.
  // Thread.sleep(1000000);
}
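The anonymous ContainerImpl subclass above pins each container in RUNNING without driving the real container state machine; the dispatcher is constructed but never started here, so no events would be processed anyway. A Mockito spy is an alternative sketch of the same stubbing, with constructor arguments as in the loop above:

ContainerImpl container = spy(new ContainerImpl(conf, dispatcher,
    launchContext, null, metrics,
    BuilderUtils.newContainerTokenIdentifier(containerToken), context));
// Report RUNNING regardless of what the state machine would say.
doReturn(ContainerState.RUNNING).when(container).getContainerState();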
use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
the class RMStateStore method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // create async handler
  dispatcher = new AsyncDispatcher("RM StateStore dispatcher");
  dispatcher.init(conf);
  rmStateStoreEventHandler = new ForwardingEventHandler();
  dispatcher.register(RMStateStoreEventType.class, rmStateStoreEventHandler);
  dispatcher.setDrainEventsOnStop();
  initInternal(conf);
}