Use of org.apache.hadoop.yarn.server.security.ApplicationACLsManager in project hadoop by Apache.
The class TestLocalCacheDirectoryManager, method testMinimumPerDirectoryFileLimit:
@Test
public void testMinimumPerDirectoryFileLimit() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "1");
  Exception e = null;
  NMContext nmContext = new NMContext(new NMContainerTokenSecretManager(conf),
      new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf),
      new NMNullStateStoreService(), false, conf);
  ResourceLocalizationService service =
      new ResourceLocalizationService(null, null, null, null, nmContext);
  try {
    service.init(conf);
  } catch (Exception e1) {
    e = e1;
  }
  Assert.assertNotNull(e);
  Assert.assertEquals(YarnRuntimeException.class, e.getClass());
  Assert.assertEquals(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY
      + " parameter is configured with a value less than 37.", e.getMessage());
}
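The test above shows that ResourceLocalizationService.init() rejects a per-directory file cap below 37 with a YarnRuntimeException (the cache layout presumably needs room for its hashed subdirectories plus at least one file). A minimal sketch of a configuration that passes this check, using only the YarnConfiguration key seen above; the value 8192 is an arbitrary illustrative choice, not something taken from this test:

// Sketch: set the per-directory file cap at or above the enforced minimum of 37
// so ResourceLocalizationService.init() does not throw.
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, 8192);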
Use of org.apache.hadoop.yarn.server.security.ApplicationACLsManager in project hadoop by Apache.
The class TestNMWebServer, method testNMWebApp:
@Test
public void testNMWebApp() throws IOException, YarnException {
  Configuration conf = new Configuration();
  Context nmContext = new NodeManager.NMContext(null, null, null, null, null, false, conf);
  ResourceView resourceView = new ResourceView() {

    @Override
    public long getVmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getPmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getVCoresAllocatedForContainers() {
      return 0;
    }

    @Override
    public boolean isVmemCheckEnabled() {
      return true;
    }

    @Override
    public boolean isPmemCheckEnabled() {
      return true;
    }
  };
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  WebServer server =
      new WebServer(nmContext, resourceView, new ApplicationACLsManager(conf), dirsHandler);
  server.init(conf);
  server.start();
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  Dispatcher dispatcher = new AsyncDispatcher();
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  nmContext.getApplications().put(appId, app);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
  ContainerId container2 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 1);
  NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
  NMStateStoreService stateStore = new NMNullStateStoreService();
  for (ContainerId containerId : new ContainerId[] { container1, container2 }) {
    // TODO: Use builder utils
    ContainerLaunchContext launchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    long currentTime = System.currentTimeMillis();
    Token containerToken = BuilderUtils.newContainerToken(containerId, 0, "127.0.0.1", 1234,
        user, BuilderUtils.newResource(1024, 1), currentTime + 10000L, 123,
        "password".getBytes(), currentTime);
    Context context = mock(Context.class);
    Container container = new ContainerImpl(conf, dispatcher, launchContext, null, metrics,
        BuilderUtils.newContainerTokenIdentifier(containerToken), context) {

      @Override
      public ContainerState getContainerState() {
        return ContainerState.RUNNING;
      }
    };
    nmContext.getContainers().put(containerId, container);
    // TODO: Gross hack. Fix in code.
    ApplicationId applicationId = containerId.getApplicationAttemptId().getApplicationId();
    nmContext.getApplications().get(applicationId).getContainers().put(containerId, container);
    writeContainerLogs(nmContext, containerId, dirsHandler);
  }
  // TODO: Pull logs and test contents.
  // Thread.sleep(1000000);
}
Use of org.apache.hadoop.yarn.server.security.ApplicationACLsManager in project hadoop by Apache.
The class TestNMAppsPage, method testNMAppsPage:
@Test
public void testNMAppsPage() {
  Configuration conf = new Configuration();
  final NMContext nmcontext = new NMContext(new NMContainerTokenSecretManager(conf),
      new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf),
      new NMNullStateStoreService(), false, conf);
  Injector injector = WebAppTests.createMockInjector(NMContext.class, nmcontext, new Module() {

    @Override
    public void configure(Binder binder) {
      NodeManager nm = TestNMAppsPage.mocknm(nmcontext);
      binder.bind(NodeManager.class).toInstance(nm);
      binder.bind(Context.class).toInstance(nmcontext);
    }
  });
  ApplicationBlock instance = injector.getInstance(ApplicationBlock.class);
  instance.set(YarnWebParams.APPLICATION_ID, applicationid);
  instance.render();
}
Use of org.apache.hadoop.yarn.server.security.ApplicationACLsManager in project hadoop by Apache.
The class TestNMWebServer, method startNMWebAppServer:
private int startNMWebAppServer(String webAddr) {
  Configuration conf = new Configuration();
  Context nmContext = new NodeManager.NMContext(null, null, null, null, null, false, conf);
  ResourceView resourceView = new ResourceView() {

    @Override
    public long getVmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getPmemAllocatedForContainers() {
      return 0;
    }

    @Override
    public long getVCoresAllocatedForContainers() {
      return 0;
    }

    @Override
    public boolean isVmemCheckEnabled() {
      return true;
    }

    @Override
    public boolean isPmemCheckEnabled() {
      return true;
    }
  };
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
  conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  conf.set(YarnConfiguration.NM_WEBAPP_ADDRESS, webAddr);
  WebServer server =
      new WebServer(nmContext, resourceView, new ApplicationACLsManager(conf), dirsHandler);
  try {
    server.init(conf);
    server.start();
    return server.getPort();
  } finally {
    server.stop();
    healthChecker.stop();
  }
}
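A caller of this helper would typically pass an ephemeral bind address so concurrent tests do not collide on a port. A hypothetical usage sketch; the "0.0.0.0:0" address is an assumption that follows the host:port form NM_WEBAPP_ADDRESS expects, and the assertion is illustrative rather than copied from the test class:

// Hypothetical caller: bind to an ephemeral port and check that a real port was assigned.
int port = startNMWebAppServer("0.0.0.0:0");
Assert.assertTrue("Web server did not report a bound port", port > 0);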
Use of org.apache.hadoop.yarn.server.security.ApplicationACLsManager in project hadoop by Apache.
The class AggregatedLogsBlock, method render:
@Override
protected void render(Block html) {
  ContainerId containerId = verifyAndGetContainerId(html);
  NodeId nodeId = verifyAndGetNodeId(html);
  String appOwner = verifyAndGetAppOwner(html);
  LogLimits logLimits = verifyAndGetLogLimits(html);
  if (containerId == null || nodeId == null || appOwner == null
      || appOwner.isEmpty() || logLimits == null) {
    return;
  }
  ApplicationId applicationId = containerId.getApplicationAttemptId().getApplicationId();
  String logEntity = $(ENTITY_STRING);
  if (logEntity == null || logEntity.isEmpty()) {
    logEntity = containerId.toString();
  }
  String nmApplicationLogUrl = getApplicationLogURL(applicationId);
  if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
      YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
    html.h1()._("Aggregation is not enabled. Try the nodemanager at " + nodeId)._();
    if (nmApplicationLogUrl != null) {
      html.h1()._("Or see application log at " + nmApplicationLogUrl)._();
    }
    return;
  }
  Path remoteRootLogDir = new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
      YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
  Path remoteAppDir = LogAggregationUtils.getRemoteAppLogDir(remoteRootLogDir, applicationId,
      appOwner, LogAggregationUtils.getRemoteNodeLogDirSuffix(conf));
  RemoteIterator<FileStatus> nodeFiles;
  try {
    Path qualifiedLogDir = FileContext.getFileContext(conf).makeQualified(remoteAppDir);
    nodeFiles = FileContext.getFileContext(qualifiedLogDir.toUri(), conf)
        .listStatus(remoteAppDir);
  } catch (FileNotFoundException fnf) {
    html.h1()._("Logs not available for " + logEntity + ". Aggregation may not be complete, "
        + "Check back later or try the nodemanager at " + nodeId)._();
    if (nmApplicationLogUrl != null) {
      html.h1()._("Or see application log at " + nmApplicationLogUrl)._();
    }
    return;
  } catch (Exception ex) {
    html.h1()._("Error getting logs at " + nodeId)._();
    return;
  }
  boolean foundLog = false;
  String desiredLogType = $(CONTAINER_LOG_TYPE);
  try {
    while (nodeFiles.hasNext()) {
      AggregatedLogFormat.LogReader reader = null;
      try {
        FileStatus thisNodeFile = nodeFiles.next();
        // If the aggregated logs were archived into a Hadoop archive (.har), re-list inside it.
        if (thisNodeFile.getPath().getName().equals(applicationId + ".har")) {
          Path p = new Path("har:///" + thisNodeFile.getPath().toUri().getRawPath());
          nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
          continue;
        }
        // Skip files written by other nodes and in-progress (.tmp) uploads.
        if (!thisNodeFile.getPath().getName()
            .contains(LogAggregationUtils.getNodeString(nodeId))
            || thisNodeFile.getPath().getName()
                .endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
          continue;
        }
        long logUploadedTime = thisNodeFile.getModificationTime();
        reader = new AggregatedLogFormat.LogReader(conf, thisNodeFile.getPath());
        String owner = null;
        Map<ApplicationAccessType, String> appAcls = null;
        try {
          owner = reader.getApplicationOwner();
          appAcls = reader.getApplicationAcls();
        } catch (IOException e) {
          LOG.error("Error getting logs for " + logEntity, e);
          continue;
        }
        // Enforce the application's view ACLs for the remote caller.
        ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
        aclsManager.addApplication(applicationId, appAcls);
        String remoteUser = request().getRemoteUser();
        UserGroupInformation callerUGI = null;
        if (remoteUser != null) {
          callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
        }
        if (callerUGI != null && !aclsManager.checkAccess(callerUGI,
            ApplicationAccessType.VIEW_APP, owner, applicationId)) {
          html.h1()._("User [" + remoteUser + "] is not authorized to view the logs for "
              + logEntity + " in log file [" + thisNodeFile.getPath().getName() + "]")._();
          LOG.error("User [" + remoteUser + "] is not authorized to view the logs for "
              + logEntity);
          continue;
        }
        AggregatedLogFormat.ContainerLogsReader logReader =
            reader.getContainerLogsReader(containerId);
        if (logReader == null) {
          continue;
        }
        foundLog = readContainerLogs(html, logReader, logLimits, desiredLogType,
            logUploadedTime);
      } catch (IOException ex) {
        LOG.error("Error getting logs for " + logEntity, ex);
        continue;
      } finally {
        if (reader != null) {
          reader.close();
        }
      }
    }
    if (!foundLog) {
      if (desiredLogType.isEmpty()) {
        html.h1("No logs available for container " + containerId.toString());
      } else {
        html.h1("Unable to locate '" + desiredLogType + "' log for container "
            + containerId.toString());
      }
    }
  } catch (IOException e) {
    html.h1()._("Error getting logs for " + logEntity)._();
    LOG.error("Error getting logs for " + logEntity, e);
  }
}
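Across these examples the same ApplicationACLsManager pattern recurs: construct it from a Configuration, register an application's ACL map, then check a caller's access. A standalone sketch of that flow, using only the APIs visible in the snippets above (plus the usual imports from org.apache.hadoop.conf, org.apache.hadoop.security, org.apache.hadoop.yarn.api.records, and org.apache.hadoop.yarn.server.security); the user name, owner, and ACL string are hypothetical, and the yarn.acl.enable switch is assumed to govern whether the check is enforced:

// Sketch: build an ACLs manager and register view ACLs for one application.
Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); // assumption: without this, checks are not enforced
ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);

// The ACL map normally comes from the app submission context or, as in
// AggregatedLogsBlock above, from the aggregated log file's metadata.
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
Map<ApplicationAccessType, String> acls =
    Collections.singletonMap(ApplicationAccessType.VIEW_APP, "alice admins"); // hypothetical "users groups" ACL
aclsManager.addApplication(appId, acls);

// Check whether a (hypothetical) remote user may view the application.
UserGroupInformation caller = UserGroupInformation.createRemoteUser("alice");
boolean allowed = aclsManager.checkAccess(caller, ApplicationAccessType.VIEW_APP,
    "appOwner", appId);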