Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation in project hadoop by apache.
The class LinuxContainerExecutor, method startLocalizer.
@Override
public void startLocalizer(LocalizerStartContext ctx)
    throws IOException, InterruptedException {
  Path nmPrivateContainerTokensPath = ctx.getNmPrivateContainerTokens();
  InetSocketAddress nmAddr = ctx.getNmAddr();
  String user = ctx.getUser();
  String appId = ctx.getAppId();
  String locId = ctx.getLocId();
  LocalDirsHandlerService dirsHandler = ctx.getDirsHandler();
  List<String> localDirs = dirsHandler.getLocalDirs();
  List<String> logDirs = dirsHandler.getLogDirs();
  verifyUsernamePattern(user);
  String runAsUser = getRunAsUser(user);
  PrivilegedOperation initializeContainerOp = new PrivilegedOperation(
      PrivilegedOperation.OperationType.INITIALIZE_CONTAINER);
  List<String> prefixCommands = new ArrayList<>();
  addSchedPriorityCommand(prefixCommands);
  initializeContainerOp.appendArgs(
      runAsUser,
      user,
      Integer.toString(
          PrivilegedOperation.RunAsUserCommand.INITIALIZE_CONTAINER.getValue()),
      appId,
      nmPrivateContainerTokensPath.toUri().getPath().toString(),
      StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, localDirs),
      StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, logDirs));
  // use same jvm as parent
  File jvm = new File(new File(System.getProperty("java.home"), "bin"), "java");
  initializeContainerOp.appendArgs(jvm.toString());
  initializeContainerOp.appendArgs("-classpath");
  initializeContainerOp.appendArgs(System.getProperty("java.class.path"));
  String javaLibPath = System.getProperty("java.library.path");
  if (javaLibPath != null) {
    initializeContainerOp.appendArgs("-Djava.library.path=" + javaLibPath);
  }
  initializeContainerOp.appendArgs(ContainerLocalizer.getJavaOpts(getConf()));
  List<String> localizerArgs = new ArrayList<>();
  buildMainArgs(localizerArgs, user, appId, locId, nmAddr, localDirs);
  initializeContainerOp.appendArgs(localizerArgs);
  try {
    Configuration conf = super.getConf();
    PrivilegedOperationExecutor privilegedOperationExecutor =
        PrivilegedOperationExecutor.getInstance(conf);
    privilegedOperationExecutor.executePrivilegedOperation(
        prefixCommands, initializeContainerOp, null, null, false, true);
  } catch (PrivilegedOperationException e) {
    int exitCode = e.getExitCode();
    LOG.warn("Exit code from container " + locId + " startLocalizer is : "
        + exitCode, e);
    throw new IOException("Application " + appId + " initialization failed"
        + " (exitCode=" + exitCode + ") with output: " + e.getOutput(), e);
  }
}
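All of the examples on this page follow the same basic pattern: construct a PrivilegedOperation for a given OperationType, append its arguments, and submit it to the PrivilegedOperationExecutor obtained from the current Configuration. The following is a minimal sketch of that pattern only; the class and method names are illustrative and the prefix-command list is assumed empty, but the PrivilegedOperation and PrivilegedOperationExecutor calls are the same ones used above.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;

public class PrivilegedOperationSketch {
  // Illustrative only: builds and runs a single INITIALIZE_CONTAINER operation.
  static void runInitializeContainer(Configuration conf, String... args)
      throws IOException {
    PrivilegedOperation op = new PrivilegedOperation(
        PrivilegedOperation.OperationType.INITIALIZE_CONTAINER);
    op.appendArgs(args);  // arguments are appended in invocation order
    // Prefix commands (e.g. a scheduling-priority prefix) are assumed empty here.
    List<String> prefixCommands = Collections.emptyList();
    try {
      PrivilegedOperationExecutor.getInstance(conf)
          .executePrivilegedOperation(prefixCommands, op, null, null, false, true);
    } catch (PrivilegedOperationException e) {
      // the executor surfaces the exit code and any captured output
      throw new IOException("operation failed (exitCode=" + e.getExitCode()
          + ") with output: " + e.getOutput(), e);
    }
  }
}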
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation in project hadoop by apache.
The class CGroupsCpuResourceHandlerImpl, method preStart.
@Override
public List<PrivilegedOperation> preStart(Container container)
    throws ResourceHandlerException {
  String cgroupId = container.getContainerId().toString();
  Resource containerResource = container.getResource();
  cGroupsHandler.createCGroup(CPU, cgroupId);
  try {
    int containerVCores = containerResource.getVirtualCores();
    int cpuShares = CPU_DEFAULT_WEIGHT * containerVCores;
    cGroupsHandler.updateCGroupParam(CPU, cgroupId,
        CGroupsHandler.CGROUP_CPU_SHARES, String.valueOf(cpuShares));
    if (strictResourceUsageMode) {
      if (nodeVCores != containerVCores) {
        float containerCPU =
            (containerVCores * yarnProcessors) / (float) nodeVCores;
        int[] limits = getOverallLimits(containerCPU);
        cGroupsHandler.updateCGroupParam(CPU, cgroupId,
            CGroupsHandler.CGROUP_CPU_PERIOD_US, String.valueOf(limits[0]));
        cGroupsHandler.updateCGroupParam(CPU, cgroupId,
            CGroupsHandler.CGROUP_CPU_QUOTA_US, String.valueOf(limits[1]));
      }
    }
  } catch (ResourceHandlerException re) {
    cGroupsHandler.deleteCGroup(CPU, cgroupId);
    LOG.warn("Could not update cgroup for container", re);
    throw re;
  }
  List<PrivilegedOperation> ret = new ArrayList<>();
  ret.add(new PrivilegedOperation(
      PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
      PrivilegedOperation.CGROUP_ARG_PREFIX
          + cGroupsHandler.getPathForCGroupTasks(CPU, cgroupId)));
  return ret;
}
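For context on the values written above: cpu.shares scales linearly with the container's vcores, and strict mode additionally derives a CFS period/quota pair from the container's share of the node's physical processors. A worked sketch with assumed numbers follows; in particular, CPU_DEFAULT_WEIGHT = 1024 is an assumption here (the conventional cgroup default weight), not a value shown in the snippet.

public class CpuSharesSketch {
  public static void main(String[] args) {
    // Illustrative arithmetic only; CPU_DEFAULT_WEIGHT = 1024 is an assumption.
    int cpuDefaultWeight = 1024;
    int nodeVCores = 8;            // vcores advertised by the node
    float yarnProcessors = 8.0f;   // physical cores YARN may use on this node
    int containerVCores = 4;

    // Relative weight under contention, written to cpu.shares above.
    int cpuShares = cpuDefaultWeight * containerVCores;                    // 4096
    // Strict mode also caps absolute usage via the CFS period/quota pair,
    // sized so that quota / period reflects the container's share of cores.
    float containerCPU = (containerVCores * yarnProcessors) / nodeVCores;  // 4.0
    System.out.println("cpu.shares=" + cpuShares + ", cores=" + containerCPU);
  }
}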
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation in project hadoop by apache.
The class CGroupsHandlerImpl, method mountCGroupController.
private void mountCGroupController(CGroupController controller)
    throws ResourceHandlerException {
  String path = getControllerPath(controller);
  if (path == null) {
    try {
      // lock out other readers/writers till we are done
      rwLock.writeLock().lock();
      String hierarchy = cGroupPrefix;
      StringBuffer controllerPath = new StringBuffer()
          .append(cGroupMountPath).append('/').append(controller.getName());
      StringBuffer cGroupKV = new StringBuffer()
          .append(controller.getName()).append('=').append(controllerPath);
      PrivilegedOperation.OperationType opType =
          PrivilegedOperation.OperationType.MOUNT_CGROUPS;
      PrivilegedOperation op = new PrivilegedOperation(opType);
      op.appendArgs(hierarchy, cGroupKV.toString());
      LOG.info("Mounting controller " + controller.getName() + " at "
          + controllerPath);
      privilegedOperationExecutor.executePrivilegedOperation(op, false);
      // if privileged operation succeeds, update controller paths
      controllerPaths.put(controller, controllerPath.toString());
      return;
    } catch (PrivilegedOperationException e) {
      LOG.error("Failed to mount controller: " + controller.getName());
      throw new ResourceHandlerException(
          "Failed to mount controller: " + controller.getName());
    } finally {
      rwLock.writeLock().unlock();
    }
  } else {
    LOG.info("CGroup controller already mounted at: " + path);
    return;
  }
}
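The MOUNT_CGROUPS operation built above carries exactly two arguments: the cgroup hierarchy prefix and a controller=path pair derived from the configured mount path. The sketch below shows that argument shape with assumed example values for cGroupPrefix and cGroupMountPath; the helper class and method names are illustrative.

import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;

class MountCGroupsSketch {
  // Illustrative only: the argument shape handed to the privileged executor.
  static void mountCpuController(PrivilegedOperationExecutor executor)
      throws PrivilegedOperationException {
    PrivilegedOperation op = new PrivilegedOperation(
        PrivilegedOperation.OperationType.MOUNT_CGROUPS);
    op.appendArgs("/hadoop-yarn",              // hierarchy (cGroupPrefix); assumed value
        "cpu=/sys/fs/cgroup/cpu");             // <controller>=<cGroupMountPath>/<controller>
    executor.executePrivilegedOperation(op, false);  // same flag as the call above
  }
}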
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation in project hadoop by apache.
The class TrafficControlBandwidthHandlerImpl, method postComplete.
/**
 * Cleanup operations once the container is completed - deletes the cgroup
 * and removes the traffic shaping rule(s).
 *
 * @param containerId of the container that was completed.
 * @return null - no further privileged operations are required.
 * @throws ResourceHandlerException
 */
@Override
public List<PrivilegedOperation> postComplete(ContainerId containerId)
    throws ResourceHandlerException {
  LOG.info("postComplete for container: " + containerId.toString());
  cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.NET_CLS,
      containerId.toString());
  Integer classId = containerIdClassIdMap.get(containerId);
  if (classId != null) {
    PrivilegedOperation op = trafficController.new BatchBuilder(
        PrivilegedOperation.OperationType.TC_MODIFY_STATE)
        .deleteContainerClass(classId)
        .commitBatchToTempFile();
    try {
      privilegedOperationExecutor.executePrivilegedOperation(op, false);
      trafficController.releaseClassId(classId);
    } catch (PrivilegedOperationException e) {
      LOG.warn("Failed to delete tc rule for classId: " + classId);
      throw new ResourceHandlerException(
          "Failed to delete tc rule for classId:" + classId);
    }
  } else {
    LOG.warn("Not cleaning up tc rules. classId unknown for container: "
        + containerId.toString());
  }
  return null;
}
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation in project hadoop by apache.
The class TrafficControlBandwidthHandlerImpl, method preStart.
/**
 * Pre-start hook for the 'outbound-bandwidth' resource. A cgroup is created
 * and a net_cls classid is generated and written to a cgroup file. A
 * traffic control shaping rule is created in order to limit outbound
 * bandwidth utilization.
 *
 * @param container Container being launched
 * @return privileged operations for the required cgroups/tc state changes.
 * @throws ResourceHandlerException
 */
@Override
public List<PrivilegedOperation> preStart(Container container)
    throws ResourceHandlerException {
  String containerIdStr = container.getContainerId().toString();
  int classId = trafficController.getNextClassId();
  String classIdStr = trafficController.getStringForNetClsClassId(classId);
  cGroupsHandler.createCGroup(CGroupsHandler.CGroupController.NET_CLS,
      containerIdStr);
  cGroupsHandler.updateCGroupParam(CGroupsHandler.CGroupController.NET_CLS,
      containerIdStr, CGroupsHandler.CGROUP_PARAM_CLASSID, classIdStr);
  containerIdClassIdMap.put(container.getContainerId(), classId);
  // Now create a privileged operation in order to update the tasks file with
  // the pid of the running container process (root of process tree). This can
  // only be done at the time of launching the container, in a privileged
  // executable.
  String tasksFile = cGroupsHandler.getPathForCGroupTasks(
      CGroupsHandler.CGroupController.NET_CLS, containerIdStr);
  String opArg = new StringBuffer(PrivilegedOperation.CGROUP_ARG_PREFIX)
      .append(tasksFile).toString();
  List<PrivilegedOperation> ops = new ArrayList<>();
  ops.add(new PrivilegedOperation(
      PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, opArg));
  // Create a privileged operation to create a tc rule for this container.
  // We'll return this to the calling (Linux) Container Executor
  // implementation for batching optimizations so that we don't fork/exec
  // additional times during container launch.
  TrafficController.BatchBuilder builder = trafficController.new BatchBuilder(
      PrivilegedOperation.OperationType.TC_MODIFY_STATE);
  builder.addContainerClass(classId, containerBandwidthMbit, strictMode);
  ops.add(builder.commitBatchToTempFile());
  return ops;
}
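Taken together with postComplete above, the tc class has a simple lifecycle: preStart allocates a classid and batches an addContainerClass change, while postComplete batches a deleteContainerClass change and releases the id. The sketch below pairs those steps in one place, assuming an already-initialized TrafficController and PrivilegedOperationExecutor; the class and method names are illustrative, and unlike preStart above it executes the batched operations immediately instead of returning them for batching.

import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.TrafficController;

class TcClassLifecycleSketch {
  // Illustrative pairing of the preStart/postComplete tc operations shown above.
  static void shapeAndRelease(TrafficController trafficController,
      PrivilegedOperationExecutor executor, int rateMbit, boolean strict)
      throws ResourceHandlerException, PrivilegedOperationException {
    int classId = trafficController.getNextClassId();

    // preStart side: create the shaping class for this container
    // (the batch is written to a temp file by BatchBuilder).
    TrafficController.BatchBuilder addBuilder = trafficController.new BatchBuilder(
        PrivilegedOperation.OperationType.TC_MODIFY_STATE);
    addBuilder.addContainerClass(classId, rateMbit, strict);
    executor.executePrivilegedOperation(addBuilder.commitBatchToTempFile(), false);

    // postComplete side: remove the class and return the id to the pool.
    PrivilegedOperation delete = trafficController.new BatchBuilder(
        PrivilegedOperation.OperationType.TC_MODIFY_STATE)
        .deleteContainerClass(classId)
        .commitBatchToTempFile();
    executor.executePrivilegedOperation(delete, false);
    trafficController.releaseClassId(classId);
  }
}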