Use of java.security.AccessControlException in project hadoop by apache.
The class HttpFSServer, method get.
/**
* Binding to handle HTTP GET requests for the supported HttpFS operations.
*
* @param path the path for operation.
* @param op the HttpFS operation of the request.
* @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
* @throws FileSystemAccessException thrown if a FileSystemAccess related
* error occurred. Thrown exceptions are handled by
* {@link HttpFSExceptionProvider}.
*/
@GET
@Path("{path:.*}")
@Produces({ MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8 })
public Response get(@PathParam("path") String path, @QueryParam(OperationParam.NAME) OperationParam op, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException {
UserGroupInformation user = HttpUserGroupInformation.get();
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
MDC.put("hostname", request.getRemoteAddr());
switch(op.value()) {
case OPEN:
{
// Invoking the command directly using an unmanaged FileSystem that is
// released by the FileSystemReleaseFilter
final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
final FileSystem fs = createFileSystem(user);
InputStream is = null;
UserGroupInformation ugi = UserGroupInformation.createProxyUser(user.getShortUserName(), UserGroupInformation.getLoginUser());
try {
is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
@Override
public InputStream run() throws Exception {
return command.execute(fs);
}
});
} catch (InterruptedException ie) {
LOG.info("Open interrupted.", ie);
Thread.currentThread().interrupt();
}
Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
Long len = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[] { path, offset, len });
InputStreamEntity entity = new InputStreamEntity(is, offset, len);
response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
break;
}
case GETFILESTATUS:
{
FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS:
{
String filter = params.get(FilterParam.NAME, FilterParam.class);
FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETHOMEDIRECTORY:
{
enforceRootPath(op.value(), path);
FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case INSTRUMENTATION:
{
enforceRootPath(op.value(), path);
Groups groups = HttpFSServerWebApp.get().get(Groups.class);
List<String> userGroups = groups.getGroups(user.getShortUserName());
if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
throw new AccessControlException("User not in HttpFSServer admin group");
}
Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
Map snapshot = instrumentation.getSnapshot();
response = Response.ok(snapshot).build();
break;
}
case GETCONTENTSUMMARY:
{
FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILECHECKSUM:
{
FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILEBLOCKLOCATIONS:
{
response = Response.status(Response.Status.BAD_REQUEST).build();
break;
}
case GETACLSTATUS:
{
FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("ACL status for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETXATTRS:
{
List<String> xattrNames = params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
XAttrCodec encoding = params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttrs for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTXATTRS:
{
FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttr names for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS_BATCH:
{
String startAfter = params.get(HttpFSParametersProvider.StartAfterParam.NAME, HttpFSParametersProvider.StartAfterParam.class);
byte[] token = HttpFSUtils.EMPTY_BYTES;
if (startAfter != null) {
token = startAfter.getBytes(Charsets.UTF_8);
}
FSOperations.FSListStatusBatch command = new FSOperations.FSListStatusBatch(path, token);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] token [{}]", path, token);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETTRASHROOT:
{
FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETALLSTORAGEPOLICY:
{
FSOperations.FSGetAllStoragePolicies command = new FSOperations.FSGetAllStoragePolicies();
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSTORAGEPOLICY:
{
FSOperations.FSGetStoragePolicy command = new FSOperations.FSGetStoragePolicy(path);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
default:
{
throw new IOException(MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
}
}
return response;
}
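For context (this snippet is not part of the Hadoop source), a request such as the GETFILESTATUS case handled above can be issued with a plain Java HTTP client. The host, the 14000 port, the /webhdfs/v1 prefix, the file path, and the user.name value are all assumptions about a typical pseudo-authenticated HttpFS deployment; adjust them to the actual cluster.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class HttpFSGetFileStatusExample {
  public static void main(String[] args) throws Exception {
    // Assumed HttpFS endpoint, path, and user; placeholders for illustration only.
    String url = "http://localhost:14000/webhdfs/v1/tmp/sample.txt"
        + "?op=GETFILESTATUS&user.name=hdfs";
    HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
    conn.setRequestMethod("GET");
    // The GETFILESTATUS branch above returns the JSON produced by FSOperations.FSFileStatus.
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
    conn.disconnect();
  }
}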
Use of java.security.AccessControlException in project hadoop by apache.
The class ClientRMService, method moveApplicationAcrossQueues.
@SuppressWarnings("unchecked")
@Override
public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest request) throws YarnException {
ApplicationId applicationId = request.getApplicationId();
UserGroupInformation callerUGI;
try {
callerUGI = UserGroupInformation.getCurrentUser();
} catch (IOException ie) {
LOG.info("Error getting UGI ", ie);
RMAuditLogger.logFailure("UNKNOWN", AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService", "Error getting UGI", applicationId);
throw RPCUtil.getRemoteException(ie);
}
RMApp application = this.rmContext.getRMApps().get(applicationId);
if (application == null) {
RMAuditLogger.logFailure(callerUGI.getUserName(), AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService", "Trying to move an absent application", applicationId);
throw new ApplicationNotFoundException("Trying to move an absent" + " application " + applicationId);
}
if (!checkAccess(callerUGI, application.getUser(), ApplicationAccessType.MODIFY_APP, application)) {
RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "User doesn't have permissions to " + ApplicationAccessType.MODIFY_APP.toString(), "ClientRMService", AuditConstants.UNAUTHORIZED_USER, applicationId);
throw RPCUtil.getRemoteException(new AccessControlException("User " + callerUGI.getShortUserName() + " cannot perform operation " + ApplicationAccessType.MODIFY_APP.name() + " on " + applicationId));
}
String targetQueue = request.getTargetQueue();
if (!accessToTargetQueueAllowed(callerUGI, application, targetQueue)) {
RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "Target queue doesn't exist or user" + " doesn't have permissions to submit to target queue: " + targetQueue, "ClientRMService", AuditConstants.UNAUTHORIZED_USER, applicationId);
throw RPCUtil.getRemoteException(new AccessControlException("User " + callerUGI.getShortUserName() + " cannot submit applications to" + " target queue or the target queue doesn't exist: " + targetQueue + " while moving " + applicationId));
}
// The application can only be moved while it is in an active (not yet completed) state.
if (!ACTIVE_APP_STATES.contains(application.getState())) {
String msg = "App in " + application.getState() + " state cannot be moved.";
RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService", msg);
throw new YarnException(msg);
}
try {
this.rmAppManager.moveApplicationAcrossQueue(application.getApplicationId(), request.getTargetQueue());
} catch (YarnException ex) {
RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService", ex.getMessage());
throw ex;
}
RMAuditLogger.logSuccess(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "ClientRMService", applicationId);
MoveApplicationAcrossQueuesResponse response = recordFactory.newRecordInstance(MoveApplicationAcrossQueuesResponse.class);
return response;
}
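A minimal client-side sketch, not taken from the Hadoop source, of driving this RPC through YarnClient; the application id and the target queue name are placeholders. When the MODIFY_APP or target-queue ACL check fails, the call surfaces as a YarnException wrapping the AccessControlException built above.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class MoveAppExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // Placeholder application id (cluster timestamp + sequence number) and target queue.
      ApplicationId appId = ApplicationId.newInstance(1526000000000L, 1);
      yarnClient.moveApplicationAcrossQueues(appId, "target_queue");
    } catch (YarnException e) {
      // ClientRMService wraps an AccessControlException when the MODIFY_APP
      // or target-queue ACL check fails.
      System.err.println("Move rejected: " + e.getMessage());
    } finally {
      yarnClient.stop();
    }
  }
}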
Use of java.security.AccessControlException in project hadoop by apache.
The class ClientRMService, method signalToContainer.
/**
* Signal a container.
* After the request passes some sanity checks, it is delivered to RMNodeImpl
* so that the next NM heartbeat will pick up the signal request.
*/
@SuppressWarnings("unchecked")
@Override
public SignalContainerResponse signalToContainer(SignalContainerRequest request) throws YarnException, IOException {
ContainerId containerId = request.getContainerId();
UserGroupInformation callerUGI;
try {
callerUGI = UserGroupInformation.getCurrentUser();
} catch (IOException ie) {
LOG.info("Error getting UGI ", ie);
throw RPCUtil.getRemoteException(ie);
}
ApplicationId applicationId = containerId.getApplicationAttemptId().getApplicationId();
RMApp application = this.rmContext.getRMApps().get(applicationId);
if (application == null) {
RMAuditLogger.logFailure(callerUGI.getUserName(), AuditConstants.SIGNAL_CONTAINER, "UNKNOWN", "ClientRMService", "Trying to signal an absent container", applicationId, containerId, null);
throw RPCUtil.getRemoteException("Trying to signal an absent container " + containerId);
}
if (!checkAccess(callerUGI, application.getUser(), ApplicationAccessType.MODIFY_APP, application)) {
RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.SIGNAL_CONTAINER, "User doesn't have permissions to " + ApplicationAccessType.MODIFY_APP.toString(), "ClientRMService", AuditConstants.UNAUTHORIZED_USER, applicationId);
throw RPCUtil.getRemoteException(new AccessControlException("User " + callerUGI.getShortUserName() + " cannot perform operation " + ApplicationAccessType.MODIFY_APP.name() + " on " + applicationId));
}
RMContainer container = scheduler.getRMContainer(containerId);
if (container != null) {
this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeSignalContainerEvent(container.getContainer().getNodeId(), request));
RMAuditLogger.logSuccess(callerUGI.getShortUserName(), AuditConstants.SIGNAL_CONTAINER, "ClientRMService", applicationId, containerId, null);
} else {
RMAuditLogger.logFailure(callerUGI.getUserName(), AuditConstants.SIGNAL_CONTAINER, "UNKNOWN", "ClientRMService", "Trying to signal an absent container", applicationId, containerId, null);
throw RPCUtil.getRemoteException("Trying to signal an absent container " + containerId);
}
return recordFactory.newRecordInstance(SignalContainerResponse.class);
}
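Again as a hedged sketch rather than Hadoop code, a client can exercise this path through YarnClient.signalToContainer; the container id below is a placeholder and must belong to a running application.

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class SignalContainerExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // Placeholder container id; use a live container from an active application.
      ContainerId containerId =
          ContainerId.fromString("container_1526000000000_0001_01_000002");
      // Asks the RM to deliver the signal on the next NM heartbeat, which is
      // what the handler above dispatches via RMNodeSignalContainerEvent.
      yarnClient.signalToContainer(containerId, SignalContainerCommand.OUTPUT_THREAD_DUMP);
    } finally {
      yarnClient.stop();
    }
  }
}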
Use of java.security.AccessControlException in project hadoop by apache.
The class RMWebServices, method modifyApplicationPriority.
private Response modifyApplicationPriority(final RMApp app, UserGroupInformation callerUGI, final int appPriority) throws IOException, InterruptedException {
String userName = callerUGI.getUserName();
try {
callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException, YarnException {
Priority priority = Priority.newInstance(appPriority);
UpdateApplicationPriorityRequest request = UpdateApplicationPriorityRequest.newInstance(app.getApplicationId(), priority);
rm.getClientRMService().updateApplicationPriority(request);
return null;
}
});
} catch (UndeclaredThrowableException ue) {
// If the root cause is an authorization or invalid-state problem, bubble that up to the user
if (ue.getCause() instanceof YarnException) {
YarnException ye = (YarnException) ue.getCause();
if (ye.getCause() instanceof AccessControlException) {
String appId = app.getApplicationId().toString();
String msg = "Unauthorized attempt to change priority of appid " + appId + " by remote user " + userName;
return Response.status(Status.FORBIDDEN).entity(msg).build();
} else if (ye.getMessage().startsWith("Application in") && ye.getMessage().endsWith("state cannot be update priority.")) {
return Response.status(Status.BAD_REQUEST).entity(ye.getMessage()).build();
} else {
throw ue;
}
} else {
throw ue;
}
}
AppPriority ret = new AppPriority(app.getApplicationPriority().getPriority());
return Response.status(Status.OK).entity(ret).build();
}
Use of java.security.AccessControlException in project hadoop by apache.
The class TestClientRMService, method testMoveApplicationAdminTargetQueue.
@Test
public void testMoveApplicationAdminTargetQueue() throws Exception {
ApplicationId applicationId = getApplicationId(1);
UserGroupInformation aclUGI = UserGroupInformation.getCurrentUser();
QueueACLsManager queueAclsManager = getQueueAclManager("allowed_queue", QueueACL.ADMINISTER_QUEUE, aclUGI);
ApplicationACLsManager appAclsManager = getAppAclManager();
ClientRMService rmService = createClientRMServiceForMoveApplicationRequest(applicationId, aclUGI.getShortUserName(), appAclsManager, queueAclsManager);
// user is an admin: move to a queue in the ACL
MoveApplicationAcrossQueuesRequest moveAppRequest = MoveApplicationAcrossQueuesRequest.newInstance(applicationId, "allowed_queue");
rmService.moveApplicationAcrossQueues(moveAppRequest);
// user is an admin: move to a queue not in the ACL
moveAppRequest = MoveApplicationAcrossQueuesRequest.newInstance(applicationId, "not_allowed");
try {
rmService.moveApplicationAcrossQueues(moveAppRequest);
Assert.fail("The request should fail with an AccessControlException");
} catch (YarnException rex) {
Assert.assertTrue("AccessControlException is expected", rex.getCause() instanceof AccessControlException);
}
// ACL is owned by "moveuser", move is performed as a different user
aclUGI = UserGroupInformation.createUserForTesting("moveuser", new String[] {});
queueAclsManager = getQueueAclManager("move_queue", QueueACL.ADMINISTER_QUEUE, aclUGI);
appAclsManager = getAppAclManager();
ClientRMService rmService2 = createClientRMServiceForMoveApplicationRequest(applicationId, aclUGI.getShortUserName(), appAclsManager, queueAclsManager);
// no access to this queue
MoveApplicationAcrossQueuesRequest moveAppRequest2 = MoveApplicationAcrossQueuesRequest.newInstance(applicationId, "move_queue");
try {
rmService2.moveApplicationAcrossQueues(moveAppRequest2);
Assert.fail("The request should fail with an AccessControlException");
} catch (YarnException rex) {
Assert.assertTrue("AccessControlException is expected", rex.getCause() instanceof AccessControlException);
}
// execute the move as the acl owner
// access to the queue OK: user allowed in this queue
aclUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
return rmService2.moveApplicationAcrossQueues(moveAppRequest2);
}
});
}