Use of org.apache.hadoop.security.AccessControlException in project hbase by apache.
The class HBaseFsck, method preCheckPermission.
private void preCheckPermission() throws IOException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }
  Path hbaseDir = CommonFSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      fs.access(file.getPath(), FsAction.WRITE);
    } catch (AccessControlException ace) {
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
          + " does not have write perms to " + file.getPath()
          + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
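The check above hinges on FileSystem.access(Path, FsAction), which returns normally when the caller holds the requested permission and throws AccessControlException otherwise. A minimal standalone sketch of that probe pattern; the class and method names here are invented for illustration, not part of HBase:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

// Hypothetical helper: converts the exception-based probe into a boolean.
public final class WriteAccessProbe {
  private WriteAccessProbe() {
  }

  public static boolean canWrite(FileSystem fs, Path path) throws IOException {
    try {
      // Throws AccessControlException when the current user lacks WRITE on path.
      fs.access(path, FsAction.WRITE);
      return true;
    } catch (AccessControlException ace) {
      return false;
    }
  }
}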
Use of org.apache.hadoop.security.AccessControlException in project hive by apache.
The class TestReplChangeManager, method testRecycleUsingImpersonation.
@Test
public void testRecycleUsingImpersonation() throws Exception {
  FileSystem fs = warehouse.getWhRoot().getFileSystem(hiveConf);
  Path dirDb = new Path(warehouse.getWhRoot(), "db3");
  long now = System.currentTimeMillis();
  fs.delete(dirDb, true);
  fs.mkdirs(dirDb);
  Path dirTbl1 = new Path(dirDb, "tbl1");
  fs.mkdirs(dirTbl1);
  Path part11 = new Path(dirTbl1, "part1");
  createFile(part11, "testClearer11");
  String fileChksum11 = ReplChangeManager.checksumFor(part11, fs);
  Path part12 = new Path(dirTbl1, "part2");
  createFile(part12, "testClearer12");
  String fileChksum12 = ReplChangeManager.checksumFor(part12, fs);
  final UserGroupInformation proxyUserUgi = UserGroupInformation.createRemoteUser("impala");
  setGroupsInConf(UserGroupInformation.getCurrentUser().getGroupNames(), "impala", hiveConf);
  // set owner of data path to impala
  fs.setOwner(dirTbl1, "impala", "default");
  fs.setOwner(part11, "impala", "default");
  fs.setOwner(part12, "impala", "default");
  proxyUserUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
    try {
      // impala doesn't have access, so this should throw an access control exception
      ReplChangeManager.getInstance(hiveConf).recycle(dirTbl1, RecycleType.MOVE, false);
      Assert.fail();
    } catch (AccessControlException e) {
      assertTrue(e.getMessage().contains("Permission denied: user=impala, access=EXECUTE"));
      assertTrue(e.getMessage().contains("/cmroot"));
    }
    return null;
  });
  ReplChangeManager.getInstance().recycle(dirTbl1, RecycleType.MOVE, false);
  Assert.assertFalse(fs.exists(part11));
  Assert.assertFalse(fs.exists(part12));
  assertTrue(fs.exists(ReplChangeManager.getCMPath(hiveConf, part11.getName(), fileChksum11, cmroot)));
  assertTrue(fs.exists(ReplChangeManager.getCMPath(hiveConf, part12.getName(), fileChksum12, cmroot)));
  fs.setTimes(ReplChangeManager.getCMPath(hiveConf, part11.getName(), fileChksum11, cmroot),
      now - 7 * 86400 * 1000 * 2, now - 7 * 86400 * 1000 * 2);
  ReplChangeManager.scheduleCMClearer(hiveConf);
  long start = System.currentTimeMillis();
  long end;
  boolean cleared = false;
  do {
    Thread.sleep(200);
    end = System.currentTimeMillis();
    if (end - start > 5000) {
      Assert.fail("timeout, cmroot has not been cleared");
    }
    if (!fs.exists(ReplChangeManager.getCMPath(hiveConf, part11.getName(), fileChksum11, cmroot))
        && fs.exists(ReplChangeManager.getCMPath(hiveConf, part12.getName(), fileChksum12, cmroot))) {
      cleared = true;
    }
  } while (!cleared);
}
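The heart of this test is the impersonation pattern: UserGroupInformation.createRemoteUser plus doAs runs the filesystem call as another user, and the test asserts that it fails with AccessControlException. A stripped-down sketch of the same pattern; the class name, user, and path handling below are placeholders, not Hive code:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical check: can the named (impersonated) user list a given path?
public final class ImpersonatedAccessCheck {
  public static boolean canAccess(Configuration conf, String user, String pathStr)
      throws Exception {
    UserGroupInformation proxyUgi = UserGroupInformation.createRemoteUser(user);
    return proxyUgi.doAs((PrivilegedExceptionAction<Boolean>) () -> {
      // FileSystem.get must run inside doAs so it binds to the proxy identity.
      FileSystem fs = FileSystem.get(conf);
      try {
        fs.listStatus(new Path(pathStr));
        return true;
      } catch (AccessControlException ace) {
        // Raised when the impersonated user lacks the required permissions.
        return false;
      }
    });
  }
}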
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class LeafQueue, method validateSubmitApplication.
public void validateSubmitApplication(ApplicationId applicationId, String userName,
    String queue) throws AccessControlException {
  try {
    writeLock.lock();
    // Check if the queue is accepting jobs
    if (getState() != QueueState.RUNNING) {
      String msg = "Queue " + getQueuePath()
          + " is STOPPED. Cannot accept submission of application: " + applicationId;
      LOG.info(msg);
      throw new AccessControlException(msg);
    }
    // Check submission limits for queues
    if (getNumApplications() >= getMaxApplications()) {
      String msg = "Queue " + getQueuePath() + " already has " + getNumApplications()
          + " applications, cannot accept submission of application: " + applicationId;
      LOG.info(msg);
      throw new AccessControlException(msg);
    }
    // Check submission limits for the user on this queue
    User user = usersManager.getUserAndAddIfAbsent(userName);
    if (user.getTotalApplications() >= getMaxApplicationsPerUser()) {
      String msg = "Queue " + getQueuePath() + " already has " + user.getTotalApplications()
          + " applications from user " + userName
          + ", cannot accept submission of application: " + applicationId;
      LOG.info(msg);
      throw new AccessControlException(msg);
    }
  } finally {
    writeLock.unlock();
  }
  try {
    getParent().validateSubmitApplication(applicationId, userName, queue);
  } catch (AccessControlException ace) {
    LOG.info("Failed to submit application to parent-queue: "
        + getParent().getQueuePath(), ace);
    throw ace;
  }
}
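Note that AccessControlException here signals more than ACL failures: a stopped queue and exceeded submission limits are all reported through the same exception type, so a single catch clause in the caller covers every rejection reason. A hypothetical distillation of the limit check, with invented names:

import org.apache.hadoop.security.AccessControlException;

// Hypothetical helper mirroring the guard-clause pattern above: compare a
// counter against its limit and report violations as AccessControlException.
final class SubmissionLimits {
  static void checkLimit(String queuePath, int current, int max, String appId)
      throws AccessControlException {
    if (current >= max) {
      throw new AccessControlException("Queue " + queuePath + " already has "
          + current + " applications, cannot accept submission of application: " + appId);
    }
  }
}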
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class CapacityScheduler, method preValidateMoveApplication.
@Override
public void preValidateMoveApplication(ApplicationId appId, String newQueue)
    throws YarnException {
  try {
    writeLock.lock();
    SchedulerApplication<FiCaSchedulerApp> application = applications.get(appId);
    if (application == null) {
      throw new YarnException("App to be moved " + appId + " not found.");
    }
    String sourceQueueName = application.getQueue().getQueueName();
    this.queueManager.getAndCheckLeafQueue(sourceQueueName);
    String destQueueName = handleMoveToPlanQueue(newQueue);
    LeafQueue dest = this.queueManager.getAndCheckLeafQueue(destQueueName);
    // Validation check - ACLs, submission limits for user & queue
    String user = application.getUser();
    // Check active partition only when attempt is available
    FiCaSchedulerApp appAttempt =
        getApplicationAttempt(ApplicationAttemptId.newInstance(appId, 0));
    if (null != appAttempt) {
      checkQueuePartition(appAttempt, dest);
    }
    try {
      dest.validateSubmitApplication(appId, user, destQueueName);
    } catch (AccessControlException e) {
      throw new YarnException(e);
    }
  } finally {
    writeLock.unlock();
  }
}
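Because preValidateMoveApplication wraps the AccessControlException in a YarnException, a caller that cares about the rejection reason has to inspect the cause. A hedged caller-side sketch; MoveProbe and blockedByAcl are invented names, and note that non-ACL validation failures also return false here:

import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;

// Hypothetical helper: true only when the move is rejected by queue ACLs or
// submission limits; other YarnExceptions (unknown app, bad queue) yield false.
final class MoveProbe {
  static boolean blockedByAcl(CapacityScheduler scheduler, ApplicationId appId,
      String targetQueue) {
    try {
      scheduler.preValidateMoveApplication(appId, targetQueue);
      return false;  // validation passed; the move would be allowed
    } catch (YarnException ye) {
      // The ACL/limit failure arrives wrapped, so check the cause.
      return ye.getCause() instanceof AccessControlException;
    }
  }
}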
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class CapacityScheduler, method addApplication.
private void addApplication(ApplicationId applicationId, String queueName, String user,
    Priority priority) {
  try {
    writeLock.lock();
    if (isSystemAppsLimitReached()) {
      String message = "Maximum system application limit reached, "
          + "cannot accept submission of application: " + applicationId;
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, message));
      return;
    }
    // Sanity checks.
    CSQueue queue = getQueue(queueName);
    if (queue == null) {
      String message = "Application " + applicationId + " submitted by user " + user
          + " to unknown queue: " + queueName;
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, message));
      return;
    }
    if (!(queue instanceof LeafQueue)) {
      String message = "Application " + applicationId + " submitted by user " + user
          + " to non-leaf queue: " + queueName;
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, message));
      return;
    }
    // Submit to the queue
    try {
      queue.submitApplication(applicationId, user, queueName);
    } catch (AccessControlException ace) {
      LOG.info("Failed to submit application " + applicationId + " to queue "
          + queueName + " from user " + user, ace);
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, ace.toString()));
      return;
    }
    // update the metrics
    queue.getMetrics().submitApp(user);
    SchedulerApplication<FiCaSchedulerApp> application =
        new SchedulerApplication<FiCaSchedulerApp>(queue, user, priority);
    applications.put(applicationId, application);
    LOG.info("Accepted application " + applicationId + " from user: " + user
        + ", in queue: " + queueName);
    rmContext.getDispatcher().getEventHandler().handle(
        new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
  } finally {
    writeLock.unlock();
  }
}
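Unlike the LeafQueue example, addApplication never lets the AccessControlException escape: it is translated into an asynchronous APP_REJECTED event on the RM dispatcher. A hypothetical stub spelling out the queue-side contract this relies on; the interface name is invented for illustration:

import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.yarn.api.records.ApplicationId;

// Hypothetical interface (invented name): a queue reports ACL and limit
// violations by throwing AccessControlException, which the scheduler above
// converts into an APP_REJECTED event instead of rethrowing.
interface SubmittableQueue {
  void submitApplication(ApplicationId applicationId, String user, String queueName)
      throws AccessControlException;
}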