Example use of org.apache.hadoop.security.AccessControlException in project hadoop (Apache): class FSAclBaseTest, method testEffectiveAccess.
@Test
public void testEffectiveAccess() throws Exception {
  final Path dir = new Path("/testEffectiveAccess");
  fs.mkdirs(dir);
  // Start wide open so the initial AclStatus reflects full permissions.
  fs.setPermission(dir, FsPermission.valueOf("-rwxrwxrwx"));
  AclStatus status = fs.getAclStatus(dir);
  assertEquals("Entries should be empty", 0, status.getEntries().size());
  assertEquals("Permission should be carried by AclStatus",
      fs.getFileStatus(dir).getPermission(), status.getPermission());
  // Grant a named user and a named group full access via the ACL.
  fs.modifyAclEntries(dir, Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", ALL),
      aclEntry(ACCESS, GROUP, "groupY", ALL)));
  status = fs.getAclStatus(dir);
  assertEquals("Entries should contain owner group entry also", 3,
      status.getEntries().size());
  // Tighten the mode bits; the group bits now act as the mask for the ACL.
  fs.setPermission(dir, FsPermission.valueOf("-rwxr-----"));
  status = fs.getAclStatus(dir);
  for (AclEntry entry : status.getEntries()) {
    // Named entries and the unnamed group entry are subject to the mask.
    boolean maskedEntry = entry.getName() != null || entry.getType() == GROUP;
    if (maskedEntry) {
      // The stored permission stays ALL; the effective permission is READ.
      assertEquals(FsAction.ALL, entry.getPermission());
      assertEquals(FsAction.READ, status.getEffectivePermission(entry));
    }
  }
  fsAsBruce.access(dir, READ);
  try {
    fsAsBruce.access(dir, WRITE);
    fail("Access should not be given");
  } catch (AccessControlException e) {
    // expected
  }
  fsAsBob.access(dir, READ);
  try {
    fsAsBob.access(dir, WRITE);
    fail("Access should not be given");
  } catch (AccessControlException e) {
    // expected
  }
}
Example use of org.apache.hadoop.security.AccessControlException in project hadoop (Apache): class TestAuditLogger, method testAuditLogWithAclFailure.
@Test
public void testAuditLogWithAclFailure() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, DummyAuditLogger.class.getName());
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    // Swap in a spy FSDirectory whose permission checker always throws, so
    // every ACL RPC below fails with AccessControlException and must still
    // be audit-logged as unsuccessful.
    final FSDirectory realDir = cluster.getNamesystem().getFSDirectory();
    final FSDirectory spyDir = Mockito.spy(realDir);
    doThrow(new AccessControlException()).when(spyDir).getPermissionChecker();
    cluster.getNamesystem().setFSDirectory(spyDir);
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();
    final FileSystem fs = cluster.getFileSystem();
    final Path root = new Path("/");
    final List<AclEntry> emptyAcls = Lists.newArrayList();
    // Six ACL operations; each should fail and bump both counters once.
    try {
      fs.getAclStatus(root);
    } catch (AccessControlException ignored) {
    }
    try {
      fs.setAcl(root, emptyAcls);
    } catch (AccessControlException ignored) {
    }
    try {
      fs.removeAcl(root);
    } catch (AccessControlException ignored) {
    }
    try {
      fs.removeDefaultAcl(root);
    } catch (AccessControlException ignored) {
    }
    try {
      fs.removeAclEntries(root, emptyAcls);
    } catch (AccessControlException ignored) {
    }
    try {
      fs.modifyAclEntries(root, emptyAcls);
    } catch (AccessControlException ignored) {
    }
    assertEquals(6, DummyAuditLogger.logCount);
    assertEquals(6, DummyAuditLogger.unsuccessfulCount);
  } finally {
    cluster.shutdown();
  }
}
Example use of org.apache.hadoop.security.AccessControlException in project hadoop (Apache): class FSXAttrBaseTest, method testXAttrAcl.
@Test(timeout = 120000)
public void testXAttrAcl() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setOwner(path, BRUCE.getUserName(), null);
  FileSystem bruceFs = createFileSystem(BRUCE);
  FileSystem dianaFs = createFileSystem(DIANA);
  bruceFs.setXAttr(path, name1, value1);
  // With no ACL grant, Diana cannot even read the xattrs.
  try {
    dianaFs.getXAttrs(path);
    Assert.fail("Diana should not have read access to get xattrs");
  } catch (AccessControlException e) {
    // Ignore
  }
  // Give Diana read permissions to the path
  bruceFs.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
  Map<String, byte[]> xattrs = dianaFs.getXAttrs(path);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  // READ alone does not allow modifying xattrs.
  try {
    dianaFs.removeXAttr(path, name1);
    Assert.fail("Diana should not have write access to remove xattrs");
  } catch (AccessControlException e) {
    // Ignore
  }
  try {
    dianaFs.setXAttr(path, name2, value2);
    Assert.fail("Diana should not have write access to set xattrs");
  } catch (AccessControlException e) {
    // Ignore
  }
  // After upgrading Diana to ALL, set and remove both succeed.
  bruceFs.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
  dianaFs.setXAttr(path, name2, value2);
  Assert.assertArrayEquals(value2, dianaFs.getXAttrs(path).get(name2));
  dianaFs.removeXAttr(path, name1);
  dianaFs.removeXAttr(path, name2);
}
Example use of org.apache.hadoop.security.AccessControlException in project hadoop (Apache): class FSAclBaseTest, method testAccess.
@Test
public void testAccess() throws IOException, InterruptedException {
  Path target = new Path("/p1");
  fs.mkdirs(target);
  fs.setOwner(target, BRUCE.getShortUserName(), "groupX");
  // Owner and named user bruce get READ only; group and others get nothing.
  fsAsBruce.setAcl(target, Lists.newArrayList(
      aclEntry(ACCESS, USER, READ),
      aclEntry(ACCESS, USER, "bruce", READ),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE)));
  fsAsBruce.access(target, FsAction.READ);
  try {
    fsAsBruce.access(target, FsAction.WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    // expected
  }
  // Access checks against a missing path surface FileNotFoundException.
  Path badPath = new Path("/bad/bad");
  try {
    fsAsBruce.access(badPath, FsAction.READ);
    fail("The access call should have failed");
  } catch (FileNotFoundException e) {
    // expected
  }
  // Add a named group entry with only READ access
  fsAsBruce.modifyAclEntries(target,
      Lists.newArrayList(aclEntry(ACCESS, GROUP, "groupY", READ)));
  // Now bob should have read access, but not write
  fsAsBob.access(target, READ);
  try {
    fsAsBob.access(target, WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    // expected;
  }
  // Add another named group entry with WRITE access
  fsAsBruce.modifyAclEntries(target,
      Lists.newArrayList(aclEntry(ACCESS, GROUP, "groupZ", WRITE)));
  // Now bob should have write access
  fsAsBob.access(target, WRITE);
  // Add a named user entry to deny bob
  fsAsBruce.modifyAclEntries(target,
      Lists.newArrayList(aclEntry(ACCESS, USER, "bob", NONE)));
  try {
    fsAsBob.access(target, READ);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    // expected;
  }
}
Example use of org.apache.hadoop.security.AccessControlException in project hadoop (Apache): class CLI, method run.
/**
 * Parses one mapred CLI command from {@code argv} and dispatches it against
 * the cluster.
 *
 * <p>Bug fixes relative to the previous version: the "Could not find job"
 * message in the -history, -kill-task and -fail-task branches printed the
 * {@code jobid} local, which is never assigned on those code paths (so it
 * always printed "null"); the -config branch similarly printed the
 * null/empty {@code jobFile} instead of the job id.
 *
 * @param argv the command (e.g. "-status", "-kill") followed by its arguments
 * @return 0 on success, -1 on bad usage or command failure
 * @throws Exception if a cluster call fails with anything other than a remote
 *     AccessControlException, which is reported to stdout and swallowed
 */
public int run(String[] argv) throws Exception {
  int exitCode = -1;
  if (argv.length < 1) {
    displayUsage("");
    return exitCode;
  }
  // process arguments
  String cmd = argv[0];
  String submitJobFile = null;
  String jobid = null;
  String taskid = null;
  String historyFileOrJobId = null;
  String historyOutFile = null;
  String historyOutFormat = HistoryViewer.HUMAN_FORMAT;
  String counterGroupName = null;
  String counterName = null;
  JobPriority jp = null;
  String taskType = null;
  String taskState = null;
  int fromEvent = 0;
  int nEvents = 0;
  int jpvalue = 0;
  String configOutFile = null;
  boolean getStatus = false;
  boolean getCounter = false;
  boolean killJob = false;
  boolean listEvents = false;
  boolean viewHistory = false;
  boolean viewAllHistory = false;
  boolean listJobs = false;
  boolean listAllJobs = false;
  boolean listActiveTrackers = false;
  boolean listBlacklistedTrackers = false;
  boolean displayTasks = false;
  boolean killTask = false;
  boolean failTask = false;
  boolean setJobPriority = false;
  boolean logs = false;
  boolean downloadConfig = false;
  if ("-submit".equals(cmd)) {
    if (argv.length != 2) {
      displayUsage(cmd);
      return exitCode;
    }
    submitJobFile = argv[1];
  } else if ("-status".equals(cmd)) {
    if (argv.length != 2) {
      displayUsage(cmd);
      return exitCode;
    }
    jobid = argv[1];
    getStatus = true;
  } else if ("-counter".equals(cmd)) {
    if (argv.length != 4) {
      displayUsage(cmd);
      return exitCode;
    }
    getCounter = true;
    jobid = argv[1];
    counterGroupName = argv[2];
    counterName = argv[3];
  } else if ("-kill".equals(cmd)) {
    if (argv.length != 2) {
      displayUsage(cmd);
      return exitCode;
    }
    jobid = argv[1];
    killJob = true;
  } else if ("-set-priority".equals(cmd)) {
    if (argv.length != 3) {
      displayUsage(cmd);
      return exitCode;
    }
    jobid = argv[1];
    // Accept either a symbolic JobPriority name or a raw integer priority.
    try {
      jp = JobPriority.valueOf(argv[2]);
    } catch (IllegalArgumentException iae) {
      try {
        jpvalue = Integer.parseInt(argv[2]);
      } catch (NumberFormatException ne) {
        LOG.info(ne);
        displayUsage(cmd);
        return exitCode;
      }
    }
    setJobPriority = true;
  } else if ("-events".equals(cmd)) {
    if (argv.length != 4) {
      displayUsage(cmd);
      return exitCode;
    }
    jobid = argv[1];
    fromEvent = Integer.parseInt(argv[2]);
    nEvents = Integer.parseInt(argv[3]);
    listEvents = true;
  } else if ("-history".equals(cmd)) {
    viewHistory = true;
    if (argv.length < 2 || argv.length > 7) {
      displayUsage(cmd);
      return exitCode;
    }
    // Some arguments are optional while others are not, and some require
    // second arguments. Due to this, the indexing can vary depending on
    // what's specified and what's left out, as summarized in the below table:
    // [all] <jobHistoryFile|jobId> [-outfile <file>] [-format <human|json>]
    // 1 2 3 4 5 6
    // 1 2 3 4
    // 1 2 3 4
    // 1 2
    // 1 2 3 4 5
    // 1 2 3
    // 1 2 3
    // 1
    // "all" is optional, but comes first if specified
    int index = 1;
    if ("all".equals(argv[index])) {
      index++;
      viewAllHistory = true;
      if (argv.length == 2) {
        displayUsage(cmd);
        return exitCode;
      }
    }
    // Get the job history file or job id argument
    historyFileOrJobId = argv[index++];
    // "-outfile" is optional, but if specified requires a second argument
    if (argv.length > index + 1 && "-outfile".equals(argv[index])) {
      index++;
      historyOutFile = argv[index++];
    }
    // "-format" is optional, but if specified required a second argument
    if (argv.length > index + 1 && "-format".equals(argv[index])) {
      index++;
      historyOutFormat = argv[index++];
    }
    // Check for any extra arguments that don't belong here
    if (argv.length > index) {
      displayUsage(cmd);
      return exitCode;
    }
  } else if ("-list".equals(cmd)) {
    if (argv.length != 1 && !(argv.length == 2 && "all".equals(argv[1]))) {
      displayUsage(cmd);
      return exitCode;
    }
    if (argv.length == 2 && "all".equals(argv[1])) {
      listAllJobs = true;
    } else {
      listJobs = true;
    }
  } else if ("-kill-task".equals(cmd)) {
    if (argv.length != 2) {
      displayUsage(cmd);
      return exitCode;
    }
    killTask = true;
    taskid = argv[1];
  } else if ("-fail-task".equals(cmd)) {
    if (argv.length != 2) {
      displayUsage(cmd);
      return exitCode;
    }
    failTask = true;
    taskid = argv[1];
  } else if ("-list-active-trackers".equals(cmd)) {
    if (argv.length != 1) {
      displayUsage(cmd);
      return exitCode;
    }
    listActiveTrackers = true;
  } else if ("-list-blacklisted-trackers".equals(cmd)) {
    if (argv.length != 1) {
      displayUsage(cmd);
      return exitCode;
    }
    listBlacklistedTrackers = true;
  } else if ("-list-attempt-ids".equals(cmd)) {
    if (argv.length != 4) {
      displayUsage(cmd);
      return exitCode;
    }
    jobid = argv[1];
    taskType = argv[2];
    taskState = argv[3];
    displayTasks = true;
    if (!taskTypes.contains(org.apache.hadoop.util.StringUtils.toUpperCase(taskType))) {
      System.out.println("Error: Invalid task-type: " + taskType);
      displayUsage(cmd);
      return exitCode;
    }
    if (!taskStates.contains(org.apache.hadoop.util.StringUtils.toLowerCase(taskState))) {
      System.out.println("Error: Invalid task-state: " + taskState);
      displayUsage(cmd);
      return exitCode;
    }
  } else if ("-logs".equals(cmd)) {
    if (argv.length == 2 || argv.length == 3) {
      logs = true;
      jobid = argv[1];
      if (argv.length == 3) {
        taskid = argv[2];
      } else {
        taskid = null;
      }
    } else {
      displayUsage(cmd);
      return exitCode;
    }
  } else if ("-config".equals(cmd)) {
    downloadConfig = true;
    if (argv.length != 3) {
      displayUsage(cmd);
      return exitCode;
    }
    jobid = argv[1];
    configOutFile = argv[2];
  } else {
    displayUsage(cmd);
    return exitCode;
  }
  // initialize cluster
  cluster = createCluster();
  // Submit the request
  try {
    if (submitJobFile != null) {
      Job job = Job.getInstance(new JobConf(submitJobFile));
      job.submit();
      System.out.println("Created job " + job.getJobID());
      exitCode = 0;
    } else if (getStatus) {
      Job job = getJob(JobID.forName(jobid));
      if (job == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        Counters counters = job.getCounters();
        System.out.println();
        System.out.println(job);
        if (counters != null) {
          System.out.println(counters);
        } else {
          System.out.println("Counters not available. Job is retired.");
        }
        exitCode = 0;
      }
    } else if (getCounter) {
      Job job = getJob(JobID.forName(jobid));
      if (job == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        Counters counters = job.getCounters();
        if (counters == null) {
          System.out.println("Counters not available for retired job " + jobid);
          exitCode = -1;
        } else {
          System.out.println(getCounter(counters, counterGroupName, counterName));
          exitCode = 0;
        }
      }
    } else if (killJob) {
      Job job = getJob(JobID.forName(jobid));
      if (job == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        JobStatus jobStatus = job.getStatus();
        // Terminal states cannot be killed; report why and fail.
        if (jobStatus.getState() == JobStatus.State.FAILED) {
          System.out.println("Could not mark the job " + jobid + " as killed, as it has already failed.");
          exitCode = -1;
        } else if (jobStatus.getState() == JobStatus.State.KILLED) {
          System.out.println("The job " + jobid + " has already been killed.");
          exitCode = -1;
        } else if (jobStatus.getState() == JobStatus.State.SUCCEEDED) {
          System.out.println("Could not kill the job " + jobid + ", as it has already succeeded.");
          exitCode = -1;
        } else {
          job.killJob();
          System.out.println("Killed job " + jobid);
          exitCode = 0;
        }
      }
    } else if (setJobPriority) {
      Job job = getJob(JobID.forName(jobid));
      if (job == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        if (jp != null) {
          job.setPriority(jp);
        } else {
          job.setPriorityAsInteger(jpvalue);
        }
        System.out.println("Changed job priority.");
        exitCode = 0;
      }
    } else if (viewHistory) {
      // A .jhist suffix means a history file path; anything else is a Job ID.
      if (historyFileOrJobId.endsWith(".jhist")) {
        viewHistory(historyFileOrJobId, viewAllHistory, historyOutFile, historyOutFormat);
        exitCode = 0;
      } else {
        Job job = getJob(JobID.forName(historyFileOrJobId));
        if (job == null) {
          // FIX: report the id actually looked up; the jobid local is never
          // assigned on the -history path and previously printed "null".
          System.out.println("Could not find job " + historyFileOrJobId);
        } else {
          String historyUrl = job.getHistoryUrl();
          if (historyUrl == null || historyUrl.isEmpty()) {
            System.out.println("History file for job " + historyFileOrJobId + " is currently unavailable.");
          } else {
            viewHistory(historyUrl, viewAllHistory, historyOutFile, historyOutFormat);
            exitCode = 0;
          }
        }
      }
    } else if (listEvents) {
      Job job = getJob(JobID.forName(jobid));
      if (job == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        listEvents(job, fromEvent, nEvents);
        exitCode = 0;
      }
    } else if (listJobs) {
      listJobs(cluster);
      exitCode = 0;
    } else if (listAllJobs) {
      listAllJobs(cluster);
      exitCode = 0;
    } else if (listActiveTrackers) {
      listActiveTrackers(cluster);
      exitCode = 0;
    } else if (listBlacklistedTrackers) {
      listBlacklistedTrackers(cluster);
      exitCode = 0;
    } else if (displayTasks) {
      Job job = getJob(JobID.forName(jobid));
      if (job == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        displayTasks(getJob(JobID.forName(jobid)), taskType, taskState);
        exitCode = 0;
      }
    } else if (killTask) {
      TaskAttemptID taskID = TaskAttemptID.forName(taskid);
      Job job = getJob(taskID.getJobID());
      if (job == null) {
        // FIX: jobid is null for task commands; derive the id from the
        // task attempt instead of printing "Could not find job null".
        System.out.println("Could not find job " + taskID.getJobID());
      } else if (job.killTask(taskID, false)) {
        System.out.println("Killed task " + taskid);
        exitCode = 0;
      } else {
        System.out.println("Could not kill task " + taskid);
        exitCode = -1;
      }
    } else if (failTask) {
      TaskAttemptID taskID = TaskAttemptID.forName(taskid);
      Job job = getJob(taskID.getJobID());
      if (job == null) {
        // FIX: same null-jobid problem as the kill-task branch above.
        System.out.println("Could not find job " + taskID.getJobID());
      } else if (job.killTask(taskID, true)) {
        System.out.println("Killed task " + taskID + " by failing it");
        exitCode = 0;
      } else {
        System.out.println("Could not fail task " + taskid);
        exitCode = -1;
      }
    } else if (logs) {
      JobID jobID = JobID.forName(jobid);
      if (getJob(jobID) == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        try {
          TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskid);
          LogParams logParams = cluster.getLogParams(jobID, taskAttemptID);
          LogCLIHelpers logDumper = new LogCLIHelpers();
          logDumper.setConf(getConf());
          exitCode = logDumper.dumpAContainersLogs(logParams.getApplicationId(), logParams.getContainerId(), logParams.getNodeId(), logParams.getOwner());
        } catch (IOException e) {
          // RemoteExceptions propagate to the outer handler; local I/O
          // failures are reported and leave exitCode at -1.
          if (e instanceof RemoteException) {
            throw e;
          }
          System.out.println(e.getMessage());
        }
      }
    } else if (downloadConfig) {
      Job job = getJob(JobID.forName(jobid));
      if (job == null) {
        System.out.println("Could not find job " + jobid);
      } else {
        String jobFile = job.getJobFile();
        if (jobFile == null || jobFile.isEmpty()) {
          // FIX: jobFile is known null/empty here, so name the job rather
          // than printing "Config file for job null ...".
          System.out.println("Config file for job " + jobid + " could not be found.");
        } else {
          Path configPath = new Path(jobFile);
          FileSystem fs = FileSystem.get(getConf());
          fs.copyToLocalFile(configPath, new Path(configOutFile));
          exitCode = 0;
        }
      }
    }
  } catch (RemoteException re) {
    // Only unwrap access-control failures into a user-facing message;
    // everything else is rethrown untouched.
    IOException unwrappedException = re.unwrapRemoteException();
    if (unwrappedException instanceof AccessControlException) {
      System.out.println(unwrappedException.getMessage());
    } else {
      throw re;
    }
  } finally {
    cluster.close();
  }
  return exitCode;
}
Aggregations