Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method removeAclEntries.
void removeAclEntries(final String src, List<AclEntry> aclSpec) throws IOException {
  final String operationName = "removeAclEntries";
  checkOperation(OperationCategory.WRITE);
  HdfsFileStatus auditStat = null;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
    auditStat = FSDirAclOp.removeAclEntries(dir, src, aclSpec);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
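For context, this NameNode-side path is normally reached through the client-facing FileSystem API. The following is a minimal client sketch, not from the Hadoop source: the path /data, the user name alice, and the class name are illustrative, and error handling is reduced to catching the AccessControlException that removeAclEntries rethrows after logging the failed audit event.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.security.AccessControlException;

public class RemoveAclEntriesExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Build an ACL spec naming the entry to remove; "alice" is illustrative.
    List<AclEntry> spec = Arrays.asList(new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("alice")
        .build());
    try {
      fs.removeAclEntries(new Path("/data"), spec);  // illustrative path
    } catch (AccessControlException ace) {
      // By this point the NameNode has already logged a failed audit event.
      System.err.println("Permission denied: " + ace.getMessage());
    }
  }
}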
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method unsetErasureCodingPolicy.
/**
 * Unset an erasure coding policy from the given path.
 * @param srcArg The path of the target directory.
 * @throws AccessControlException if the caller is not the superuser.
 * @throws UnresolvedLinkException if the path can't be resolved.
 * @throws SafeModeException if the Namenode is in safe mode.
 */
void unsetErasureCodingPolicy(final String srcArg, final boolean logRetryCache)
    throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException {
  final String operationName = "unsetErasureCodingPolicy";
  checkOperation(OperationCategory.WRITE);
  HdfsFileStatus resultingStat = null;
  final FSPermissionChecker pc = getPermissionChecker();
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot unset erasure coding policy on " + srcArg);
    resultingStat = FSDirErasureCodingOp.unsetErasureCodingPolicy(this, srcArg, pc, logRetryCache);
    success = true;
  } catch (AccessControlException ace) {
    logAuditEvent(success, operationName, srcArg, null, resultingStat);
    throw ace;
  } finally {
    writeUnlock(operationName);
    if (success) {
      getEditLog().logSync();
    }
  }
  logAuditEvent(success, operationName, srcArg, null, resultingStat);
}
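A hedged client-side sketch of how a caller would hit this path: the cast to DistributedFileSystem, the path /warehouse, and the class name are assumptions for illustration, since the erasure coding calls live on the HDFS-specific client class. A non-superuser call surfaces as the AccessControlException audited above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.AccessControlException;

public class UnsetEcPolicyExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Assumes the default filesystem is HDFS; otherwise this cast fails.
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      dfs.unsetErasureCodingPolicy(new Path("/warehouse"));  // illustrative path
    } catch (AccessControlException ace) {
      // A non-privileged caller lands here; the failed attempt was audited.
      System.err.println("Not permitted to change the EC policy: " + ace.getMessage());
    }
  }
}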
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method startFile.
/**
 * Create a new file entry in the namespace.
 *
 * For a description of parameters and exceptions thrown see
 * {@link ClientProtocol#create}, except that this method returns a valid
 * file status upon success.
 */
HdfsFileStatus startFile(String src, PermissionStatus permissions, String holder,
    String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
    short replication, long blockSize, CryptoProtocolVersion[] supportedVersions,
    boolean logRetryCache) throws IOException {
  HdfsFileStatus status;
  try {
    status = startFileInt(src, permissions, holder, clientMachine, flag,
        createParent, replication, blockSize, supportedVersions, logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, "create", src);
    throw e;
  }
  logAuditEvent(true, "create", src, null, status);
  return status;
}
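Since startFile backs ClientProtocol#create, the exception shows up on the client as a failed create call. A minimal sketch under the assumption of a reachable default filesystem; the path, payload, and class name are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class CreateFileExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/example.dat"))) {
      out.writeBytes("hello");  // illustrative payload
    } catch (AccessControlException ace) {
      // Mirrors the failed "create" audit event logged in startFile.
      System.err.println("Create denied: " + ace.getMessage());
    }
  }
}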
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method removeXAttr.
void removeXAttr(String src, XAttr xAttr, boolean logRetryCache) throws IOException {
  final String operationName = "removeXAttr";
  HdfsFileStatus auditStat = null;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
    auditStat = FSDirXAttrOp.removeXAttr(dir, src, xAttr, logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
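On the client, the corresponding call is FileSystem.removeXAttr. A short sketch, with the path, the xattr name, and the class name all illustrative rather than taken from the source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class RemoveXAttrExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try {
      // Both the path and the xattr name are illustrative.
      fs.removeXAttr(new Path("/data/file.dat"), "user.checksum");
    } catch (AccessControlException ace) {
      // The NameNode audited the failed removeXAttr before rethrowing.
      System.err.println("Not allowed to remove xattr: " + ace.getMessage());
    }
  }
}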
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestFileAppend2, method testSimpleAppend.
/**
 * Creates one file, writes a few bytes to it, and then closes it.
 * Reopens the same file for appending, writes the remaining blocks, and then closes it.
 * Verifies that all data exists in the file.
 * @throws IOException if an I/O error occurs
 */
@Test
public void testSimpleAppend() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    {
      // test appending to a file: create a new file.
      Path file1 = new Path("/simpleAppend.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleAppend.dat");
      // write the first io.bytes.per.checksum bytes to the file
      int mid = 186;
      System.out.println("Writing " + mid + " bytes to file " + file1);
      stm.write(fileContents, 0, mid);
      stm.close();
      System.out.println("Wrote and Closed first part of file.");
      // append the next io.bytes.per.checksum bytes to the file
      int mid2 = 607;
      System.out.println("Writing " + (mid2 - mid) + " bytes to file " + file1);
      stm = fs.append(file1);
      stm.write(fileContents, mid, mid2 - mid);
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      // write the remainder of the file
      stm = fs.append(file1);
      // ensure getPos reflects the existing size of the file
      assertTrue(stm.getPos() > 0);
      System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
      stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
      stm.close();
      System.out.println("Wrote and Closed remainder of file.");
      // verify that the entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
    }
    {
      // test appending to a non-existent file.
      FSDataOutputStream out = null;
      try {
        out = fs.append(new Path("/non-existing.dat"));
        fail("Expected a FileNotFoundException");
      } catch (java.io.FileNotFoundException fnfe) {
        System.out.println("Good: got " + fnfe);
        fnfe.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
    {
      // test append permission.
      // make the root directory world-writable
      Path root = new Path("/");
      fs.setPermission(root, new FsPermission((short) 0777));
      fs.close();
      // login as a different user
      final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
      String username = "testappenduser";
      String group = "testappendgroup";
      assertFalse(superuser.getShortUserName().equals(username));
      assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
      UserGroupInformation appenduser =
          UserGroupInformation.createUserForTesting(username, new String[] { group });
      fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
      // create a file
      Path dir = new Path(root, getClass().getSimpleName());
      Path foo = new Path(dir, "foo.dat");
      FSDataOutputStream out = null;
      int offset = 0;
      try {
        out = fs.create(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // change dir and foo to minimal permissions (owner execute on dir, owner write on foo).
      fs.setPermission(dir, new FsPermission((short) 0100));
      fs.setPermission(foo, new FsPermission((short) 0200));
      // try append; it should succeed
      out = null;
      try {
        out = fs.append(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // open up permissions, but remove the owner's write bit on foo.
      fs.setPermission(foo, new FsPermission((short) 0577));
      fs.setPermission(dir, new FsPermission((short) 0777));
      // try append; it should fail
      out = null;
      try {
        out = fs.append(foo);
        fail("Expected an AccessControlException");
      } catch (AccessControlException ace) {
        System.out.println("Good: got " + ace);
        ace.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
  } catch (IOException e) {
    System.out.println("Exception: " + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable: " + e);
    e.printStackTrace();
    throw new IOException("Throwable: " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
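The permission part of the test hinges on acting as a second, unprivileged user. DFSTestUtil.getFileSystemAs wraps what is, in essence, a UserGroupInformation.doAs around FileSystem.get. A minimal sketch of that pattern; the class name is illustrative, and the user and group names simply echo the ones used in the test:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class RunAsUserExample {
  public static void main(String[] args) throws Exception {
    // Fabricate a test identity; no real credentials are required.
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        "testappenduser", new String[] { "testappendgroup" });
    // All filesystem calls made through fsAsUser carry that identity,
    // so NameNode permission checks apply to "testappenduser".
    FileSystem fsAsUser = ugi.doAs(
        (PrivilegedExceptionAction<FileSystem>) () ->
            FileSystem.get(new Configuration()));
    fsAsUser.close();
  }
}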