Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by Apache.
The class FileSystemContractBaseTest, method testMkdirsWithUmask.
public void testMkdirsWithUmask() throws Exception {
  if (!isS3(fs)) {
    Configuration conf = fs.getConf();
    String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
    try {
      conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
      final Path dir = path("newDir");
      assertTrue(fs.mkdirs(dir, new FsPermission((short) 0777)));
      FileStatus status = fs.getFileStatus(dir);
      assertTrue(status.isDirectory());
      assertEquals((short) 0715, status.getPermission().toShort());
    } finally {
      conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
    }
  }
}
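For context, here is a minimal sketch (not part of the Hadoop test) of how the umask interacts with the requested mode: mkdirs asks for 0777, and applying the configured umask clears the masked bits, which is why the assertion above expects 0715. The umask value "062" is an assumption chosen to be consistent with that expected value; the actual TEST_UMASK constant lives in FileSystemContractBaseTest.

import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskSketch {
  public static void main(String[] args) {
    FsPermission requested = new FsPermission((short) 0777);
    // Assumed umask value, chosen so that 0777 with this umask yields 0715.
    FsPermission umask = new FsPermission("062");
    FsPermission effective = requested.applyUMask(umask);
    // Prints "rwx--xr-x = 0715".
    System.out.println(effective + " = 0" + Integer.toOctalString(effective.toShort()));
  }
}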
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by Apache.
The class FileSystemTestWrapper, method create.
@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
    CreateOpts... opts) throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, ParentNotDirectoryException,
    UnsupportedFileSystemException, IOException {
  // Need to translate the FileContext-style options into FileSystem-style

  // Permissions with umask
  CreateOpts.Perms permOpt = CreateOpts.getOpt(CreateOpts.Perms.class, opts);
  FsPermission umask = FsPermission.getUMask(fs.getConf());
  FsPermission permission = (permOpt != null) ? permOpt.getValue()
      : FsPermission.getFileDefault().applyUMask(umask);
  permission = permission.applyUMask(umask);
  // Overwrite
  boolean overwrite = createFlag.contains(CreateFlag.OVERWRITE);
  // bufferSize
  int bufferSize = fs.getConf().getInt(
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  CreateOpts.BufferSize bufOpt = CreateOpts.getOpt(CreateOpts.BufferSize.class, opts);
  bufferSize = (bufOpt != null) ? bufOpt.getValue() : bufferSize;
  // replication
  short replication = fs.getDefaultReplication(f);
  CreateOpts.ReplicationFactor repOpt =
      CreateOpts.getOpt(CreateOpts.ReplicationFactor.class, opts);
  replication = (repOpt != null) ? repOpt.getValue() : replication;
  // blockSize
  long blockSize = fs.getDefaultBlockSize(f);
  CreateOpts.BlockSize blockOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, opts);
  blockSize = (blockOpt != null) ? blockOpt.getValue() : blockSize;
  // Progressable
  Progressable progress = null;
  CreateOpts.Progress progressOpt = CreateOpts.getOpt(CreateOpts.Progress.class, opts);
  progress = (progressOpt != null) ? progressOpt.getValue() : progress;
  return fs.create(f, permission, overwrite, bufferSize, replication, blockSize, progress);
}
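A hedged usage sketch of the wrapper above, assuming it is backed by the local file system; the path, flags, and permission value are illustrative. An explicit Perms option takes the first branch of the ternary, but note that create() still runs it through the configured umask, while buffer size, replication, block size, and progress fall back to the defaults the method computes.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateWithOptsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // FileSystemTestWrapper ships with the Hadoop test sources.
    FileSystemTestWrapper wrapper =
        new FileSystemTestWrapper(FileSystem.getLocal(conf));
    // Illustrative path and mode; the Perms option supplies the base
    // permission, to which create() still applies the configured umask.
    FSDataOutputStream out = wrapper.create(
        new Path("/tmp/fsperm-create-example"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.perms(new FsPermission((short) 0644)));
    out.close();
  }
}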
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by Apache.
The class TestTrash, method verifyTrashPermission.
/**
 * Create a set of files with different permissions. After each file is
 * moved to trash, verify that it lands at the expected location in the
 * trash directory and that its permission is preserved.
 *
 * @throws IOException
 */
public static void verifyTrashPermission(FileSystem fs, Configuration conf)
    throws IOException {
  Path caseRoot = new Path(GenericTestUtils.getTempPath("testTrashPermission"));
  try (FileSystem fileSystem = fs) {
    Trash trash = new Trash(fileSystem, conf);
    FileSystemTestWrapper wrapper = new FileSystemTestWrapper(fileSystem);
    short[] filePermissions = { (short) 0600, (short) 0644, (short) 0660,
        (short) 0700, (short) 0750, (short) 0755, (short) 0775, (short) 0777 };
    for (int i = 0; i < filePermissions.length; i++) {
      // Give each file a different permission
      FsPermission fsPermission = new FsPermission(filePermissions[i]);
      Path file = new Path(caseRoot, "file" + i);
      byte[] randomBytes = new byte[new Random().nextInt(10)];
      wrapper.writeFile(file, randomBytes);
      wrapper.setPermission(file, fsPermission);
      // Move the file to trash
      trash.moveToTrash(file);
      // Verify the file was moved to trash, at the expected location
      Path trashDir = trash.getCurrentTrashDir(file);
      if (!file.isAbsolute()) {
        file = wrapper.makeQualified(file);
      }
      Path fileInTrash = Path.mergePaths(trashDir, file);
      FileStatus fstat = wrapper.getFileStatus(fileInTrash);
      assertTrue(String.format("File %s is not moved to trash", fileInTrash.toString()),
          wrapper.exists(fileInTrash));
      // Verify the permission has not changed
      assertTrue(String.format("Expected file: %s is %s, but actual is %s",
          fileInTrash.toString(), fsPermission.toString(),
          fstat.getPermission().toString()),
          fstat.getPermission().equals(fsPermission));
    }
    // Verify the trash directory can be removed
    Path trashRoot = trash.getCurrentTrashDir();
    assertTrue(wrapper.delete(trashRoot, true));
  }
}
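A hedged sketch of how this helper might be driven, assuming the local file system; the interval value is illustrative, but trash must be enabled (a positive fs.trash.interval) or moveToTrash() will not relocate anything.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.TestTrash;

public class TrashPermissionDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Enable trash; 10 minutes is an illustrative interval.
    conf.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 10);
    // TestTrash is part of the Hadoop test sources (org.apache.hadoop.fs).
    TestTrash.verifyTrashPermission(FileSystem.getLocal(conf), conf);
  }
}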
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by Apache.
The class StorageLocation, method makeBlockPoolDir.
/**
 * Create the physical directory for a block pool on the data node.
 *
 * @param blockPoolID
 *          the block pool id
 * @param conf
 *          Configuration instance to use.
 * @throws IOException on errors
 */
public void makeBlockPoolDir(String blockPoolID, Configuration conf) throws IOException {
  if (conf == null) {
    conf = new HdfsConfiguration();
  }
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
          DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  File data = new File(getBpURI(blockPoolID, Storage.STORAGE_DIR_CURRENT));
  try {
    DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
  } catch (IOException e) {
    DataStorage.LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": "
        + e.getMessage());
  }
}
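A minimal sketch of the permission construction used above, assuming the usual "700" default for dfs.datanode.data.dir.perm; FsPermission parses the octal string from the configuration into user, group, and other actions.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class DataDirPermissionSketch {
  public static void main(String[] args) {
    // "700" mirrors the assumed default of dfs.datanode.data.dir.perm.
    FsPermission permission = new FsPermission("700");
    System.out.println(permission);                                    // rwx------
    System.out.println(permission.getUserAction() == FsAction.ALL);    // true
    System.out.println(permission.getGroupAction() == FsAction.NONE);  // true
    System.out.println(permission.getOtherAction() == FsAction.NONE);  // true
  }
}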
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by Apache.
The class FSPermissionChecker, method hasPermission.
// Return whether access is permitted. Note it neither requires a path nor
// throws, so the caller can build the path only if required for an exception.
// Very beneficial for subaccess checks!
private boolean hasPermission(INodeAttributes inode, FsAction access) {
  if (inode == null) {
    return true;
  }
  final FsPermission mode = inode.getFsPermission();
  final AclFeature aclFeature = inode.getAclFeature();
  if (aclFeature != null) {
    // It's possible that the inode has a default ACL but no access ACL.
    int firstEntry = aclFeature.getEntryAt(0);
    if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) {
      return hasAclPermission(inode, access, mode, aclFeature);
    }
  }
  final FsAction checkAction;
  if (getUser().equals(inode.getUserName())) {
    // user class
    checkAction = mode.getUserAction();
  } else if (isMemberOfGroup(inode.getGroupName())) {
    // group class
    checkAction = mode.getGroupAction();
  } else {
    // other class
    checkAction = mode.getOtherAction();
  }
  return checkAction.implies(access);
}
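A minimal sketch of the FsAction.implies() check this method ends with: an action implies another when its permission bits are a superset of the other's. The 0750 mode is illustrative.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ImpliesSketch {
  public static void main(String[] args) {
    FsPermission mode = new FsPermission((short) 0750);
    System.out.println(mode.getUserAction().implies(FsAction.WRITE));   // true  (rwx)
    System.out.println(mode.getGroupAction().implies(FsAction.WRITE));  // false (r-x)
    System.out.println(mode.getOtherAction().implies(FsAction.READ));   // false (---)
  }
}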