Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class JobHistoryEventHandler, method moveToDoneNow.
// TODO If the FS objects are the same, this should be a rename instead of a
// copy.
/**
 * Copies a job history file from the staging filesystem to the done
 * filesystem and applies the intermediate-file permissions to the copy.
 *
 * @param fromPath source path on {@code stagingDirFS}
 * @param toPath destination path on {@code doneDirFS}
 * @throws IOException if the filesystem check, delete, copy, or
 *         permission change fails, or if the copy reports failure
 */
private void moveToDoneNow(Path fromPath, Path toPath) throws IOException {
  // check if path exists, in case of retries it may not exist
  if (stagingDirFS.exists(fromPath)) {
    LOG.info("Copying " + fromPath.toString() + " to " + toPath.toString());
    // TODO temporarily removing the existing dst
    doneDirFS.delete(toPath, true);
    boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath,
        false, getConfig());
    if (copied) {
      LOG.info("Copied to done location: " + toPath);
    } else {
      // Fail loudly rather than logging at INFO and continuing: silently
      // losing a history file (and then setting permissions on a missing
      // destination) would surface only much later, far from the cause.
      throw new IOException(
          "Failed to copy " + fromPath + " to done location " + toPath);
    }
    doneDirFS.setPermission(toPath, new FsPermission(
        JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS));
  }
}
Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestPermission, method testBackwardCompatibility.
/**
 * Tests backward compatibility. Configuration can be
 * either set with old param dfs.umask that takes decimal umasks
 * or dfs.umaskmode that takes symbolic or octal umask.
 */
@Test
public void testBackwardCompatibility() {
  // Test 1 - old configuration key with decimal umask value should be
  // handled when set using the FsPermission.setUMask() API.
  // Decimal 18 == octal 022.
  FsPermission perm = new FsPermission((short) 18);
  Configuration conf = new Configuration();
  FsPermission.setUMask(conf, perm);
  assertEquals(18, FsPermission.getUMask(conf).toShort());
  // Test 2 - new configuration key is handled
  conf = new Configuration();
  conf.set(FsPermission.UMASK_LABEL, "022");
  assertEquals(18, FsPermission.getUMask(conf).toShort());
  // Test 3 - equivalent valid umask (leading zero form)
  conf = new Configuration();
  conf.set(FsPermission.UMASK_LABEL, "0022");
  assertEquals(18, FsPermission.getUMask(conf).toShort());
  // Tests 4 and 5 - invalid umask strings must be rejected
  assertUmaskRejected("1222");
  assertUmaskRejected("01222");
}

/**
 * Asserts that {@link FsPermission#getUMask} throws
 * {@link IllegalArgumentException} for the given invalid umask string.
 *
 * @param umask the umask value to set under {@code FsPermission.UMASK_LABEL}
 */
private static void assertUmaskRejected(String umask) {
  Configuration conf = new Configuration();
  conf.set(FsPermission.UMASK_LABEL, umask);
  try {
    FsPermission.getUMask(conf);
    fail("expect IllegalArgumentException happen");
  } catch (IllegalArgumentException e) {
    // pass, exception successfully triggered
  }
}
Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestPermission, method testCreate.
/**
 * Verifies that mkdirs/create honor explicit permissions under a zero
 * umask, that parent directories are created with the expected inherited
 * permissions, and that a non-zero umask does not affect permissions
 * passed explicitly through the static FileSystem helpers.
 */
@Test
public void testCreate() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
  // Zero umask so requested permissions are applied verbatim.
  conf.set(FsPermission.UMASK_LABEL, "000");
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    fs = FileSystem.get(conf);
    FsPermission rootPerm = checkPermission(fs, "/", null);
    // Implicitly-created parent dirs get owner rwx added (0300 = -wx).
    FsPermission inheritPerm = FsPermission.createImmutable(
        (short) (rootPerm.toShort() | 0300));
    FsPermission dirPerm = new FsPermission((short) 0777);
    fs.mkdirs(new Path("/a1/a2/a3"), dirPerm);
    checkPermission(fs, "/a1", dirPerm);
    checkPermission(fs, "/a1/a2", dirPerm);
    checkPermission(fs, "/a1/a2/a3", dirPerm);
    dirPerm = new FsPermission((short) 0123);
    FsPermission permission = FsPermission.createImmutable(
        (short) (dirPerm.toShort() | 0300));
    fs.mkdirs(new Path("/aa/1/aa/2/aa/3"), dirPerm);
    // Intermediate dirs carry the owner-wx bits; only the leaf keeps 0123.
    checkPermission(fs, "/aa/1", permission);
    checkPermission(fs, "/aa/1/aa/2", permission);
    checkPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
    FsPermission filePerm = new FsPermission((short) 0444);
    Path p = new Path("/b1/b2/b3.txt");
    // try-with-resources: the stream is closed even if write() throws,
    // so a failed write cannot leak an open output stream.
    try (FSDataOutputStream out = fs.create(p, filePerm, true,
        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
        fs.getDefaultReplication(p), fs.getDefaultBlockSize(p), null)) {
      out.write(123);
    }
    checkPermission(fs, "/b1", inheritPerm);
    checkPermission(fs, "/b1/b2", inheritPerm);
    checkPermission(fs, "/b1/b2/b3.txt", filePerm);
    // Non-zero umask: the static helpers still apply the explicit
    // permission to the created dir/file themselves.
    conf.set(FsPermission.UMASK_LABEL, "022");
    permission = FsPermission.createImmutable((short) 0666);
    FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
    FileSystem.create(fs, new Path("/c1/c2.txt"), new FsPermission(permission));
    checkPermission(fs, "/c1", permission);
    checkPermission(fs, "/c1/c2.txt", permission);
  } finally {
    // Best-effort cleanup: log but never mask the test's own failure.
    try {
      if (fs != null)
        fs.close();
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
    }
    try {
      if (cluster != null)
        cluster.shutdown();
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
    }
  }
}
Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestPermission, method testNonSuperCannotChangeOwnerForOtherFile.
/**
 * Verifies that a non-superuser is denied when attempting to take
 * ownership of a file, even one that is world-writable.
 */
private void testNonSuperCannotChangeOwnerForOtherFile() throws Exception {
  // Create the file as the superuser and open it up to everyone (777),
  // so the denial below can only come from the ownership check itself.
  final Path target =
      createFile(nnfs, "testNonSuperCannotChangeOwnerForOtherFile");
  nnfs.setPermission(target, new FsPermission("777"));
  try {
    userfs.setOwner(target, USER_NAME, null);
    fail("Expect ACE when a non-super user tries to own a file");
  } catch (AccessControlException ace) {
    assertThat(ace.getMessage(), startsWith("Permission denied"));
  }
}
Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestPermissionSymlinks, method testRenameSrcNotWritableFS.
/**
 * Verifies rename behavior when the symlink's parent directory is not
 * writable; the shared scenario lives in doRenameSrcNotWritableFS().
 */
@Test(timeout = 5000)
public void testRenameSrcNotWritableFS() throws Exception {
  // r-xr-xr-x: strip all write bits from the link's parent directory.
  final FsPermission readOnlyDir = new FsPermission((short) 0555);
  fs.setPermission(linkParent, readOnlyDir);
  doRenameSrcNotWritableFS();
}
Aggregations