Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class AclTestHelpers, method assertPermission.
/**
* Asserts the value of the FsPermission bits on the inode of a specific path.
*
* @param fs FileSystem to use for check
* @param pathToCheck Path inode to check
* @param perm short expected permission bits
* @throws IOException thrown if there is an I/O error
*/
public static void assertPermission(FileSystem fs, Path pathToCheck, short perm) throws IOException {
  // Compare only the classic mode bits: 01777 covers rwxrwxrwx plus the sticky bit.
  short filteredPerm = (short) (perm & 01777);
  FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission();
  assertEquals(filteredPerm, fsPermission.toShort());
  // Bit 12 (octal 010000) of the expected value encodes whether an extended ACL is present.
  assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit());
}
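For illustration, a minimal usage sketch (fs and the paths here are hypothetical test fixtures, not part of the helper): the 010000 bit of the expected short states whether the inode should carry an extended ACL in addition to the classic mode bits.

// Expect plain mode 0750 and no extended ACL.
AclTestHelpers.assertPermission(fs, new Path("/plain"), (short) 0750);
// Expect mode 0750 plus an extended ACL (the 010000 bit flags the ACL).
AclTestHelpers.assertPermission(fs, new Path("/withAcl"), (short) 010750);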
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class FSAclBaseTest, method testDefaultAclNewDirWithMode.
@Test
public void testDefaultAclNewDirWithMode() throws Exception {
  // Parent gets mode 0755 plus a default ACL entry for user "foo".
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0755));
  List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  // A child directory created with mode 0740 inherits the parent's default ACL.
  Path dirPath = new Path(path, "dir1");
  fs.mkdirs(dirPath, new FsPermission((short) 0740));
  AclStatus s = fs.getAclStatus(dirPath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned);
  // 010740 = ACL bit (010000) plus the requested mode 0740.
  assertPermission(dirPath, (short) 010740);
  assertAclFeature(dirPath, true);
}
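The expected value 010740 is the ACL bit (octal 010000) combined with the requested mode 0740. A hand-rolled version of the same check, using only public FsPermission accessors (a sketch, not part of the test class):

// Equivalent assertion without the AclTestHelpers helper.
FsPermission actual = fs.getFileStatus(dirPath).getPermission();
assertEquals((short) 0740, actual.toShort()); // classic mode bits only
assertTrue(actual.getAclBit());               // extended ACL is present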
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class TestSecureNameNode, method testName.
@Test
public void testName() throws Exception {
  MiniDFSCluster cluster = null;
  HdfsConfiguration conf = createSecureConfig("authentication,privacy");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();
    // Log in as the HDFS superuser to prepare a world-writable /tmp.
    FileSystem fsForSuperUser = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI(getHdfsPrincipal(), getHdfsKeytab())
        .doAs(new PrivilegedExceptionAction<FileSystem>() {

          @Override
          public FileSystem run() throws Exception {
            return clusterRef.getFileSystem();
          }
        });
    fsForSuperUser.mkdirs(new Path("/tmp"));
    // 511 decimal == 0777 octal: everyone may write under /tmp.
    fsForSuperUser.setPermission(new Path("/tmp"), new FsPermission((short) 511));
    // Log in as an ordinary (non-super) user.
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(getUserPrincipal(), getUserKeyTab());
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {

      @Override
      public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });
    // Creating a directory under / as a non-superuser must fail with an IOException.
    Path p = new Path("/mydir");
    exception.expect(IOException.class);
    fs.mkdirs(p);
    // Writing under the world-writable /tmp is allowed.
    Path tmp = new Path("/tmp/alpha");
    fs.mkdirs(tmp);
    assertNotNull(fs.listStatus(tmp));
    assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
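On Java 8+, the anonymous PrivilegedExceptionAction classes above can be collapsed into lambdas, since the interface has a single run() method; a sketch of the equivalent call (the cast disambiguates between the PrivilegedAction and PrivilegedExceptionAction overloads of doAs):

// Equivalent, more compact doAs using a lambda (Java 8+).
FileSystem fs = ugi.doAs((PrivilegedExceptionAction<FileSystem>) () -> clusterRef.getFileSystem());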
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class TestSecureNameNodeWithExternalKdc, method testSecureNameNode.
@Test
public void testSecureNameNode() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    String nnPrincipal = System.getProperty("dfs.namenode.kerberos.principal");
    String nnSpnegoPrincipal = System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
    String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
    assertNotNull("NameNode principal was not specified", nnPrincipal);
    assertNotNull("NameNode SPNEGO principal was not specified", nnSpnegoPrincipal);
    assertNotNull("NameNode keytab was not specified", nnKeyTab);
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, nnSpnegoPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();
    FileSystem fsForCurrentUser = cluster.getFileSystem();
    fsForCurrentUser.mkdirs(new Path("/tmp"));
    // 511 decimal == 0777 octal: everyone may write under /tmp.
    fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission((short) 511));
    // The user specified should not be a superuser.
    String userPrincipal = System.getProperty("user.principal");
    String userKeyTab = System.getProperty("user.keytab");
    assertNotNull("User principal was not specified", userPrincipal);
    assertNotNull("User keytab was not specified", userKeyTab);
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {

      @Override
      public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });
    try {
      Path p = new Path("/users");
      fs.mkdirs(p);
      fail("User must not be allowed to write in /");
    } catch (IOException expected) {
      // Expected: the non-superuser cannot create directories under /.
    }
    Path p = new Path("/tmp/alpha");
    fs.mkdirs(p);
    assertNotNull(fs.listStatus(p));
    assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
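This test reads its principals and keytabs from -D system properties, so it fails when no external KDC is configured. A common alternative is to skip the test instead; a hedged sketch using JUnit 4's Assume, not part of the original test:

// Hypothetical guard: skip this test rather than fail when no external KDC is configured.
org.junit.Assume.assumeNotNull(System.getProperty("dfs.namenode.kerberos.principal"),
    System.getProperty("user.principal"));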
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class JobSplitWriter, method createFile.
private static FSDataOutputStream createFile(FileSystem fs, Path splitFile, Configuration job) throws IOException {
  // Create the split file with the standard job-file permissions.
  FSDataOutputStream out = FileSystem.create(fs, splitFile, new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
  // Split files are read by every task, so they get a higher replication factor (default 10).
  int replication = job.getInt(Job.SUBMIT_REPLICATION, 10);
  fs.setReplication(splitFile, (short) replication);
  writeSplitHeader(out);
  return out;
}
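The returned stream is then used by JobSplitWriter to serialize the job's input splits. A simplified sketch of the calling pattern (jobSubmitDir is an illustrative name, and the real writer also records per-split metadata):

// Hypothetical caller: create the split file and write serialized splits into it.
Path splitFile = new Path(jobSubmitDir, "job.split");
FSDataOutputStream out = createFile(fs, splitFile, conf);
// ... serialize each InputSplit to out ...
out.close();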