Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class DFSTestUtil, method createStripedFile.
/**
 * Creates the metadata of a file in striped layout. This method only
 * manipulates the NameNode state without injecting data into any DataNode.
 * Disable the periodic heartbeat before using it.
 * @param cluster MiniDFSCluster whose NameNode state is modified
 * @param file Path of the file to create
 * @param dir Parent path of the file
 * @param numBlocks Number of striped block groups to add to the file
 * @param numStripesPerBlk Number of striped cells in each block
 * @param toMkdir whether to create {@code dir} and apply the erasure coding
 *                policy to it before creating the file
 * @param ecPolicy erasure coding policy applied to the created file. A null
 *                 value means using the default erasure coding policy.
 */
public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir,
    int numBlocks, int numStripesPerBlk, boolean toMkdir,
    ErasureCodingPolicy ecPolicy) throws Exception {
  DistributedFileSystem dfs = cluster.getFileSystem();
  // If the outer test already set the EC policy, dir should be left as null.
  if (toMkdir) {
    assert dir != null;
    dfs.mkdirs(dir);
    try {
      dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy.getName());
    } catch (IOException e) {
      if (!e.getMessage().contains("non-empty directory")) {
        throw e;
      }
    }
  }
  cluster.getNameNodeRpc().create(file.toString(), new FsPermission((short) 0755),
      dfs.getClient().getClientName(),
      new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)), false, (short) 1,
      128 * 1024 * 1024L, null);
  FSNamesystem ns = cluster.getNamesystem();
  FSDirectory fsdir = ns.getFSDirectory();
  INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
  ExtendedBlock previous = null;
  for (int i = 0; i < numBlocks; i++) {
    Block newBlock = addBlockToFile(true, cluster.getDataNodes(), dfs, ns,
        file.toString(), fileNode, dfs.getClient().getClientName(), previous,
        numStripesPerBlk, 0);
    previous = new ExtendedBlock(ns.getBlockPoolId(), newBlock);
  }
  dfs.getClient().namenode.complete(file.toString(), dfs.getClient().getClientName(),
      previous, fileNode.getId());
}
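A minimal usage sketch (not taken from the Hadoop sources) showing how a test might call this helper. It assumes a running MiniDFSCluster named cluster whose DataNode heartbeats have been disabled, as the Javadoc requires; the policy lookup, paths, and block counts below are illustrative only.

// Hypothetical usage sketch, not part of DFSTestUtil or the Hadoop tests.
// Assumes a running MiniDFSCluster "cluster" with DataNode heartbeats disabled.
ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getPolicies().get(0);
Path ecDir = new Path("/ec");
Path ecFile = new Path(ecDir, "striped-file");
// Creates /ec, applies the policy to it, then adds one striped block group
// of two stripes to the new file's metadata on the NameNode.
DFSTestUtil.createStripedFile(cluster, ecFile, ecDir, 1, 2, true, ecPolicy);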
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestSymlinkHdfs, method testSetPermissionAffectsTarget.
/** setPermission affects the target, not the link. */
@Test(timeout = 10000)
public void testSetPermissionAffectsTarget() throws IOException {
  Path file = new Path(testBaseDir1(), "file");
  Path dir = new Path(testBaseDir2());
  Path linkToFile = new Path(testBaseDir1(), "linkToFile");
  Path linkToDir = new Path(testBaseDir1(), "linkToDir");
  createAndWriteFile(file);
  wrapper.createSymlink(file, linkToFile, false);
  wrapper.createSymlink(dir, linkToDir, false);
  // Changing the permissions using the link does not modify
  // the permissions of the link.
  FsPermission perms = wrapper.getFileLinkStatus(linkToFile).getPermission();
  wrapper.setPermission(linkToFile, new FsPermission((short) 0664));
  wrapper.setOwner(linkToFile, "user", "group");
  assertEquals(perms, wrapper.getFileLinkStatus(linkToFile).getPermission());
  // but the file's permissions were adjusted appropriately
  FileStatus stat = wrapper.getFileStatus(file);
  assertEquals(0664, stat.getPermission().toShort());
  assertEquals("user", stat.getOwner());
  assertEquals("group", stat.getGroup());
  // Getting the file's permissions via the link is the same
  // as getting the permissions directly.
  assertEquals(stat.getPermission(), wrapper.getFileStatus(linkToFile).getPermission());
  // Ditto for a link to a directory.
  perms = wrapper.getFileLinkStatus(linkToDir).getPermission();
  wrapper.setPermission(linkToDir, new FsPermission((short) 0664));
  wrapper.setOwner(linkToDir, "user", "group");
  assertEquals(perms, wrapper.getFileLinkStatus(linkToDir).getPermission());
  stat = wrapper.getFileStatus(dir);
  assertEquals(0664, stat.getPermission().toShort());
  assertEquals("user", stat.getOwner());
  assertEquals("group", stat.getGroup());
  assertEquals(stat.getPermission(), wrapper.getFileStatus(linkToDir).getPermission());
}
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestAuditLogs, method testAuditWebHdfsStat.
/** Test that a stat via webhdfs puts a proper entry in the audit log. */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);
  fs.setPermission(file, new FsPermission((short) 0644));
  fs.setOwner(file, "root", null);
  setupAuditLogs();
  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo,
      conf, WebHdfsConstants.WEBHDFS_SCHEME);
  FileStatus st = webfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestAuditLogs, method testAuditDenied.
/** Test that a denied operation puts a proper entry in the audit log. */
@Test
public void testAuditDenied() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
  fs.setPermission(file, new FsPermission((short) 0600));
  fs.setOwner(file, "root", null);
  setupAuditLogs();
  try {
    userfs.open(file);
    fail("open must not succeed");
  } catch (AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogs(false);
}
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestAuditLogs, method testAuditWebHdfsDenied.
/** Test that denied access via webhdfs puts a proper entry in the audit log. */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);
  fs.setPermission(file, new FsPermission((short) 0600));
  fs.setOwner(file, "root", null);
  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo,
        conf, WebHdfsConstants.WEBHDFS_SCHEME);
    InputStream istream = webfs.open(file);
    int val = istream.read();
    fail("open+read must not succeed, got " + val);
  } catch (AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogsRepeat(false, 2);
}