Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestAuditLoggerWithCommands, method testGetQuotaUsage.
@Test
public void testGetQuotaUsage() throws Exception {
  // Create a directory with no permissions so user1's quota query is denied.
  Path dir = new Path("/test");
  fs.mkdirs(dir, new FsPermission((short) 0));
  String expectedAuditPattern = ".*allowed=false.*ugi=theDoctor.*cmd=quotaUsage.*";
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    fileSys.getQuotaUsage(dir);
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ignored) {
    // Expected: user1 has no access to the directory.
  }
  // The denied attempt must have produced a matching audit-log entry.
  int auditLength = verifyAuditLogs(expectedAuditPattern);
  // After closing the filesystem, the same call must fail fast and must
  // NOT append anything further to the audit log.
  fileSys.close();
  try {
    fileSys.getQuotaUsage(dir);
    fail("The operation should have failed with IOException");
  } catch (IOException ignored) {
    // Expected: the filesystem has been closed.
  }
  assertTrue("Unexpected log!",
      auditLength == auditlog.getOutput().split("\n").length);
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestAuditLoggerWithCommands, method testGetEZForPath.
@Test
public void testGetEZForPath() throws Exception {
  // Create a directory with no permissions so user1's EZ lookup is denied.
  Path dir = new Path("/test");
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  fs.mkdirs(dir, new FsPermission((short) 0));
  String expectedAuditPattern = ".*allowed=false.*ugi=theDoctor.*cmd=getEZForPath.*";
  try {
    ((DistributedFileSystem) fileSys).getEZForPath(dir);
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ignored) {
    // Expected: user1 has no access to the directory.
  }
  // The denied attempt must have produced a matching audit-log entry.
  int auditLength = verifyAuditLogs(expectedAuditPattern);
  // After closing the filesystem, the same call must fail fast and must
  // NOT append anything further to the audit log.
  fileSys.close();
  try {
    ((DistributedFileSystem) fileSys).getEZForPath(dir);
    fail("The operation should have failed with IOException");
  } catch (IOException ignored) {
    // Expected: the filesystem has been closed.
  }
  assertTrue("Unexpected log!",
      auditLength == auditlog.getOutput().split("\n").length);
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestAuditLoggerWithCommands, method testModifyCachePool.
@Test
public void testModifyCachePool() throws Exception {
  removeExistingCachePools(null);
  // Pool with mode 0 so user1's modify request is denied.
  CachePoolInfo cacheInfo =
      new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
  ((DistributedFileSystem) fs).addCachePool(cacheInfo);
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    ((DistributedFileSystem) fileSys).modifyCachePool(cacheInfo);
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
    // Expected: user1 is not allowed to modify the pool.
  }
  String aceModifyCachePoolPattern =
      ".*allowed=false.*ugi=theDoctor.*cmd=modifyCachePool.*";
  // The denied attempt must have produced a matching audit-log entry.
  int length = verifyAuditLogs(aceModifyCachePoolPattern);
  // Close OUTSIDE the try block: an IOException thrown by close() itself
  // would otherwise falsely satisfy the expected-failure branch below.
  // This also matches the structure of the sibling audit-log tests.
  fileSys.close();
  try {
    ((DistributedFileSystem) fileSys).modifyCachePool(cacheInfo);
    fail("The operation should have failed with IOException");
  } catch (IOException e) {
    // Expected: the filesystem has been closed. No new audit entries
    // may have been produced.
  }
  assertTrue("Unexpected log!",
      length == auditlog.getOutput().split("\n").length);
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestWebHdfsCreatePermissions, method testPermissions.
/**
 * Issues a WebHDFS PUT request against {@code path} with the given query
 * parameters, then verifies both the HTTP response code and the resulting
 * file permission as reported by the NameNode RPC server.
 *
 * @param expectedResponse expected HTTP status code of the PUT request
 * @param expectedPermission expected permission string (e.g. "rwxr-xr-x")
 * @param path file-system path to operate on
 * @param params extra query parameters appended to the WebHDFS URI
 * @throws Exception on any connection or assertion failure
 */
private void testPermissions(int expectedResponse, String expectedPermission,
    String path, String... params) throws Exception {
  final String user = System.getProperty("user.name");
  final StringBuilder uri = new StringBuilder(cluster.getHttpUri(0));
  uri.append("/webhdfs/v1").append(path)
      .append("?user.name=").append(user).append("&");
  for (String param : params) {
    uri.append(param).append("&");
  }
  LOG.info(uri.toString());
  try {
    URL url = new URL(uri.toString());
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try {
      conn.setRequestMethod("PUT");
      Assert.assertEquals(expectedResponse, conn.getResponseCode());
      // Verify the permission via the NameNode RPC interface rather than
      // trusting the HTTP response alone.
      NamenodeProtocols namenode = cluster.getNameNode().getRpcServer();
      FsPermission resultingPermission =
          namenode.getFileInfo(path).getPermission();
      Assert.assertEquals(expectedPermission, resultingPermission.toString());
    } finally {
      // Release the connection's underlying network resources.
      conn.disconnect();
    }
  } finally {
    cluster.shutdown();
  }
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project: class TestCredentialProviderFactory, method testLocalJksProvider.
@Test
public void testLocalJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl =
      LocalJavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
  // Start from a clean slate: remove any keystore left by a prior run.
  File file = new File(tmpDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  // The provider must have created the keystore owner-read/write only.
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  String actualPerms = s.getPermission().toString();
  assertTrue("Unexpected permissions: " + actualPerms,
      actualPerms.equals("rw-------"));
  assertTrue(file + " should exist", file.isFile());
  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
Aggregations