Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class TestDFSClientRetries, method namenodeRestartTest.
public static void namenodeRestartTest(final Configuration conf,
    final boolean isWebHDFS) throws Exception {
  GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
  final List<Exception> exceptions = new ArrayList<Exception>();
  final Path dir = new Path("/testNamenodeRestart");
  if (isWebHDFS) {
    conf.setBoolean(HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_KEY, true);
  } else {
    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
  }
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
  conf.setInt(MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);
  final short numDatanodes = 3;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem fs = isWebHDFS
        ? WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME)
        : dfs;
    final URI uri = dfs.getUri();
    assertTrue(HdfsUtils.isHealthy(uri));

    //create a file
    final long length = 1L << 20;
    final Path file1 = new Path(dir, "foo");
    DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);

    //get file status
    final FileStatus s1 = fs.getFileStatus(file1);
    assertEquals(length, s1.getLen());

    //create file4, write some data, but do not close it
    final Path file4 = new Path(dir, "file4");
    final FSDataOutputStream out4 = fs.create(file4, false, 4096,
        fs.getDefaultReplication(file4), 1024L, null);
    final byte[] bytes = new byte[1000];
    new Random().nextBytes(bytes);
    out4.write(bytes);
    out4.write(bytes);
    if (isWebHDFS) {
      // WebHDFS does not support hflush. To keep the DataNode from
      // communicating with the NN while we shut the NN down, call
      // out4.close() to finish writing the data.
      out4.close();
    } else {
      out4.hflush();
    }

    //shutdown namenode
    assertTrue(HdfsUtils.isHealthy(uri));
    cluster.shutdownNameNode(0);
    assertFalse(HdfsUtils.isHealthy(uri));

    //namenode is down, continue writing file4 in a thread
    final Thread file4thread = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          //write some more data and then close the file
          if (!isWebHDFS) {
            out4.write(bytes);
            out4.write(bytes);
            out4.write(bytes);
            out4.close();
          }
        } catch (Exception e) {
          exceptions.add(e);
        }
      }
    });
    file4thread.start();

    //namenode is down, read the file in a thread
    final Thread reader = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          //it should retry until the namenode is up
          final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
          final FSDataInputStream in = fs.open(file1);
          int count = 0;
          for (; in.read() != -1; count++);
          in.close();
          assertEquals(s1.getLen(), count);
        } catch (Exception e) {
          exceptions.add(e);
        }
      }
    });
    reader.start();

    //namenode is down, create another file in a thread
    final Path file3 = new Path(dir, "file");
    final Thread thread = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          //it should retry until the namenode is up
          final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
          DFSTestUtil.createFile(fs, file3, length, numDatanodes, 20120406L);
        } catch (Exception e) {
          exceptions.add(e);
        }
      }
    });
    thread.start();

    //restart namenode in a new thread
    new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          //sleep, restart, and then wait active
          TimeUnit.SECONDS.sleep(30);
          assertFalse(HdfsUtils.isHealthy(uri));
          cluster.restartNameNode(0, false);
          cluster.waitActive();
          assertTrue(HdfsUtils.isHealthy(uri));
        } catch (Exception e) {
          exceptions.add(e);
        }
      }
    }).start();

    //namenode is down; this should retry until the namenode is up again
    final FileStatus s2 = fs.getFileStatus(file1);
    assertEquals(s1, s2);

    //check file1 and file3
    thread.join();
    assertEmpty(exceptions);
    assertEquals(s1.getLen(), fs.getFileStatus(file3).getLen());
    assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file3));
    reader.join();
    assertEmpty(exceptions);

    //check file4
    file4thread.join();
    assertEmpty(exceptions);
    {
      final FSDataInputStream in = fs.open(file4);
      int count = 0;
      for (int r; (r = in.read()) != -1; count++) {
        Assert.assertEquals(String.format("count=%d", count),
            bytes[count % bytes.length], (byte) r);
      }
      if (!isWebHDFS) {
        Assert.assertEquals(5 * bytes.length, count);
      } else {
        Assert.assertEquals(2 * bytes.length, count);
      }
      in.close();
    }

    //enter safe mode
    assertTrue(HdfsUtils.isHealthy(uri));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    assertFalse(HdfsUtils.isHealthy(uri));

    //leave safe mode in a new thread
    new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          //sleep and then leave safe mode
          TimeUnit.SECONDS.sleep(30);
          assertFalse(HdfsUtils.isHealthy(uri));
          dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
          assertTrue(HdfsUtils.isHealthy(uri));
        } catch (Exception e) {
          exceptions.add(e);
        }
      }
    }).start();

    //namenode is in safe mode; create should retry until it leaves safe mode
    final Path file2 = new Path(dir, "bar");
    DFSTestUtil.createFile(fs, file2, length, numDatanodes, 20120406L);
    assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file2));
    assertTrue(HdfsUtils.isHealthy(uri));

    //make sure it won't retry on exceptions like FileNotFoundException
    final Path nonExisting = new Path(dir, "nonExisting");
    LOG.info("setPermission: " + nonExisting);
    try {
      fs.setPermission(nonExisting, new FsPermission((short) 0));
      fail();
    } catch (FileNotFoundException fnfe) {
      LOG.info("GOOD!", fnfe);
    }
    assertEmpty(exceptions);
  } finally {
    cluster.shutdown();
  }
}
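The setPermission call near the end constructs its FsPermission directly from a short, one of several equivalent construction styles FsPermission supports. A minimal sketch of the common ones follows; the class name and main method are ours for illustration, while FsPermission and FsAction are the real Hadoop classes used above:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionConstructionSketch {
  public static void main(String[] args) {
    // From an octal short, as in new FsPermission((short) 0) above.
    // 0644 = rw-r--r--
    FsPermission fromShort = new FsPermission((short) 0644);

    // From explicit user/group/other action triads.
    FsPermission fromActions =
        new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.READ);

    // From an ls-style symbolic string.
    FsPermission fromSymbolic = FsPermission.valueOf("-rw-r--r--");

    // All three encode the same mode bits.
    System.out.println(fromShort);                                     // rw-r--r--
    System.out.println(fromShort.equals(fromActions));                 // true
    System.out.println(fromShort.toShort() == fromSymbolic.toShort()); // true
  }
}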
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class TestDFSShell, method testCopyCommandsToDirectoryWithPreserveOption.
// Verify that the cp -ptopxa options preserve directory attributes.
@Test(timeout = 120000)
public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception {
  FsShell shell = null;
  final String testdir =
      "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
      + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    dfs.mkdirs(hdfsTestDir);
    Path srcDir = new Path(hdfsTestDir, "srcDir");
    dfs.mkdirs(srcDir);
    dfs.setAcl(srcDir, Lists.newArrayList(
        aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "foo", ALL),
        aclEntry(ACCESS, GROUP, READ_EXECUTE),
        aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
        aclEntry(ACCESS, OTHER, EXECUTE)));
    // set sticky bit
    dfs.setPermission(srcDir, new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
    // Create a file in srcDir so we can check whether srcDir's modification
    // time is preserved after the copy. If cp -p preserved the modification
    // time first and then copied the child (srcFile), the modification time
    // would not be preserved.
    Path srcFile = new Path(srcDir, "srcFile");
    dfs.create(srcFile).close();
    FileStatus status = dfs.getFileStatus(srcDir);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    dfs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
    dfs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
    shell = new FsShell(dfs.getConf());

    // -p
    Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
    String[] argv = new String[] { "-cp", "-p",
        srcDir.toUri().toString(), targetDir1.toUri().toString() };
    int ret = ToolRunner.run(shell, argv);
    assertEquals("cp -p is not working", SUCCESS, ret);
    FileStatus targetStatus = dfs.getFileStatus(targetDir1);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    FsPermission targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    Map<String, byte[]> xattrs = dfs.getXAttrs(targetDir1);
    assertTrue(xattrs.isEmpty());
    List<AclEntry> acls = dfs.getAclStatus(targetDir1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptop
    Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
    argv = new String[] { "-cp", "-ptop",
        srcDir.toUri().toString(), targetDir2.toUri().toString() };
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptop is not working", SUCCESS, ret);
    targetStatus = dfs.getFileStatus(targetDir2);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = dfs.getXAttrs(targetDir2);
    assertTrue(xattrs.isEmpty());
    acls = dfs.getAclStatus(targetDir2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptopx
    Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
    argv = new String[] { "-cp", "-ptopx",
        srcDir.toUri().toString(), targetDir3.toUri().toString() };
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopx is not working", SUCCESS, ret);
    targetStatus = dfs.getFileStatus(targetDir3);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = dfs.getXAttrs(targetDir3);
    assertEquals(2, xattrs.size());
    assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
    acls = dfs.getAclStatus(targetDir3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptopa
    Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
    argv = new String[] { "-cp", "-ptopa",
        srcDir.toUri().toString(), targetDir4.toUri().toString() };
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopa is not working", SUCCESS, ret);
    targetStatus = dfs.getFileStatus(targetDir4);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = dfs.getXAttrs(targetDir4);
    assertTrue(xattrs.isEmpty());
    acls = dfs.getAclStatus(targetDir4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir4));

    // -ptoa (verify that the -a option preserves permissions as well)
    Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
    argv = new String[] { "-cp", "-ptoa",
        srcDir.toUri().toString(), targetDir5.toUri().toString() };
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptoa is not working", SUCCESS, ret);
    targetStatus = dfs.getFileStatus(targetDir5);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs = dfs.getXAttrs(targetDir5);
    assertTrue(xattrs.isEmpty());
    acls = dfs.getAclStatus(targetDir5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir5));
  } finally {
    if (shell != null) {
      shell.close();
    }
  }
}
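The setPermission call at the top of this test uses the four-argument FsPermission constructor, whose trailing boolean sets the sticky bit. A minimal sketch of that constructor (the class name and main method are ours; the FsPermission and FsAction APIs are real):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class StickyBitSketch {
  public static void main(String[] args) {
    // The same permission the test sets on srcDir: rwxr-x--x plus sticky bit.
    FsPermission perm = new FsPermission(
        FsAction.ALL, FsAction.READ_EXECUTE, FsAction.EXECUTE, true);

    System.out.println(perm.getStickyBit()); // true
    System.out.println(perm);                // rwxr-x--t ('t' marks the sticky bit)
    // toShort() folds the sticky bit into a leading octal digit: prints 1751.
    System.out.printf("%o%n", perm.toShort());
  }
}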
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class TestDFSShell, method testSetXAttrPermission.
@Test(timeout = 30000)
public void testSetXAttrPermission() throws Exception {
  UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
  PrintStream bak = null;
  try {
    Path p = new Path("/foo");
    dfs.mkdirs(p);
    bak = System.err;
    final FsShell fshell = new FsShell(dfs.getConf());
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setErr(new PrintStream(out));

    // No permission to write xattr
    dfs.setPermission(p, new FsPermission((short) 0700));
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        int ret = ToolRunner.run(fshell, new String[] {
            "-setfattr", "-n", "user.a1", "-v", "1234", "/foo" });
        assertEquals("Returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("Permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });

    int ret = ToolRunner.run(fshell, new String[] {
        "-setfattr", "-n", "user.a1", "-v", "1234", "/foo" });
    assertEquals("Returned should be 0", 0, ret);
    out.reset();

    // No permission to read and remove
    dfs.setPermission(p, new FsPermission((short) 0750));
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        // Read
        int ret = ToolRunner.run(fshell, new String[] {
            "-getfattr", "-n", "user.a1", "/foo" });
        assertEquals("Returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("Permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        // Remove
        ret = ToolRunner.run(fshell, new String[] {
            "-setfattr", "-x", "user.a1", "/foo" });
        assertEquals("Returned should be 1", 1, ret);
        str = out.toString();
        assertTrue("Permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });
  } finally {
    if (bak != null) {
      System.setErr(bak);
    }
  }
}
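The octal shorts in this test, (short) 0700 and (short) 0750, decode digit by digit into user/group/other FsAction triads. A minimal sketch of inspecting them (the class name is ours; getUserAction, getGroupAction, the public SYMBOL field, and FsAction.implies are real Hadoop APIs):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class OctalModeSketch {
  public static void main(String[] args) {
    FsPermission p700 = new FsPermission((short) 0700); // rwx------
    FsPermission p750 = new FsPermission((short) 0750); // rwxr-x---

    // Each octal digit decodes to one FsAction triad.
    System.out.println(p700.getUserAction().SYMBOL);  // rwx
    System.out.println(p700.getGroupAction().SYMBOL); // ---
    System.out.println(p750.getGroupAction().SYMBOL); // r-x

    // FsAction.implies answers "does this triad allow that action?"
    System.out.println(p750.getGroupAction().implies(FsAction.READ));  // true
    System.out.println(p750.getGroupAction().implies(FsAction.WRITE)); // false
  }
}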
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class TestDFSMkdirs, method testMkdirRpcNonCanonicalPath.
/**
 * Regression test for HDFS-3626. Calls mkdirs with a non-canonical path
 * (i.e. with extra slashes between components) and makes sure that the NN
 * rejects it.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    for (String pathStr : NON_CANONICAL_PATHS) {
      try {
        nnrpc.mkdirs(pathStr, new FsPermission((short) 0755), true);
        fail("Did not fail when called with a non-canonicalized path: " + pathStr);
      } catch (InvalidPathException ipe) {
        // expected
      }
    }
  } finally {
    cluster.shutdown();
  }
}
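NON_CANONICAL_PATHS is a constant defined elsewhere in the test class and is not shown here. As a rough illustration of what the NameNode rejects, the same kind of validity check is exposed client-side by DFSUtil.isValidName; the sample paths below are illustrative guesses, not the actual test constants:

import org.apache.hadoop.hdfs.DFSUtil;

public class PathCanonicalitySketch {
  public static void main(String[] args) {
    // Illustrative non-canonical paths; the real NON_CANONICAL_PATHS array
    // lives in TestDFSMkdirs and may differ.
    String[] samples = { "//dir", "/dir//file", "/dir/./file", "/dir/../file" };
    for (String p : samples) {
      // isValidName rejects empty, ".", and ".." path components.
      System.out.println(p + " -> valid=" + DFSUtil.isValidName(p));
    }
    System.out.println("/dir/file -> valid=" + DFSUtil.isValidName("/dir/file"));
  }
}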
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
From the class TestDFSPermission, method testAccessOthers.
@Test
public void testAccessOthers() throws IOException, InterruptedException {
  FileSystem rootFs = FileSystem.get(conf);
  Path p3 = new Path("/p3");
  rootFs.mkdirs(p3);
  rootFs.setPermission(p3, new FsPermission((short) 0774));
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  fs.access(p3, FsAction.READ);
  try {
    fs.access(p3, FsAction.READ_WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
        e.getMessage().contains(p3.getParent().toUri().getPath()));
  }
}
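FileSystem.access either returns normally or throws; it has no boolean form. Where a boolean answer is more convenient, a small wrapper like the following does the translation (canAccess is our own hypothetical helper name, not a Hadoop API):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessProbe {
  /** Returns true if the current user may perform the given action on path. */
  static boolean canAccess(FileSystem fs, Path path, FsAction action)
      throws IOException {
    try {
      fs.access(path, action); // throws AccessControlException on denial
      return true;
    } catch (AccessControlException ace) {
      return false;
    }
  }
}

In the test above, USER1 falls into the "other" class of the 0774 mode (read-only), so the equivalent of canAccess(fs, p3, FsAction.READ) would be true while canAccess(fs, p3, FsAction.READ_WRITE) would be false.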