Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestFileAppend2, method testSimpleAppend.
/**
 * Creates one file, writes a few bytes to it and then closes it.
 * Reopens the same file for appending, writes the remaining blocks, and then closes it.
 * Verifies that all data exists in the file.
 * @throws IOException an exception might be thrown
 */
@Test
public void testSimpleAppend() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    {
      // test appending to a file.
      // create a new file.
      Path file1 = new Path("/simpleAppend.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleAppend.dat");
      // write to file
      // io.bytes.per.checksum bytes
      int mid = 186;
      System.out.println("Writing " + mid + " bytes to file " + file1);
      stm.write(fileContents, 0, mid);
      stm.close();
      System.out.println("Wrote and Closed first part of file.");
      // append to the file
      // io.bytes.per.checksum bytes
      int mid2 = 607;
      System.out.println("Writing " + (mid2 - mid) + " bytes to file " + file1);
      stm = fs.append(file1);
      stm.write(fileContents, mid, mid2 - mid);
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      // write the remainder of the file
      stm = fs.append(file1);
      // ensure getPos is set to reflect existing size of the file
      assertTrue(stm.getPos() > 0);
      System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
      stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
      System.out.println("Written the remainder of the file.");
      stm.close();
      System.out.println("Wrote and Closed the remainder of the file.");
      // verify that the entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
    }
    {
      // test appending to a non-existent file.
      FSDataOutputStream out = null;
      try {
        out = fs.append(new Path("/non-existing.dat"));
        fail("Expected to have FileNotFoundException");
      } catch (java.io.FileNotFoundException fnfe) {
        System.out.println("Good: got " + fnfe);
        fnfe.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
    {
      // test append permission.
      // set root to all writable
      Path root = new Path("/");
      fs.setPermission(root, new FsPermission((short) 0777));
      fs.close();
      // login as a different user
      final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
      String username = "testappenduser";
      String group = "testappendgroup";
      assertFalse(superuser.getShortUserName().equals(username));
      assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
      UserGroupInformation appenduser = UserGroupInformation.createUserForTesting(username, new String[] { group });
      fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
      // create a file
      Path dir = new Path(root, getClass().getSimpleName());
      Path foo = new Path(dir, "foo.dat");
      FSDataOutputStream out = null;
      int offset = 0;
      try {
        out = fs.create(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // change dir and foo to minimal permissions.
      fs.setPermission(dir, new FsPermission((short) 0100));
      fs.setPermission(foo, new FsPermission((short) 0200));
      // try append, should succeed
      out = null;
      try {
        out = fs.append(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // change dir and foo to full permissions, but remove write on foo.
      fs.setPermission(foo, new FsPermission((short) 0577));
      fs.setPermission(dir, new FsPermission((short) 0777));
      // try append, should fail
      out = null;
      try {
        out = fs.append(foo);
        fail("Expected to have AccessControlException");
      } catch (AccessControlException ace) {
        System.out.println("Good: got " + ace);
        ace.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
  } catch (IOException e) {
    System.out.println("Exception :" + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable :" + e);
    e.printStackTrace();
    throw new IOException("Throwable : " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
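
The append-permission checks above hinge on FsPermission's octal short constructor: mode 0200 leaves the owner with write access, so the first append succeeds, while 0577 removes the owner's write bit, so the second append fails. The following is a minimal standalone sketch (the class name FsPermissionDecode is made up for illustration) that decodes those modes with APIs used elsewhere on this page:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDecode {
  public static void main(String[] args) {
    // 0200 -> "-w-------": the owner may write, so fs.append(foo) succeeds.
    FsPermission writable = new FsPermission((short) 0200);
    System.out.println(writable + " owner=" + writable.getUserAction());
    System.out.println(writable.getUserAction().implies(FsAction.WRITE)); // true

    // 0577 -> "r-xrwxrwx": the owner lacks write, so fs.append(foo) fails
    // with AccessControlException.
    FsPermission readOnlyOwner = new FsPermission((short) 0577);
    System.out.println(readOnlyOwner + " owner=" + readOnlyOwner.getUserAction());
    System.out.println(readOnlyOwner.getUserAction().implies(FsAction.WRITE)); // false
  }
}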
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestFsck, method testFsckFileNotFound.
/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
  // Number of replicas to actually start
  final short numReplicas = 1;
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String, String[]> pmap = new HashMap<>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  FSDirectory fsd = mock(FSDirectory.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);
  INodesInPath iip = mock(INodesInPath.class);
  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(fsName.getFSDirectory()).thenReturn(fsd);
  when(fsd.getFSNamesystem()).thenReturn(fsName);
  when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);
  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
  String pathString = "/tmp/testFile";
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  byte[] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;
  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, fileId, numChildren, null, storagePolicy, null);
  Result replRes = new ReplicationResult(conf);
  Result ecRes = new ErasureCodingResult(conf);
  try {
    fsck.check(pathString, file, replRes, ecRes);
  } catch (Exception e) {
    fail("Unexpected exception " + e.getMessage());
  }
  assertTrue(replRes.isHealthy());
}
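
In this test, FsPermission.getDefault() simply supplies a 0777 permission object for the mocked file status. When real files and directories are created, the effective permission is normally derived from the file or directory default with the client's umask applied. A small hedged sketch of that calculation (the class name DefaultPermissions is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultPermissions {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The umask comes from fs.permissions.umask-mode (022 unless overridden).
    FsPermission umask = FsPermission.getUMask(conf);
    // 0666 & ~022 = 0644 for files, 0777 & ~022 = 0755 for directories.
    System.out.println("file: " + FsPermission.getFileDefault().applyUMask(umask));
    System.out.println("dir:  " + FsPermission.getDirDefault().applyUMask(umask));
    // getDefault(), as used in the fsck test above, is plain 0777 ("rwxrwxrwx").
    System.out.println("getDefault(): " + FsPermission.getDefault());
  }
}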
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestINodeFile, method testValidSymlinkTarget.
private void testValidSymlinkTarget(NamenodeProtocols nnRpc, String target, String link) throws IOException {
  FsPermission perm = FsPermission.createImmutable((short) 0755);
  nnRpc.createSymlink(target, link, perm, false);
  assertEquals(target, nnRpc.getLinkTarget(link));
}
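
The helper above drives the NameNode RPC directly and passes an immutable FsPermission for the link. From application code, the same operation would typically go through FileContext, which fills in the permission from the defaults and umask. A hedged sketch, assuming the paths /validtarget and /user/alice/link purely for illustration (note that symlink support is disabled by default in many Hadoop releases, in which case this call throws UnsupportedOperationException):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class SymlinkExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // createSymlink(target, link, createParent): the link's permission is
    // derived from the default permission and the client umask, so no
    // FsPermission is passed explicitly at this layer.
    fc.createSymlink(new Path("/validtarget"), new Path("/user/alice/link"), true);
    System.out.println(fc.getLinkTarget(new Path("/user/alice/link")));
  }
}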
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestINodeFile, method testInodeIdBasedPaths.
/**
 * Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
 * operations.
 */
@Test
public void testInodeIdBasedPaths() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    // FileSystem#mkdirs "/testInodeIdBasedPaths"
    Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
    Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
    fs.mkdirs(baseDir);
    fs.exists(baseDir);
    long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
    // FileSystem#create file and FileSystem#close
    Path testFileInodePath = getInodePath(baseDirFileId, "test1");
    Path testFileRegularPath = new Path(baseDir, "test1");
    final int testFileBlockSize = 1024;
    FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
    assertTrue(fs.exists(testFileInodePath));
    // FileSystem#setPermission
    FsPermission perm = new FsPermission((short) 0666);
    fs.setPermission(testFileInodePath, perm);
    // FileSystem#getFileStatus and FileSystem#getPermission
    FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(perm, fileStatus.getPermission());
    // FileSystem#setOwner
    fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
    // FileSystem#setTimes
    fs.setTimes(testFileInodePath, 0, 0);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(0, fileStatus.getModificationTime());
    assertEquals(0, fileStatus.getAccessTime());
    // FileSystem#setReplication
    fs.setReplication(testFileInodePath, (short) 3);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(3, fileStatus.getReplication());
    fs.setReplication(testFileInodePath, (short) 1);
    // ClientProtocol#getPreferredBlockSize
    assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
    /*
     * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
     * following four methods. The calls below ensure that
     * /.reserved/.inodes paths work properly. No need to check return
     * values as these methods are tested elsewhere.
     */
    {
      fs.isFileClosed(testFileInodePath);
      fs.getAclStatus(testFileInodePath);
      fs.getXAttrs(testFileInodePath);
      fs.listXAttrs(testFileInodePath);
      fs.access(testFileInodePath, FsAction.READ_WRITE);
    }
    // symbolic link related tests
    // A reserved path is not allowed as a target
    String invalidTarget = new Path(baseDir, "invalidTarget").toString();
    String link = new Path(baseDir, "link").toString();
    testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
    // Test creating a link using a reserved inode path
    String validTarget = "/validtarget";
    testValidSymlinkTarget(nnRpc, validTarget, link);
    // FileSystem#append
    fs.append(testFileInodePath);
    // DistributedFileSystem#recoverLease
    fs.recoverLease(testFileInodePath);
    // Namenode#getBlockLocations
    LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
    LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
    checkEquals(l1, l2);
    // FileSystem#rename - both variants
    Path renameDst = getInodePath(baseDirFileId, "test2");
    fileStatus = fs.getFileStatus(testFileInodePath);
    // Rename variant 1: rename and rename back
    fs.rename(testFileInodePath, renameDst);
    fs.rename(renameDst, testFileInodePath);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
    // Rename variant 2: rename and rename back
    fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
    fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
    // FileSystem#getContentSummary
    assertEquals(fs.getContentSummary(testFileRegularPath).toString(), fs.getContentSummary(testFileInodePath).toString());
    // FileSystem#listFiles
    checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
    // FileSystem#delete
    fs.delete(testFileInodePath, true);
    assertFalse(fs.exists(testFileInodePath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
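
The getInodePath helper used throughout this test is defined elsewhere in TestINodeFile and is not shown on this page. Based only on the documented /.reserved/.inodes/<inodeID> layout, a hedged reconstruction of what such a helper might look like (the real implementation may differ):

import org.apache.hadoop.fs.Path;

public class InodePaths {
  // Hypothetical reconstruction: builds a path of the form
  // /.reserved/.inodes/<inodeId>/<child>, the inode-based addressing
  // scheme exercised by testInodeIdBasedPaths.
  static Path getInodePath(long inodeId, String remainingPath) {
    return new Path("/.reserved/.inodes/" + inodeId + "/" + remainingPath);
  }

  public static void main(String[] args) {
    // e.g. /.reserved/.inodes/16385/test1 (16385 is an illustrative inode id)
    System.out.println(getInodePath(16385L, "test1"));
  }
}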
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestNativeIO, method assertPermissions.
private void assertPermissions(File f, int expected) throws IOException {
  FileSystem localfs = FileSystem.getLocal(new Configuration());
  FsPermission perms = localfs.getFileStatus(new Path(f.getAbsolutePath())).getPermission();
  assertEquals(expected, perms.toShort());
}
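
perms.toShort() compares the on-disk mode against the expected octal value; the numeric and symbolic forms of FsPermission convert both ways. A short standalone sketch (the class name PermissionRoundTrip is made up for illustration):

import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionRoundTrip {
  public static void main(String[] args) {
    FsPermission p = new FsPermission((short) 0644);
    System.out.println(p);            // rw-r--r--
    System.out.println(p.toShort());  // 420, the decimal value of octal 0644
    // valueOf parses the ls-style string, including the leading file-type char.
    System.out.println(FsPermission.valueOf("-rw-r--r--").toShort() == p.toShort()); // true
  }
}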