Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class DistributedFileSystem, method getTrashRoots.
/**
 * Get all the trash roots of HDFS for current user or for all the users.
 * 1. File deleted from non-encryption zone /user/username/.Trash
 * 2. File deleted from encryption zones
 *    e.g., ez1 rooted at /ez1 has its trash root at /ez1/.Trash/$USER
 * @param allUsers return trashRoots of all users if true, used by emptier
 * @return trash roots of HDFS
 */
@Override
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
  List<FileStatus> ret = new ArrayList<>();
  // Get normal trash roots
  ret.addAll(super.getTrashRoots(allUsers));
  try {
    // Get EZ Trash roots
    final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
    while (it.hasNext()) {
      Path ezTrashRoot = new Path(it.next().getPath(), FileSystem.TRASH_PREFIX);
      if (!exists(ezTrashRoot)) {
        continue;
      }
      if (allUsers) {
        for (FileStatus candidate : listStatus(ezTrashRoot)) {
          if (exists(candidate.getPath())) {
            ret.add(candidate);
          }
        }
      } else {
        Path userTrash = new Path(ezTrashRoot, System.getProperty("user.name"));
        try {
          ret.add(getFileStatus(userTrash));
        } catch (FileNotFoundException ignored) {
        }
      }
    }
  } catch (IOException e) {
    DFSClient.LOG.warn("Cannot get all encrypted trash roots", e);
  }
  return ret;
}
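For context, a minimal usage sketch (not part of the Hadoop source) of how a client might walk the combined trash roots returned above. It assumes fs.defaultFS in the loaded configuration points at an HDFS cluster; the class and variable names are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TrashRootsExample {
  public static void main(String[] args) throws IOException {
    // Assumes the default filesystem is HDFS; otherwise the cast below is skipped.
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      // getTrashRoots(false) returns only the current user's trash roots,
      // including the per-user trash directory inside each encryption zone.
      for (FileStatus root : ((DistributedFileSystem) fs).getTrashRoots(false)) {
        System.out.println(root.getPath() + " (owner: " + root.getOwner() + ")");
      }
    }
  }
}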
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestEncryptionZones, method dTIEM.
private void dTIEM(Path prefix) throws Exception {
  final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create an unencrypted file to check isEncrypted returns false
  final Path baseFile = new Path(prefix, "base");
  fsWrapper.createFile(baseFile);
  FileStatus stat = fsWrapper.getFileStatus(baseFile);
  assertFalse("Expected isEncrypted to return false for " + baseFile, stat.isEncrypted());
  // Create an encrypted file to check isEncrypted returns true
  final Path zone = new Path(prefix, "zone");
  fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
  final Path encFile = new Path(zone, "encfile");
  fsWrapper.createFile(encFile);
  stat = fsWrapper.getFileStatus(encFile);
  assertTrue("Expected isEncrypted to return true for enc file " + encFile, stat.isEncrypted());
  // check that it returns true for an ez root
  stat = fsWrapper.getFileStatus(zone);
  assertTrue("Expected isEncrypted to return true for ezroot", stat.isEncrypted());
  // check that it returns true for a dir in the ez
  final Path zoneSubdir = new Path(zone, "subdir");
  fsWrapper.mkdir(zoneSubdir, FsPermission.getDirDefault(), true);
  stat = fsWrapper.getFileStatus(zoneSubdir);
  assertTrue("Expected isEncrypted to return true for ez subdir " + zoneSubdir, stat.isEncrypted());
  // check that it returns false for a non ez dir
  final Path nonEzDirPath = new Path(prefix, "nonzone");
  fsWrapper.mkdir(nonEzDirPath, FsPermission.getDirDefault(), true);
  stat = fsWrapper.getFileStatus(nonEzDirPath);
  assertFalse("Expected isEncrypted to return false for directory " + nonEzDirPath, stat.isEncrypted());
  // check that it returns true for listings within an ez
  FileStatus[] statuses = fsWrapper.listStatus(zone);
  for (FileStatus s : statuses) {
    assertTrue("Expected isEncrypted to return true for ez stat " + zone, s.isEncrypted());
  }
  statuses = fsWrapper.listStatus(encFile);
  for (FileStatus s : statuses) {
    assertTrue("Expected isEncrypted to return true for ez file stat " + encFile, s.isEncrypted());
  }
  // check that it returns false for listings outside an ez
  statuses = fsWrapper.listStatus(nonEzDirPath);
  for (FileStatus s : statuses) {
    assertFalse("Expected isEncrypted to return false for nonez stat " + nonEzDirPath, s.isEncrypted());
  }
  statuses = fsWrapper.listStatus(baseFile);
  for (FileStatus s : statuses) {
    assertFalse("Expected isEncrypted to return false for non ez stat " + baseFile, s.isEncrypted());
  }
}
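Outside the test wrapper, the same check reduces to reading isEncrypted() off a FileStatus. A hedged, self-contained sketch; the path and class name are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsEncryptedExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Illustrative path; any file or directory inside an encryption zone
    // is expected to report isEncrypted() == true.
    FileStatus st = fs.getFileStatus(new Path("/ez1/encfile"));
    System.out.println(st.getPath() + " encrypted: " + st.isEncrypted());
  }
}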
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestDistributedFileSystem, method testStatistics.
@Test
public void testStatistics() throws IOException {
  FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class).reset();
  @SuppressWarnings("unchecked")
  ThreadLocal<StatisticsData> data = (ThreadLocal<StatisticsData>) Whitebox.getInternalState(
      FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class), "threadData");
  data.set(null);
  int lsLimit = 2;
  final Configuration conf = getTestConfiguration();
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, lsLimit);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    Path dir = new Path("/test");
    Path file = new Path(dir, "file");
    int readOps = 0;
    int writeOps = 0;
    int largeReadOps = 0;
    long opCount = getOpStatistics(OpType.MKDIRS);
    fs.mkdirs(dir);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.MKDIRS, opCount + 1);
    opCount = getOpStatistics(OpType.CREATE);
    FSDataOutputStream out = fs.create(file, (short) 1);
    out.close();
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.CREATE, opCount + 1);
    opCount = getOpStatistics(OpType.GET_FILE_STATUS);
    FileStatus status = fs.getFileStatus(file);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.GET_FILE_STATUS, opCount + 1);
    opCount = getOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS);
    fs.getFileBlockLocations(file, 0, 0);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 1);
    fs.getFileBlockLocations(status, 0, 0);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 2);
    opCount = getOpStatistics(OpType.OPEN);
    FSDataInputStream in = fs.open(file);
    in.close();
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.OPEN, opCount + 1);
    opCount = getOpStatistics(OpType.SET_REPLICATION);
    fs.setReplication(file, (short) 2);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.SET_REPLICATION, opCount + 1);
    opCount = getOpStatistics(OpType.RENAME);
    Path file1 = new Path(dir, "file1");
    fs.rename(file, file1);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.RENAME, opCount + 1);
    opCount = getOpStatistics(OpType.GET_CONTENT_SUMMARY);
    fs.getContentSummary(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.GET_CONTENT_SUMMARY, opCount + 1);
    // Iterative ls test
    long mkdirOp = getOpStatistics(OpType.MKDIRS);
    long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
    for (int i = 0; i < 10; i++) {
      Path p = new Path(dir, Integer.toString(i));
      fs.mkdirs(p);
      mkdirOp++;
      FileStatus[] list = fs.listStatus(dir);
      if (list.length > lsLimit) {
        // if large directory, then count readOps and largeReadOps by
        // number times listStatus iterates
        int iterations = (int) Math.ceil((double) list.length / lsLimit);
        largeReadOps += iterations;
        readOps += iterations;
        listStatusOp += iterations;
      } else {
        // Single iteration in listStatus - no large read operation done
        readOps++;
        listStatusOp++;
      }
      // writeOps incremented by 1 for mkdirs
      // readOps and largeReadOps incremented by 1 or more
      checkStatistics(fs, readOps, ++writeOps, largeReadOps);
      checkOpStatistics(OpType.MKDIRS, mkdirOp);
      checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
    }
    opCount = getOpStatistics(OpType.GET_STATUS);
    fs.getStatus(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.GET_STATUS, opCount + 1);
    opCount = getOpStatistics(OpType.GET_FILE_CHECKSUM);
    fs.getFileChecksum(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.GET_FILE_CHECKSUM, opCount + 1);
    opCount = getOpStatistics(OpType.SET_PERMISSION);
    fs.setPermission(file1, new FsPermission((short) 0777));
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.SET_PERMISSION, opCount + 1);
    opCount = getOpStatistics(OpType.SET_TIMES);
    fs.setTimes(file1, 0L, 0L);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.SET_TIMES, opCount + 1);
    opCount = getOpStatistics(OpType.SET_OWNER);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    fs.setOwner(file1, ugi.getUserName(), ugi.getGroupNames()[0]);
    checkOpStatistics(OpType.SET_OWNER, opCount + 1);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    opCount = getOpStatistics(OpType.DELETE);
    fs.delete(dir, true);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.DELETE, opCount + 1);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
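The counters asserted above come from FileSystem's per-scheme statistics registry. A small sketch (not part of the test) of reading the aggregate values in client code, assuming the hdfs scheme; the class name is illustrative.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class HdfsStatisticsExample {
  public static void main(String[] args) {
    // Aggregate client-side counters for the hdfs scheme, summed across threads.
    FileSystem.Statistics stats = FileSystem.getStatistics(
        HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class);
    System.out.println("read ops: " + stats.getReadOps());
    System.out.println("large read ops: " + stats.getLargeReadOps());
    System.out.println("write ops: " + stats.getWriteOps());
  }
}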
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestEncryptionZones, method testProvisionTrash.
/**
 * Make sure hdfs crypto -provisionTrash command creates a trash directory
 * with sticky bits.
 * @throws Exception
 */
@Test
public void testProvisionTrash() throws Exception {
  // create an EZ /zones/zone1
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  CryptoAdmin cryptoAdmin = new CryptoAdmin(conf);
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  String[] cryptoArgv = new String[] { "-createZone", "-keyName", TEST_KEY, "-path", zone1.toUri().getPath() };
  cryptoAdmin.run(cryptoArgv);
  // remove the trash directory
  Configuration clientConf = new Configuration(conf);
  clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  final FsShell shell = new FsShell(clientConf);
  final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
  String[] argv = new String[] { "-rmdir", trashDir.toUri().getPath() };
  int res = ToolRunner.run(shell, argv);
  assertEquals("Unable to delete trash directory.", 0, res);
  assertFalse(fsWrapper.exists(trashDir));
  // execute -provisionTrash command option and make sure the trash
  // directory has sticky bit.
  String[] provisionTrashArgv = new String[] { "-provisionTrash", "-path", zone1.toUri().getPath() };
  cryptoAdmin.run(provisionTrashArgv);
  assertTrue(fsWrapper.exists(trashDir));
  FileStatus trashFileStatus = fsWrapper.getFileStatus(trashDir);
  assertTrue(trashFileStatus.getPermission().getStickyBit());
}
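The final assertion relies only on public FileStatus and FsPermission accessors. A minimal sketch of the same sticky-bit check outside the test; the zone path and class name are assumptions, not taken from the Hadoop source.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TrashStickyBitCheck {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Illustrative zone path; FileSystem.TRASH_PREFIX is ".Trash".
    Path trash = new Path("/zones/zone1", FileSystem.TRASH_PREFIX);
    FileStatus st = fs.getFileStatus(trash);
    if (!st.getPermission().getStickyBit()) {
      System.err.println("Trash directory " + trash + " is missing the sticky bit");
    }
  }
}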
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestErasureCodeBenchmarkThroughput, method verifyNumFile.
private static void verifyNumFile(final int dataSize, final boolean isEc, int numFile) throws IOException {
  Path path = isEc ? new Path(ErasureCodeBenchmarkThroughput.EC_DIR) : new Path(ErasureCodeBenchmarkThroughput.REP_DIR);
  FileStatus[] statuses = fs.listStatus(path, new PathFilter() {
    @Override
    public boolean accept(Path path) {
      return path.toString().contains(ErasureCodeBenchmarkThroughput.getFilePath(dataSize, isEc));
    }
  });
  Assert.assertEquals(numFile, statuses.length);
}
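The listStatus(Path, PathFilter) overload used here works for any name-based filter. A self-contained sketch under assumed directory and marker names; everything except the FileSystem API calls is illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class FilteredListStatusExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Illustrative directory and name marker.
    FileStatus[] matches = fs.listStatus(new Path("/benchmarks"), new PathFilter() {
      @Override
      public boolean accept(Path p) {
        return p.getName().contains("ec");
      }
    });
    System.out.println("matching files: " + matches.length);
  }
}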