Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache: class TestSnapshotListing, method testListSnapshots.
/**
 * Test listing snapshots under a snapshottable directory.
 */
@Test(timeout = 15000)
public void testListSnapshots() throws Exception {
  final Path snapshotsPath = new Path(dir, ".snapshot");
  // Special case: the snapshot dir of the root. Root's snapshot quota is 0,
  // so the listing must be empty.
  FileStatus[] statuses = hdfs.listStatus(new Path("/.snapshot"));
  assertEquals(0, statuses.length);
  // Listing before the directory has been made snapshottable must fail.
  try {
    statuses = hdfs.listStatus(snapshotsPath);
    fail("expect SnapshotException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Directory is not a snapshottable directory: " + dir.toString(), e);
  }
  // Listing right after allowSnapshot, before any snapshot exists.
  hdfs.allowSnapshot(dir);
  statuses = hdfs.listStatus(snapshotsPath);
  assertEquals(0, statuses.length);
  // Create snapshots one by one, checking the listing after each creation.
  final int snapshotNum = 5;
  for (int created = 1; created <= snapshotNum; created++) {
    hdfs.createSnapshot(dir, "s_" + (created - 1));
    statuses = hdfs.listStatus(snapshotsPath);
    assertEquals(created, statuses.length);
    for (int idx = 0; idx < created; idx++) {
      assertEquals("s_" + idx, statuses[idx].getPath().getName());
    }
  }
  // Delete snapshots from the end, checking the listing after each deletion.
  for (int remaining = snapshotNum - 1; remaining > 0; remaining--) {
    hdfs.deleteSnapshot(dir, "s_" + remaining);
    statuses = hdfs.listStatus(snapshotsPath);
    assertEquals(remaining, statuses.length);
    for (int idx = 0; idx < remaining; idx++) {
      assertEquals("s_" + idx, statuses[idx].getPath().getName());
    }
  }
  // Remove the last snapshot; the listing becomes empty again.
  hdfs.deleteSnapshot(dir, "s_0");
  statuses = hdfs.listStatus(snapshotsPath);
  assertEquals(0, statuses.length);
}
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache: class TestSecureIOUtils, method makeTestFile.
/**
 * Creates three small test files under "target" on the raw local
 * filesystem and records the real owner/group of one of them (the
 * owner and group are the same for all three).
 */
@BeforeClass
public static void makeTestFile() throws Exception {
  Configuration conf = new Configuration();
  fs = FileSystem.getLocal(conf).getRaw();
  testFilePathIs = new File((new Path("target", TestSecureIOUtils.class.getSimpleName() + "1")).toUri().getRawPath());
  testFilePathRaf = new File((new Path("target", TestSecureIOUtils.class.getSimpleName() + "2")).toUri().getRawPath());
  testFilePathFadis = new File((new Path("target", TestSecureIOUtils.class.getSimpleName() + "3")).toUri().getRawPath());
  for (File f : new File[] { testFilePathIs, testFilePathRaf, testFilePathFadis }) {
    // try-with-resources: previously the stream leaked if write() threw
    try (FileOutputStream fos = new FileOutputStream(f)) {
      fos.write("hello".getBytes("UTF-8"));
    }
  }
  FileStatus stat = fs.getFileStatus(new Path(testFilePathIs.toString()));
  // RealOwner and RealGroup would be same for all three files.
  realOwner = stat.getOwner();
  realGroup = stat.getGroup();
}
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache: class TestCodec, method testSplitableCodec.
/**
 * Writes a ~2MB compressed test file, then verifies that the splittable
 * codec can begin reading at random byte offsets in BYBLOCK mode and still
 * find two consecutive sequence-numbered lines (as written by
 * writeSplitTestFile). The random seed is logged so a failure can be
 * reproduced.
 */
private void testSplitableCodec(Class<? extends SplittableCompressionCodec> codecClass) throws IOException {
  final long DEFLBYTES = 2 * 1024 * 1024;
  final Configuration conf = new Configuration();
  final Random rand = new Random();
  final long seed = rand.nextLong();
  LOG.info("seed: " + seed);
  rand.setSeed(seed);
  SplittableCompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
  final FileSystem fs = FileSystem.getLocal(conf);
  final FileStatus infile = fs.getFileStatus(writeSplitTestFile(fs, rand, codec, DEFLBYTES));
  if (infile.getLen() > Integer.MAX_VALUE) {
    fail("Unexpected compression: " + DEFLBYTES + " -> " + infile.getLen());
  }
  final int flen = (int) infile.getLen();
  final Text line = new Text();
  final Decompressor dcmp = CodecPool.getDecompressor(codec);
  try {
    for (int pos = 0; pos < infile.getLen(); pos += rand.nextInt(flen / 8)) {
      // read from random positions, verifying that there exist two sequential
      // lines as written in writeSplitTestFile
      // try-with-resources: previously the stream leaked on every iteration
      // and on every early break
      try (SplitCompressionInputStream in = codec.createInputStream(fs.open(infile.getPath()), dcmp, pos, flen, SplittableCompressionCodec.READ_MODE.BYBLOCK)) {
        if (in.getAdjustedStart() >= flen) {
          break;
        }
        LOG.info("SAMPLE " + in.getAdjustedStart() + "," + in.getAdjustedEnd());
        final LineReader lreader = new LineReader(in);
        // ignore; likely partial
        lreader.readLine(line);
        if (in.getPos() >= flen) {
          break;
        }
        lreader.readLine(line);
        final int seq1 = readLeadingInt(line);
        lreader.readLine(line);
        if (in.getPos() >= flen) {
          break;
        }
        final int seq2 = readLeadingInt(line);
        assertEquals("Mismatched lines", seq1 + 1, seq2);
      }
    }
  } finally {
    CodecPool.returnDecompressor(dcmp);
  }
  // remove on success
  fs.delete(infile.getPath().getParent(), true);
}
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache: class TestCredentialProviderFactory, method testLocalJksProvider.
/**
 * Verifies that LocalJavaKeyStoreProvider creates its keystore file with
 * owner-only (rw-------) permissions and retains permissions after an
 * explicit change.
 */
@Test
public void testLocalJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl = LocalJavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
  File file = new File(tmpDir, "test.jks");
  // start from a clean slate; result ignored since the file may not exist yet
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  // assertEquals reports expected vs. actual on failure, unlike the
  // previous assertTrue(...equals(...)) form
  assertEquals("Unexpected permissions", "rw-------", s.getPermission().toString());
  assertTrue(file + " should exist", file.isFile());
  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache: class DistributedFileSystem, method getFileStatus.
/**
 * Returns the stat information about the file.
 * @throws FileNotFoundException if the file does not exist.
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
  final Path absolute = fixRelativePart(f);
  return new FileSystemLinkResolver<FileStatus>() {
    @Override
    public FileStatus doCall(final Path p) throws IOException {
      final HdfsFileStatus status = dfs.getFileInfo(getPathName(p));
      // guard clause: missing file is the exceptional path
      if (status == null) {
        throw new FileNotFoundException("File does not exist: " + p);
      }
      return status.makeQualified(getUri(), p);
    }
    @Override
    public FileStatus next(final FileSystem fs, final Path p) throws IOException {
      // the path resolved to a link into another filesystem; delegate
      return fs.getFileStatus(p);
    }
  }.resolve(this, absolute);
}
Aggregations