Example 76 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class TestSnapshotListing, method testListSnapshots.

/**
   * Test listing snapshots under a snapshottable directory
   */
@Test(timeout = 15000)
public void testListSnapshots() throws Exception {
    final Path snapshotsPath = new Path(dir, ".snapshot");
    FileStatus[] stats = null;
    // special case: snapshots of root
    stats = hdfs.listStatus(new Path("/.snapshot"));
    // should be 0 since root's snapshot quota is 0
    assertEquals(0, stats.length);
    // list before set dir as snapshottable
    try {
        stats = hdfs.listStatus(snapshotsPath);
        fail("expect SnapshotException");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + dir.toString(), e);
    }
    // list before creating snapshots
    hdfs.allowSnapshot(dir);
    stats = hdfs.listStatus(snapshotsPath);
    assertEquals(0, stats.length);
    // list while creating snapshots
    final int snapshotNum = 5;
    for (int sNum = 0; sNum < snapshotNum; sNum++) {
        hdfs.createSnapshot(dir, "s_" + sNum);
        stats = hdfs.listStatus(snapshotsPath);
        assertEquals(sNum + 1, stats.length);
        for (int i = 0; i <= sNum; i++) {
            assertEquals("s_" + i, stats[i].getPath().getName());
        }
    }
    // list while deleting snapshots
    for (int sNum = snapshotNum - 1; sNum > 0; sNum--) {
        hdfs.deleteSnapshot(dir, "s_" + sNum);
        stats = hdfs.listStatus(snapshotsPath);
        assertEquals(sNum, stats.length);
        for (int i = 0; i < sNum; i++) {
            assertEquals("s_" + i, stats[i].getPath().getName());
        }
    }
    // remove the last snapshot
    hdfs.deleteSnapshot(dir, "s_0");
    stats = hdfs.listStatus(snapshotsPath);
    assertEquals(0, stats.length);
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) IOException(java.io.IOException) Test(org.junit.Test)
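
The pattern above generalizes beyond snapshots: listStatus on any directory returns a FileStatus[] whose entries expose the child path, length, and type. A minimal sketch under assumed defaults (the /data path and the class name are placeholders, not part of the example above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStatusSketch {
    public static void main(String[] args) throws Exception {
        // uses whatever fs.defaultFS the Configuration resolves to
        FileSystem fs = FileSystem.get(new Configuration());
        // one FileStatus per child entry, as in the snapshot listing above
        FileStatus[] stats = fs.listStatus(new Path("/data")); // placeholder path
        for (FileStatus stat : stats) {
            System.out.println(stat.getPath().getName()
                + " len=" + stat.getLen()
                + " dir=" + stat.isDirectory());
        }
    }
}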

Example 77 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class TestSecureIOUtils, method makeTestFile.

@BeforeClass
public static void makeTestFile() throws Exception {
    Configuration conf = new Configuration();
    fs = FileSystem.getLocal(conf).getRaw();
    testFilePathIs = new File((new Path("target", TestSecureIOUtils.class.getSimpleName() + "1")).toUri().getRawPath());
    testFilePathRaf = new File((new Path("target", TestSecureIOUtils.class.getSimpleName() + "2")).toUri().getRawPath());
    testFilePathFadis = new File((new Path("target", TestSecureIOUtils.class.getSimpleName() + "3")).toUri().getRawPath());
    for (File f : new File[] { testFilePathIs, testFilePathRaf, testFilePathFadis }) {
        try (FileOutputStream fos = new FileOutputStream(f)) {
            fos.write("hello".getBytes("UTF-8"));
        }
    }
    FileStatus stat = fs.getFileStatus(new Path(testFilePathIs.toString()));
    // RealOwner and RealGroup would be the same for all three files.
    realOwner = stat.getOwner();
    realGroup = stat.getGroup();
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FileOutputStream(java.io.FileOutputStream) File(java.io.File) BeforeClass(org.junit.BeforeClass)
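
The only FileStatus calls this fixture needs are getOwner() and getGroup(), taken from the raw local file system so that ownership checks see the real on-disk values. A minimal standalone sketch (the temp-file name and class name are placeholders):

import java.io.File;
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OwnerGroupSketch {
    public static void main(String[] args) throws Exception {
        // raw local FS bypasses the checksum wrapper, as in the test above
        FileSystem fs = FileSystem.getLocal(new Configuration()).getRaw();
        File f = File.createTempFile("owner-sketch", ".txt"); // placeholder name
        try (FileOutputStream fos = new FileOutputStream(f)) {
            fos.write("hello".getBytes(StandardCharsets.UTF_8));
        }
        FileStatus stat = fs.getFileStatus(new Path(f.toString()));
        System.out.println(stat.getOwner() + ":" + stat.getGroup());
    }
}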

Example 78 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class TestCodec, method testSplitableCodec.

private void testSplitableCodec(Class<? extends SplittableCompressionCodec> codecClass) throws IOException {
    final long DEFLBYTES = 2 * 1024 * 1024;
    final Configuration conf = new Configuration();
    final Random rand = new Random();
    final long seed = rand.nextLong();
    LOG.info("seed: " + seed);
    rand.setSeed(seed);
    SplittableCompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
    final FileSystem fs = FileSystem.getLocal(conf);
    final FileStatus infile = fs.getFileStatus(writeSplitTestFile(fs, rand, codec, DEFLBYTES));
    if (infile.getLen() > Integer.MAX_VALUE) {
        fail("Unexpected compression: " + DEFLBYTES + " -> " + infile.getLen());
    }
    final int flen = (int) infile.getLen();
    final Text line = new Text();
    final Decompressor dcmp = CodecPool.getDecompressor(codec);
    try {
        for (int pos = 0; pos < infile.getLen(); pos += rand.nextInt(flen / 8)) {
            // read from random positions, verifying that there exist two sequential
            // lines as written in writeSplitTestFile
            final SplitCompressionInputStream in = codec.createInputStream(fs.open(infile.getPath()), dcmp, pos, flen, SplittableCompressionCodec.READ_MODE.BYBLOCK);
            if (in.getAdjustedStart() >= flen) {
                break;
            }
            LOG.info("SAMPLE " + in.getAdjustedStart() + "," + in.getAdjustedEnd());
            final LineReader lreader = new LineReader(in);
            // ignore; likely partial
            lreader.readLine(line);
            if (in.getPos() >= flen) {
                break;
            }
            lreader.readLine(line);
            final int seq1 = readLeadingInt(line);
            lreader.readLine(line);
            if (in.getPos() >= flen) {
                break;
            }
            final int seq2 = readLeadingInt(line);
            assertEquals("Mismatched lines", seq1 + 1, seq2);
        }
    } finally {
        CodecPool.returnDecompressor(dcmp);
    }
    // remove on success
    fs.delete(infile.getPath().getParent(), true);
}
Also used: FileStatus(org.apache.hadoop.fs.FileStatus) BuiltInGzipDecompressor(org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor) Configuration(org.apache.hadoop.conf.Configuration) Random(java.util.Random) FileSystem(org.apache.hadoop.fs.FileSystem) LineReader(org.apache.hadoop.util.LineReader) Text(org.apache.hadoop.io.Text)
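
FileStatus enters this test only through getLen(), which bounds the random split positions and is checked against Integer.MAX_VALUE before the narrowing cast. That guard is worth keeping whenever a file length feeds an int offset; a minimal sketch (the temp file and class name are placeholders):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LengthGuardSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        File tmp = File.createTempFile("length-sketch", ".bin"); // placeholder
        FileStatus stat = fs.getFileStatus(new Path(tmp.toString()));
        long len = stat.getLen();
        if (len > Integer.MAX_VALUE) {
            throw new IllegalStateException("file too large for int offsets: " + len);
        }
        int flen = (int) len; // safe narrowing after the guard
        System.out.println("length=" + flen);
    }
}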

Example 79 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class TestCredentialProviderFactory, method testLocalJksProvider.

@Test
public void testLocalJksProvider() throws Exception {
    Configuration conf = new Configuration();
    final Path jksPath = new Path(tmpDir.toString(), "test.jks");
    final String ourUrl = LocalJavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
    File file = new File(tmpDir, "test.jks");
    file.delete();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
    checkSpecificProvider(conf, ourUrl);
    Path path = ProviderUtils.unnestUri(new URI(ourUrl));
    FileSystem fs = path.getFileSystem(conf);
    FileStatus s = fs.getFileStatus(path);
    assertTrue("Unexpected permissions: " + s.getPermission().toString(), s.getPermission().toString().equals("rw-------"));
    assertTrue(file + " should exist", file.isFile());
    // check permission retention after explicit change
    fs.setPermission(path, new FsPermission("777"));
    checkPermissionRetention(conf, ourUrl, path);
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) File(java.io.File) URI(java.net.URI) Test(org.junit.Test)
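
The assertion above leans on the fact that FileStatus.getPermission() returns an FsPermission whose toString() uses rwx notation. A minimal sketch of reading a permission and then changing it, much as the test does before checkPermissionRetention (the path and class name are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("target/perm-sketch.txt"); // placeholder
        FileSystem fs = path.getFileSystem(conf);
        fs.create(path).close();
        FileStatus s = fs.getFileStatus(path);
        System.out.println("before: " + s.getPermission()); // e.g. rw-r--r--
        fs.setPermission(path, new FsPermission("600")); // octal string form
        System.out.println("after: " + fs.getFileStatus(path).getPermission());
    }
}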

Example 80 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class DistributedFileSystem, method getFileStatus.

/**
   * Returns the stat information about the file.
   * @throws FileNotFoundException if the file does not exist.
   */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FileStatus>() {

        @Override
        public FileStatus doCall(final Path p) throws IOException {
            HdfsFileStatus fi = dfs.getFileInfo(getPathName(p));
            if (fi != null) {
                return fi.makeQualified(getUri(), p);
            } else {
                throw new FileNotFoundException("File does not exist: " + p);
            }
        }

        @Override
        public FileStatus next(final FileSystem fs, final Path p) throws IOException {
            return fs.getFileStatus(p);
        }
    }.resolve(this, absF);
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) HdfsLocatedFileStatus(org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException)
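
From the caller's side the contract documented above is simple: getFileStatus either returns a qualified FileStatus or throws FileNotFoundException, so an existence probe can catch the exception rather than racing a separate exists() call. A minimal sketch (the path and class name are placeholders):

import java.io.FileNotFoundException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StatProbeSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/example/file.txt"); // placeholder
        try {
            FileStatus stat = fs.getFileStatus(p);
            System.out.println(p + " modified at " + stat.getModificationTime());
        } catch (FileNotFoundException e) {
            System.out.println(p + " does not exist");
        }
    }
}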

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus): 1156
Path (org.apache.hadoop.fs.Path): 910
FileSystem (org.apache.hadoop.fs.FileSystem): 417
Test (org.junit.Test): 372
IOException (java.io.IOException): 296
Configuration (org.apache.hadoop.conf.Configuration): 187
ArrayList (java.util.ArrayList): 175
FileNotFoundException (java.io.FileNotFoundException): 136
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 105
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 86
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 67
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 65
HashMap (java.util.HashMap): 54
File (java.io.File): 41
URI (java.net.URI): 41
PathFilter (org.apache.hadoop.fs.PathFilter): 38
BufferedReader (java.io.BufferedReader): 30
InputStreamReader (java.io.InputStreamReader): 30
BlockLocation (org.apache.hadoop.fs.BlockLocation): 30
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 30