Example 71 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

Class TestCacheDirectives, method checkNumCachedReplicas.

private static void checkNumCachedReplicas(final DistributedFileSystem dfs, final List<Path> paths, final int expectedBlocks, final int expectedReplicas) throws Exception {
    int numCachedBlocks = 0;
    int numCachedReplicas = 0;
    for (Path p : paths) {
        final FileStatus f = dfs.getFileStatus(p);
        final long len = f.getLen();
        final long blockSize = f.getBlockSize();
        // round it up to full blocks
        final long numBlocks = (len + blockSize - 1) / blockSize;
        BlockLocation[] locs = dfs.getFileBlockLocations(p, 0, len);
        assertEquals("Unexpected number of block locations for path " + p, numBlocks, locs.length);
        for (BlockLocation l : locs) {
            if (l.getCachedHosts().length > 0) {
                numCachedBlocks++;
            }
            numCachedReplicas += l.getCachedHosts().length;
        }
    }
    LOG.info("Found " + numCachedBlocks + " of " + expectedBlocks + " blocks");
    LOG.info("Found " + numCachedReplicas + " of " + expectedReplicas + " replicas");
    assertEquals("Unexpected number of cached blocks", expectedBlocks, numCachedBlocks);
    assertEquals("Unexpected number of cached replicas", expectedReplicas, numCachedReplicas);
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), BlockLocation (org.apache.hadoop.fs.BlockLocation)
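The helper above pairs FileStatus with BlockLocation to count cached blocks and replicas. Below is a minimal sketch of the same length-to-block arithmetic against a generic FileSystem; the class name, method name, and the fs/path arguments are assumptions for illustration, not part of the test above.

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class CachedReplicaReportSketch {
    // Hypothetical helper: report block and cached-replica counts for one file.
    static void reportCachedReplicas(FileSystem fs, Path p) throws Exception {
        final FileStatus status = fs.getFileStatus(p);
        final long len = status.getLen();
        final long blockSize = status.getBlockSize();
        // round up to full blocks, as checkNumCachedReplicas does above
        final long numBlocks = (len + blockSize - 1) / blockSize;
        BlockLocation[] locs = fs.getFileBlockLocations(status, 0, len);
        int cachedReplicas = 0;
        for (BlockLocation l : locs) {
            cachedReplicas += l.getCachedHosts().length;
        }
        System.out.println(p + ": " + numBlocks + " blocks, " + locs.length
            + " locations, " + cachedReplicas + " cached replicas");
    }
}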

Example 72 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

Class TestSnapshotDeletion, method testDeleteEarliestSnapshot1.

/**
   * Test deleting the earliest (first) snapshot. In this simplest scenario, the 
   * snapshots are taken on the same directory, and we do not need to combine
   * snapshot diffs.
   */
@Test(timeout = 300000)
public void testDeleteEarliestSnapshot1() throws Exception {
    // create files under sub
    Path file0 = new Path(sub, "file0");
    Path file1 = new Path(sub, "file1");
    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    String snapshotName = "s1";
    try {
        hdfs.deleteSnapshot(sub, snapshotName);
        fail("SnapshotException expected: " + sub.toString() + " is not snapshottable yet");
    } catch (Exception e) {
        GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + sub, e);
    }
    // make sub snapshottable
    hdfs.allowSnapshot(sub);
    try {
        hdfs.deleteSnapshot(sub, snapshotName);
        fail("SnapshotException expected: snapshot " + snapshotName + " does not exist for " + sub.toString());
    } catch (Exception e) {
        GenericTestUtils.assertExceptionContains("Cannot delete snapshot " + snapshotName + " from path " + sub.toString() + ": the snapshot does not exist.", e);
    }
    // create snapshot s1 for sub
    SnapshotTestHelper.createSnapshot(hdfs, sub, snapshotName);
    // check quota usage computation
    checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
    // delete s1
    hdfs.deleteSnapshot(sub, snapshotName);
    checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
    // now we can create a snapshot with the same name
    hdfs.createSnapshot(sub, snapshotName);
    checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
    // create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    // create another snapshot s2
    String snapshotName2 = "s2";
    hdfs.createSnapshot(sub, snapshotName2);
    checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 3);
    // Get the filestatus of sub under snapshot s2
    Path ss = SnapshotTestHelper.getSnapshotPath(sub, snapshotName2, "newFile");
    FileStatus statusBeforeDeletion = hdfs.getFileStatus(ss);
    // delete s1
    hdfs.deleteSnapshot(sub, snapshotName);
    checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 3);
    FileStatus statusAfterDeletion = hdfs.getFileStatus(ss);
    System.out.println("Before deletion: " + statusBeforeDeletion.toString() + "\n" + "After deletion: " + statusAfterDeletion.toString());
    assertEquals(statusBeforeDeletion.toString(), statusAfterDeletion.toString());
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), ExpectedException (org.junit.rules.ExpectedException), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), FileNotFoundException (java.io.FileNotFoundException), Test (org.junit.Test)
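The test reaches newFile's snapshot copy through SnapshotTestHelper.getSnapshotPath. Outside the test harness, the same FileStatus lookup can be sketched directly through HDFS's reserved ".snapshot" directory (HdfsConstants.DOT_SNAPSHOT_DIR). The class name, the fs/dir arguments, and the snapshot and file names below are assumptions.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class SnapshotStatusSketch {
    // Hypothetical check: a file's status inside snapshot s2 should be
    // unaffected by deleting a different snapshot of the same directory.
    static void compareAcrossSnapshotDelete(FileSystem fs, Path dir) throws Exception {
        Path snapCopy = new Path(dir, HdfsConstants.DOT_SNAPSHOT_DIR + "/s2/newFile");
        FileStatus before = fs.getFileStatus(snapCopy);
        // delete an unrelated, earlier snapshot of the same directory
        fs.deleteSnapshot(dir, "s1");
        FileStatus after = fs.getFileStatus(snapCopy);
        if (!before.toString().equals(after.toString())) {
            throw new AssertionError("snapshot copy changed after deleting s1");
        }
    }
}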

Example 73 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

Class TestSnapshotDeletion, method testDeleteSnapshotWithDirModification.

/** Test deleting snapshots with modification on the metadata of directory */
@Test(timeout = 300000)
public void testDeleteSnapshotWithDirModification() throws Exception {
    Path file = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    hdfs.setOwner(sub, "user1", "group1");
    // create snapshot s1 for sub1, and change the metadata of sub1
    SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
    checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
    hdfs.setOwner(sub, "user2", "group2");
    checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
    // create snapshot s2 for sub1, but do not modify sub1 afterwards
    hdfs.createSnapshot(sub, "s2");
    checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
    // create snapshot s3 for sub1, and change the metadata of sub1
    hdfs.createSnapshot(sub, "s3");
    checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
    hdfs.setOwner(sub, "user3", "group3");
    checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
    // delete snapshot s3
    hdfs.deleteSnapshot(sub, "s3");
    checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
    // check sub1's metadata in snapshot s2
    FileStatus statusOfS2 = hdfs.getFileStatus(new Path(sub, HdfsConstants.DOT_SNAPSHOT_DIR + "/s2"));
    assertEquals("user2", statusOfS2.getOwner());
    assertEquals("group2", statusOfS2.getGroup());
    // delete snapshot s2
    hdfs.deleteSnapshot(sub, "s2");
    checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
    // check sub1's metadata in snapshot s1
    FileStatus statusOfS1 = hdfs.getFileStatus(new Path(sub, HdfsConstants.DOT_SNAPSHOT_DIR + "/s1"));
    assertEquals("user1", statusOfS1.getOwner());
    assertEquals("group1", statusOfS1.getGroup());
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), Test (org.junit.Test)
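The assertions above read the directory's owner and group back through FileStatus, but the object carries the full metadata of the snapshot root. A minimal sketch of the commonly inspected attributes follows; the class name and the fs/path arguments are assumptions.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

class StatusMetadataSketch {
    // Hypothetical dump of the metadata fields asserted in tests like the one above.
    static void printMetadata(FileSystem fs, Path p) throws Exception {
        FileStatus st = fs.getFileStatus(p);
        String owner = st.getOwner();
        String group = st.getGroup();
        FsPermission perm = st.getPermission();
        long mtime = st.getModificationTime();
        System.out.println(p + " owner=" + owner + " group=" + group
            + " perm=" + perm + " mtime=" + mtime
            + (st.isDirectory() ? " (dir)" : " (file)"));
    }
}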

Example 74 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

Class TestSnapshotDeletion, method testRenameSnapshotDiff.

/** 
   * A test covering the case where the snapshot diff to be deleted is renamed 
   * to its previous snapshot. 
   */
@Test(timeout = 300000)
public void testRenameSnapshotDiff() throws Exception {
    cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
    final Path subFile0 = new Path(sub, "file0");
    final Path subsubFile0 = new Path(subsub, "file0");
    DFSTestUtil.createFile(hdfs, subFile0, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, subsubFile0, BLOCKSIZE, REPLICATION, seed);
    hdfs.setOwner(subsub, "owner", "group");
    // create snapshot s0 on sub
    SnapshotTestHelper.createSnapshot(hdfs, sub, "s0");
    checkQuotaUsageComputation(sub, 4, BLOCKSIZE * 6);
    // make some changes on both sub and subsub
    final Path subFile1 = new Path(sub, "file1");
    final Path subsubFile1 = new Path(subsub, "file1");
    DFSTestUtil.createFile(hdfs, subFile1, BLOCKSIZE, REPLICATION_1, seed);
    DFSTestUtil.createFile(hdfs, subsubFile1, BLOCKSIZE, REPLICATION, seed);
    checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
    // create snapshot s1 on sub
    SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
    checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
    // create snapshot s2 on dir
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
    checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
    checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
    // make changes on subsub and subsubFile1
    hdfs.setOwner(subsub, "unknown", "unknown");
    hdfs.setReplication(subsubFile1, REPLICATION_1);
    checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
    checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
    // make changes on sub
    hdfs.delete(subFile1, true);
    checkQuotaUsageComputation(new Path("/"), 8, BLOCKSIZE * 11);
    checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
    checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
    Path subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2", sub.getName() + Path.SEPARATOR + subsub.getName());
    Path subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2", sub.getName() + Path.SEPARATOR + subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
    Path subFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2", sub.getName() + Path.SEPARATOR + subFile1.getName());
    FileStatus subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
    assertEquals("owner", subsubStatus.getOwner());
    assertEquals("group", subsubStatus.getGroup());
    FileStatus subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
    assertEquals(REPLICATION, subsubFile1Status.getReplication());
    FileStatus subFile1Status = hdfs.getFileStatus(subFile1SCopy);
    assertEquals(REPLICATION_1, subFile1Status.getReplication());
    // delete snapshot s2
    hdfs.deleteSnapshot(dir, "s2");
    checkQuotaUsageComputation(new Path("/"), 8, BLOCKSIZE * 11);
    checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
    checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
    // no snapshot copy for s2
    try {
        hdfs.getFileStatus(subsubSnapshotCopy);
        fail("should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + subsubSnapshotCopy.toString(), e);
    }
    try {
        hdfs.getFileStatus(subsubFile1SCopy);
        fail("should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + subsubFile1SCopy.toString(), e);
    }
    try {
        hdfs.getFileStatus(subFile1SCopy);
        fail("should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + subFile1SCopy.toString(), e);
    }
    // the snapshot copy of s2 should now be renamed to s1 under sub
    subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1", subsub.getName());
    subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1", subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
    subFile1SCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1", subFile1.getName());
    subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
    assertEquals("owner", subsubStatus.getOwner());
    assertEquals("group", subsubStatus.getGroup());
    subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
    assertEquals(REPLICATION, subsubFile1Status.getReplication());
    // also subFile1's snapshot copy should have been moved to diff of s1 as 
    // combination
    subFile1Status = hdfs.getFileStatus(subFile1SCopy);
    assertEquals(REPLICATION_1, subFile1Status.getReplication());
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), FileNotFoundException (java.io.FileNotFoundException), Test (org.junit.Test)
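Two FileStatus patterns from this test recur throughout the suite: reading the replication factor of a snapshot copy, and expecting FileNotFoundException once the snapshot that held it is gone. A hedged sketch of both, with the class name and all parameters assumed for illustration:

import java.io.FileNotFoundException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class SnapshotCopySketch {
    // Hypothetical check: compare replication of a live file and its snapshot copy,
    // then confirm the copy is unreachable after its snapshot is deleted.
    static void checkReplicationAndDeletion(FileSystem fs, Path dir, Path liveFile,
            Path snapshotCopy, String snapshotName) throws Exception {
        short liveRepl = fs.getFileStatus(liveFile).getReplication();
        short snapRepl = fs.getFileStatus(snapshotCopy).getReplication();
        System.out.println("live=" + liveRepl + " snapshot=" + snapRepl);
        fs.deleteSnapshot(dir, snapshotName);
        try {
            fs.getFileStatus(snapshotCopy);
            throw new AssertionError("expected FileNotFoundException");
        } catch (FileNotFoundException expected) {
            // the snapshot copy is no longer reachable
        }
    }
}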

Example 75 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

Class TestSnapshotFileLength, method testSnapshotfileLength.

/**
   * Test that we cannot read a file beyond its snapshot length
   * when accessing it via a snapshot path.
   *
   */
@Test(timeout = 300000)
public void testSnapshotfileLength() throws Exception {
    hdfs.mkdirs(sub);
    int bytesRead;
    byte[] buffer = new byte[BLOCKSIZE * 8];
    int origLen = BLOCKSIZE + 1;
    int toAppend = BLOCKSIZE;
    FSDataInputStream fis = null;
    FileStatus fileStatus = null;
    // Create and write a file.
    Path file1 = new Path(sub, file1Name);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, SEED);
    DFSTestUtil.appendFile(hdfs, file1, origLen);
    // Create a snapshot on the parent directory.
    hdfs.allowSnapshot(sub);
    hdfs.createSnapshot(sub, snapshot1);
    Path file1snap1 = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
    final FileChecksum snapChksum1 = hdfs.getFileChecksum(file1snap1);
    assertThat("file and snapshot file checksums are not equal", hdfs.getFileChecksum(file1), is(snapChksum1));
    // Append to the file.
    FSDataOutputStream out = hdfs.append(file1);
    // HDFS-8150:Fetching checksum for file under construction should fail
    try {
        hdfs.getFileChecksum(file1);
        fail("getFileChecksum should fail for files " + "with blocks under construction");
    } catch (IOException ie) {
        assertTrue(ie.getMessage().contains("Fail to get checksum, since file " + file1 + " is under construction."));
    }
    assertThat("snapshot checksum (post-open for append) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
    try {
        AppendTestUtil.write(out, 0, toAppend);
        // Test reading from snapshot of file that is open for append
        byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
        assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
        // Verify that checksum didn't change
        assertThat("snapshot checksum (post-append) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
    } finally {
        out.close();
    }
    assertThat("file and snapshot file checksums (post-close) are equal", hdfs.getFileChecksum(file1), not(snapChksum1));
    assertThat("snapshot file checksum (post-close) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
    // Make sure we can read the entire file via its non-snapshot path.
    fileStatus = hdfs.getFileStatus(file1);
    assertThat(fileStatus.getLen(), is((long) origLen + toAppend));
    fis = hdfs.open(file1);
    bytesRead = fis.read(0, buffer, 0, buffer.length);
    assertThat(bytesRead, is(origLen + toAppend));
    fis.close();
    // Try to open the file via its snapshot path.
    fis = hdfs.open(file1snap1);
    fileStatus = hdfs.getFileStatus(file1snap1);
    assertThat(fileStatus.getLen(), is((long) origLen));
    // Make sure we can only read up to the snapshot length.
    bytesRead = fis.read(0, buffer, 0, buffer.length);
    assertThat(bytesRead, is(origLen));
    fis.close();
    byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
    assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), FileChecksum (org.apache.hadoop.fs.FileChecksum), Test (org.junit.Test)
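The key observation in the test is that FileStatus.getLen() on a snapshot path reports the length at snapshot time, and reads through that path stop there even after the live file grows. A minimal sketch of reading exactly the snapshot-length bytes; the class name and the fs/path arguments are assumptions, and the buffer cast assumes the file fits in memory.

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class SnapshotLengthSketch {
    // Hypothetical read bounded by the snapshot-time file length.
    static byte[] readSnapshotCopy(FileSystem fs, Path snapshotPath) throws Exception {
        FileStatus status = fs.getFileStatus(snapshotPath);
        byte[] buffer = new byte[(int) status.getLen()];
        try (FSDataInputStream in = fs.open(snapshotPath)) {
            // positional read; returns at most the snapshot length, as the test verifies
            int bytesRead = in.read(0, buffer, 0, buffer.length);
            System.out.println("read " + bytesRead + " of " + buffer.length + " bytes");
        }
        return buffer;
    }
}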

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus): 1156
Path (org.apache.hadoop.fs.Path): 910
FileSystem (org.apache.hadoop.fs.FileSystem): 417
Test (org.junit.Test): 372
IOException (java.io.IOException): 296
Configuration (org.apache.hadoop.conf.Configuration): 187
ArrayList (java.util.ArrayList): 175
FileNotFoundException (java.io.FileNotFoundException): 136
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 105
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 86
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 67
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 65
HashMap (java.util.HashMap): 54
File (java.io.File): 41
URI (java.net.URI): 41
PathFilter (org.apache.hadoop.fs.PathFilter): 38
BufferedReader (java.io.BufferedReader): 30
InputStreamReader (java.io.InputStreamReader): 30
BlockLocation (org.apache.hadoop.fs.BlockLocation): 30
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 30
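The counts above show FileStatus most often appearing alongside Path, FileSystem, and Configuration. A minimal, self-contained sketch combining those companions follows; the class name, the listed path "/tmp", and whatever fs.defaultFS the configuration resolves to are assumptions.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListFileStatusExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolves to the default filesystem named in the configuration (assumption).
        FileSystem fs = FileSystem.get(conf);
        List<FileStatus> files = new ArrayList<>();
        for (FileStatus status : fs.listStatus(new Path("/tmp"))) {
            if (status.isFile()) {
                files.add(status);
            }
        }
        for (FileStatus status : files) {
            System.out.println(status.getPath() + "\t" + status.getLen()
                + "\t" + status.getPermission());
        }
    }
}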