Example 86 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class TestFileTruncate method testCopyOnTruncateWithDataNodesRestart.

/**
   * The last block is partially truncated (copy-on-truncate).
   * dn1 is shut down before the truncate and restarted after the truncate succeeds.
   */
@Test(timeout = 60000)
public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
    int startingFileSize = 3 * BLOCK_SIZE;
    byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
    final Path p = new Path(parent, "testCopyOnTruncateWithDataNodesRestart");
    writeContents(contents, startingFileSize, p);
    LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
    fs.allowSnapshot(parent);
    fs.createSnapshot(parent, "ss0");
    int dn = 1;
    int toTruncateLength = 1;
    int newLength = startingFileSize - toTruncateLength;
    cluster.getDataNodes().get(dn).shutdown();
    truncateAndRestartDN(p, dn, newLength);
    checkBlockRecovery(p);
    LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
    /*
     * For copy-on-truncate, the new block gets a new block id and a new
     * generation stamp (GS). The new block starts with only 2 live replicas,
     * so it is re-replicated to dn1 once dn1 is back.
     */
    assertNotEquals(newBlock.getBlock().getBlockId(), oldBlock.getBlock().getBlockId());
    assertEquals(newBlock.getBlock().getGenerationStamp(), oldBlock.getBlock().getGenerationStamp() + 1);
    // Wait for the replica count to reach 3
    DFSTestUtil.waitReplication(fs, p, REPLICATION);
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
    // New block is replicated to dn1
    assertEquals(utils.getStoredDataLength(newBlock.getBlock()), newBlock.getBlockSize());
    // The old replica still exists, because the snapshot references it
    assertEquals(utils.getStoredDataLength(oldBlock.getBlock()), oldBlock.getBlockSize());
    assertEquals(utils.getStoredGenerationStamp(oldBlock.getBlock()), oldBlock.getBlock().getGenerationStamp());
    // Validate the file
    FileStatus fileStatus = fs.getFileStatus(p);
    assertThat(fileStatus.getLen(), is((long) newLength));
    checkFullFile(p, newLength, contents);
    fs.deleteSnapshot(parent, "ss0");
    fs.delete(parent, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FsDatasetTestUtils(org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
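
The FileStatus check at the end of this test is the core of the public-API pattern: truncate, wait for block recovery if needed, then confirm the new length through getFileStatus(). Below is a minimal standalone sketch of that pattern, assuming fs.defaultFS points at a running HDFS and the file already exists; the path, length, and polling loop are illustrative, not part of the test above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TruncateAndVerify {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path file = new Path("/tmp/truncate-demo"); // illustrative path
        long newLength = 1024L;                     // illustrative length
        // truncate() returns true if the file is usable immediately;
        // false means the last block is being recovered asynchronously.
        boolean ready = dfs.truncate(file, newLength);
        while (!ready && !dfs.isFileClosed(file)) {
            Thread.sleep(100); // wait for block recovery to finish
        }
        FileStatus status = dfs.getFileStatus(file);
        System.out.println("length after truncate: " + status.getLen());
    }
}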

Example 87 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class TestFileTruncate method testTruncate4Symlink.

@Test
public void testTruncate4Symlink() throws IOException {
    final int fileLength = 3 * BLOCK_SIZE;
    fs.mkdirs(parent);
    final byte[] contents = AppendTestUtil.initBuffer(fileLength);
    final Path file = new Path(parent, "testTruncate4Symlink");
    writeContents(contents, fileLength, file);
    final Path link = new Path(parent, "link");
    fs.createSymlink(file, link, false);
    final int newLength = fileLength / 3;
    boolean isReady = fs.truncate(link, newLength);
    assertTrue("Recovery is not expected.", isReady);
    FileStatus fileStatus = fs.getFileStatus(file);
    assertThat(fileStatus.getLen(), is((long) newLength));
    ContentSummary cs = fs.getContentSummary(parent);
    assertEquals("Bad disk space usage", cs.getSpaceConsumed(), newLength * REPLICATION);
    // validate the file content
    checkFullFile(file, newLength, contents);
    fs.delete(parent, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ContentSummary(org.apache.hadoop.fs.ContentSummary) Test(org.junit.Test)
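
The ContentSummary assertion above ties the logical file length to raw disk usage: with replication factor REPLICATION, the space consumed should be newLength * REPLICATION. A short sketch of reading the same accounting for any directory, assuming a configured FileSystem (the path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SpaceUsage {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        ContentSummary cs = fs.getContentSummary(new Path("/tmp")); // illustrative
        System.out.println("logical bytes:  " + cs.getLength());
        // Raw usage counts every replica, so for a fully replicated
        // directory this is roughly getLength() * replication factor.
        System.out.println("consumed bytes: " + cs.getSpaceConsumed());
        System.out.println("files: " + cs.getFileCount()
            + ", dirs: " + cs.getDirectoryCount());
    }
}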

Example 88 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class TestFileTruncate method testTruncateWithDataNodesRestart.

/**
   * The last block is partially truncated (non copy-on-truncate).
   * dn0 is shut down before the truncate and restarted after the truncate succeeds.
   */
@Test(timeout = 60000)
public void testTruncateWithDataNodesRestart() throws Exception {
    int startingFileSize = 3 * BLOCK_SIZE;
    byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
    final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
    writeContents(contents, startingFileSize, p);
    LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
    int dn = 0;
    int toTruncateLength = 1;
    int newLength = startingFileSize - toTruncateLength;
    cluster.getDataNodes().get(dn).shutdown();
    truncateAndRestartDN(p, dn, newLength);
    checkBlockRecovery(p);
    LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
    /*
     * For non copy-on-truncate, the truncated block keeps the same block id,
     * but its generation stamp (GS) should increase.
     * The truncated block is re-replicated to dn0 after dn0 restarts.
     */
    assertEquals(newBlock.getBlock().getBlockId(), oldBlock.getBlock().getBlockId());
    assertEquals(newBlock.getBlock().getGenerationStamp(), oldBlock.getBlock().getGenerationStamp() + 1);
    Thread.sleep(2000);
    // Trigger a second block report to delete the corrupted replica, if there is one
    cluster.triggerBlockReports();
    // Wait for the replica count to reach 3
    DFSTestUtil.waitReplication(fs, p, REPLICATION);
    // Old replica is disregarded and replaced with the truncated one
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
    assertEquals(utils.getStoredDataLength(newBlock.getBlock()), newBlock.getBlockSize());
    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()), newBlock.getBlock().getGenerationStamp());
    // Validate the file
    FileStatus fileStatus = fs.getFileStatus(p);
    assertThat(fileStatus.getLen(), is((long) newLength));
    checkFullFile(p, newLength, contents);
    fs.delete(parent, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FsDatasetTestUtils(org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
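
FsDatasetTestUtils is test-only; a normal client cannot read stored replica lengths or generation stamps directly. The closest public view of where a file's blocks live is getFileBlockLocations(), which also consumes a FileStatus. A sketch under the same assumptions as above (illustrative names):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockPlacement {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus status = fs.getFileStatus(new Path("/tmp/some-file")); // illustrative
        // Ask for the locations of every block in the file.
        BlockLocation[] locations =
            fs.getFileBlockLocations(status, 0, status.getLen());
        for (BlockLocation loc : locations) {
            System.out.println("offset " + loc.getOffset()
                + ", length " + loc.getLength()
                + ", hosts " + String.join(",", loc.getHosts()));
        }
    }
}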

Example 89 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class TestRenameWithSnapshots method testRenameFileAndDeleteSnapshot.

/**
   * Test renaming a file and then deleting snapshots.
   */
@Test
public void testRenameFileAndDeleteSnapshot() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir2, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    hdfs.createSnapshot(sdir1, "s3");
    final Path newfoo = new Path(sdir1, "foo");
    hdfs.rename(foo, newfoo);
    hdfs.setReplication(newfoo, REPL_1);
    hdfs.createSnapshot(sdir1, "s4");
    hdfs.setReplication(newfoo, REPL_2);
    FileStatus status = hdfs.getFileStatus(newfoo);
    assertEquals(REPL_2, status.getReplication());
    final Path foo_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo");
    status = hdfs.getFileStatus(foo_s4);
    assertEquals(REPL_1, status.getReplication());
    hdfs.createSnapshot(sdir1, "s5");
    final Path foo_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5", "foo");
    status = hdfs.getFileStatus(foo_s5);
    assertEquals(REPL_2, status.getReplication());
    // delete snapshot s5.
    hdfs.deleteSnapshot(sdir1, "s5");
    restartClusterAndCheckImage(true);
    assertFalse(hdfs.exists(foo_s5));
    status = hdfs.getFileStatus(foo_s4);
    assertEquals(REPL_1, status.getReplication());
    // delete snapshot s4.
    hdfs.deleteSnapshot(sdir1, "s4");
    assertFalse(hdfs.exists(foo_s4));
    Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
    assertTrue(hdfs.exists(foo_s2));
    status = hdfs.getFileStatus(foo_s2);
    assertEquals(REPL, status.getReplication());
    INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
    assertEquals(1, snode.getDiffs().asList().size());
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
    // restart cluster
    restartClusterAndCheckImage(true);
    // delete snapshot s2.
    hdfs.deleteSnapshot(sdir2, "s2");
    assertFalse(hdfs.exists(foo_s2));
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    hdfs.deleteSnapshot(sdir1, "s3");
    restartClusterAndCheckImage(true);
    hdfs.deleteSnapshot(sdir1, "s1");
    restartClusterAndCheckImage(true);
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) FileStatus(org.apache.hadoop.fs.FileStatus) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)
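
The snapshot paths in this test come from a test helper, but they follow the standard HDFS layout: a snapshot of /dir1 named s1 is visible under /dir1/.snapshot/s1. A sketch of reading a snapshot copy's FileStatus after a rename, assuming a directory already made snapshottable (allowSnapshot is an admin operation; the paths and names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotStatus {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path dir = new Path("/dir1");     // illustrative
        Path file = new Path(dir, "foo"); // illustrative
        dfs.allowSnapshot(dir);
        // createSnapshot() returns the snapshot root, e.g. /dir1/.snapshot/s1
        Path snapRoot = dfs.createSnapshot(dir, "s1");
        dfs.rename(file, new Path(dir, "bar"));
        // The live path has moved, but the snapshot copy still resolves
        // under its old name, frozen at snapshot time.
        FileStatus snapStatus = dfs.getFileStatus(new Path(snapRoot, "foo"));
        System.out.println("snapshot replication: " + snapStatus.getReplication());
    }
}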

Example 90 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class JobHistoryEventHandler method mkdir.

private void mkdir(FileSystem fs, Path path, FsPermission fsp) throws IOException {
    if (!fs.exists(path)) {
        try {
            fs.mkdirs(path, fsp);
            FileStatus fsStatus = fs.getFileStatus(path);
            LOG.info("Perms after creating " + fsStatus.getPermission().toShort() + ", Expected: " + fsp.toShort());
            if (fsStatus.getPermission().toShort() != fsp.toShort()) {
                LOG.info("Explicitly setting permissions to : " + fsp.toShort() + ", " + fsp);
                fs.setPermission(path, fsp);
            }
        } catch (FileAlreadyExistsException e) {
            LOG.info("Directory: [" + path + "] already exists.");
        }
    }
}
Also used : FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) FileStatus(org.apache.hadoop.fs.FileStatus)
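
The re-check in mkdir() exists because mkdirs() applies the process umask (fs.permissions.umask-mode) to the requested permission, so the directory can come out with fewer bits than asked for; setPermission() then forces the exact mode. The same pattern outside the handler, as a hedged standalone sketch (names illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirExactPerms {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/tmp/history"); // illustrative
        FsPermission wanted = new FsPermission((short) 0700);
        fs.mkdirs(dir, wanted);
        FileStatus status = fs.getFileStatus(dir);
        if (!status.getPermission().equals(wanted)) {
            // mkdirs() may have masked bits off via the umask;
            // setPermission() applies the requested mode verbatim.
            fs.setPermission(dir, wanted);
        }
    }
}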

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus): 1156
Path (org.apache.hadoop.fs.Path): 910
FileSystem (org.apache.hadoop.fs.FileSystem): 417
Test (org.junit.Test): 372
IOException (java.io.IOException): 296
Configuration (org.apache.hadoop.conf.Configuration): 187
ArrayList (java.util.ArrayList): 175
FileNotFoundException (java.io.FileNotFoundException): 136
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 105
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 86
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 67
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 65
HashMap (java.util.HashMap): 54
File (java.io.File): 41
URI (java.net.URI): 41
PathFilter (org.apache.hadoop.fs.PathFilter): 38
BufferedReader (java.io.BufferedReader): 30
InputStreamReader (java.io.InputStreamReader): 30
BlockLocation (org.apache.hadoop.fs.BlockLocation): 30
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 30