Example 1 with SnapshottableDirectoryStatus

Use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in the Apache Hadoop project.

From the class TestSnapshottableDirListing, method testListSnapshottableDir:

/**
   * Test listing all the snapshottable directories
   */
@Test(timeout = 60000)
public void testListSnapshottableDir() throws Exception {
    cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
    // Initially there are no snapshottable directories in the system
    SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
    assertNull(dirs);
    // Make root snapshottable
    final Path root = new Path("/");
    hdfs.allowSnapshot(root);
    dirs = hdfs.getSnapshottableDirListing();
    assertEquals(1, dirs.length);
    assertEquals("", dirs[0].getDirStatus().getLocalName());
    assertEquals(root, dirs[0].getFullPath());
    // Make root non-snapshottable
    hdfs.disallowSnapshot(root);
    dirs = hdfs.getSnapshottableDirListing();
    assertNull(dirs);
    // Make dir1 snapshottable
    hdfs.allowSnapshot(dir1);
    dirs = hdfs.getSnapshottableDirListing();
    assertEquals(1, dirs.length);
    assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
    assertEquals(dir1, dirs[0].getFullPath());
    // There is no snapshot for dir1 yet
    assertEquals(0, dirs[0].getSnapshotNumber());
    // Make dir2 snapshottable
    hdfs.allowSnapshot(dir2);
    dirs = hdfs.getSnapshottableDirListing();
    assertEquals(2, dirs.length);
    assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
    assertEquals(dir1, dirs[0].getFullPath());
    assertEquals(dir2.getName(), dirs[1].getDirStatus().getLocalName());
    assertEquals(dir2, dirs[1].getFullPath());
    // There is no snapshot for dir2 yet
    assertEquals(0, dirs[1].getSnapshotNumber());
    // Create dir3
    final Path dir3 = new Path("/TestSnapshot3");
    hdfs.mkdirs(dir3);
    // Rename dir3 to dir2, overwriting the snapshottable dir2
    hdfs.rename(dir3, dir2, Rename.OVERWRITE);
    // Now we only have one snapshottable dir: dir1
    dirs = hdfs.getSnapshottableDirListing();
    assertEquals(1, dirs.length);
    assertEquals(dir1, dirs[0].getFullPath());
    // Make dir2 snapshottable again
    hdfs.allowSnapshot(dir2);
    // Create a snapshot for dir2
    hdfs.createSnapshot(dir2, "s1");
    hdfs.createSnapshot(dir2, "s2");
    dirs = hdfs.getSnapshottableDirListing();
    // There are now 2 snapshots for dir2
    assertEquals(dir2, dirs[1].getFullPath());
    assertEquals(2, dirs[1].getSnapshotNumber());
    // Create sub-dirs under dir1
    Path sub1 = new Path(dir1, "sub1");
    Path file1 = new Path(sub1, "file1");
    Path sub2 = new Path(dir1, "sub2");
    Path file2 = new Path(sub2, "file2");
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
    // Make sub1 and sub2 snapshottable
    hdfs.allowSnapshot(sub1);
    hdfs.allowSnapshot(sub2);
    dirs = hdfs.getSnapshottableDirListing();
    assertEquals(4, dirs.length);
    assertEquals(dir1, dirs[0].getFullPath());
    assertEquals(dir2, dirs[1].getFullPath());
    assertEquals(sub1, dirs[2].getFullPath());
    assertEquals(sub2, dirs[3].getFullPath());
    // reset sub1
    hdfs.disallowSnapshot(sub1);
    dirs = hdfs.getSnapshottableDirListing();
    assertEquals(3, dirs.length);
    assertEquals(dir1, dirs[0].getFullPath());
    assertEquals(dir2, dirs[1].getFullPath());
    assertEquals(sub2, dirs[2].getFullPath());
    // Remove dir1, both dir1 and sub2 will be removed
    hdfs.delete(dir1, true);
    dirs = hdfs.getSnapshottableDirListing();
    assertEquals(1, dirs.length);
    assertEquals(dir2.getName(), dirs[0].getDirStatus().getLocalName());
    assertEquals(dir2, dirs[0].getFullPath());
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test(org.junit.Test)
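
For context, a minimal standalone consumer of the same API, assuming an already-initialized DistributedFileSystem handle (the class and method names here are illustrative): getSnapshottableDirListing() returns null rather than an empty array when no directory is snapshottable, which is why the test asserts assertNull(dirs).

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class SnapshottableDirPrinter {
    // Print every snapshottable directory visible to the current user,
    // with its snapshot count and snapshot quota.
    static void print(DistributedFileSystem hdfs) throws IOException {
        SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
        if (dirs == null) {
            // null, not an empty array, signals "no snapshottable directories"
            System.out.println("No snapshottable directories");
            return;
        }
        for (SnapshottableDirectoryStatus d : dirs) {
            System.out.println(d.getFullPath()
                + "\tsnapshots=" + d.getSnapshotNumber()
                + "\tquota=" + d.getSnapshotQuota());
        }
    }
}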

Example 2 with SnapshottableDirectoryStatus

Use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in the Apache Hadoop project.

From the class TestSnapshotMetrics, method testSnapshottableDirs:

/**
   * Test the metrics SnapshottableDirectories, AllowSnapshotOps,
   * DisallowSnapshotOps, and ListSnapshottableDirOps
   */
@Test
public void testSnapshottableDirs() throws Exception {
    cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
    assertGauge("SnapshottableDirectories", 0, getMetrics(NS_METRICS));
    assertCounter("AllowSnapshotOps", 0L, getMetrics(NN_METRICS));
    assertCounter("DisallowSnapshotOps", 0L, getMetrics(NN_METRICS));
    // Allow snapshots for directories, and check the metrics
    hdfs.allowSnapshot(sub1);
    assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
    assertCounter("AllowSnapshotOps", 1L, getMetrics(NN_METRICS));
    Path sub2 = new Path(dir, "sub2");
    Path file = new Path(sub2, "file");
    DFSTestUtil.createFile(hdfs, file, 1024, REPLICATION, seed);
    hdfs.allowSnapshot(sub2);
    assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
    assertCounter("AllowSnapshotOps", 2L, getMetrics(NN_METRICS));
    Path subsub1 = new Path(sub1, "sub1sub1");
    Path subfile = new Path(subsub1, "file");
    DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
    hdfs.allowSnapshot(subsub1);
    assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
    assertCounter("AllowSnapshotOps", 3L, getMetrics(NN_METRICS));
    // Setting an already snapshottable directory snapshottable again should
    // not change the gauge
    hdfs.allowSnapshot(sub1);
    assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
    // But the number of allowSnapshot operations still increases
    assertCounter("AllowSnapshotOps", 4L, getMetrics(NN_METRICS));
    // Disallow the snapshot for snapshottable directories, then check the
    // metrics again
    hdfs.disallowSnapshot(sub1);
    assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
    assertCounter("DisallowSnapshotOps", 1L, getMetrics(NN_METRICS));
    // Delete subsub1; the snapshottable directory count drops to 1
    hdfs.delete(subsub1, true);
    assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
    // list all the snapshottable directories
    SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
    assertEquals(1, status.length);
    assertCounter("ListSnapshottableDirOps", 1L, getMetrics(NN_METRICS));
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test(org.junit.Test)
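
Outside a unit test, the same gauge and counters can be read from the NameNode's /jmx servlet. A rough sketch, assuming a NameNode web UI at localhost:9870 and the FSNamesystem bean name; both the address and the bean names are assumptions to verify against your deployment:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SnapshotMetricsProbe {
    public static void main(String[] args) throws Exception {
        // The JSON response contains the "SnapshottableDirectories" gauge
        // asserted via getMetrics(NS_METRICS) in the test above.
        URL jmx = new URL("http://localhost:9870/jmx"
            + "?qry=Hadoop:service=NameNode,name=FSNamesystem");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(jmx.openStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                if (line.contains("SnapshottableDirectories")) {
                    System.out.println(line.trim());
                }
            }
        }
    }
}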

Example 3 with SnapshottableDirectoryStatus

Use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in the Apache Hadoop project.

From the class NamenodeFsck, method fsck:

/**
   * Check files on DFS, starting from the indicated path.
   */
public void fsck() {
    final long startTime = Time.monotonicNow();
    try {
        if (blockIds != null) {
            String[] blocks = blockIds.split(" ");
            StringBuilder sb = new StringBuilder();
            sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " at " + new Date());
            out.println(sb);
            sb.append(" for blockIds: \n");
            for (String blk : blocks) {
                if (blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
                    out.println("Incorrect blockId format: " + blk);
                    continue;
                }
                out.print("\n");
                blockIdCK(blk);
                sb.append(blk + "\n");
            }
            LOG.info(sb);
            namenode.getNamesystem().logFsckEvent("/", remoteAddress);
            out.flush();
            return;
        }
        String msg = "FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " for path " + path + " at " + new Date();
        LOG.info(msg);
        out.println(msg);
        namenode.getNamesystem().logFsckEvent(path, remoteAddress);
        if (snapshottableDirs != null) {
            SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer().getSnapshottableDirListing();
            if (snapshotDirs != null) {
                for (SnapshottableDirectoryStatus dir : snapshotDirs) {
                    snapshottableDirs.add(dir.getFullPath().toString());
                }
            }
        }
        final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
        if (file != null) {
            if (showCorruptFileBlocks) {
                listCorruptFileBlocks();
                return;
            }
            if (this.showStoragePolcies) {
                storageTypeSummary = new StoragePolicySummary(namenode.getNamesystem().getBlockManager().getStoragePolicies());
            }
            Result replRes = new ReplicationResult(conf);
            Result ecRes = new ErasureCodingResult(conf);
            check(path, file, replRes, ecRes);
            out.print("\nStatus: ");
            out.println(replRes.isHealthy() && ecRes.isHealthy() ? "HEALTHY" : "CORRUPT");
            out.println(" Number of data-nodes:\t" + totalDatanodes);
            out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks());
            out.println(" Total dirs:\t\t\t" + totalDirs);
            out.println(" Total symlinks:\t\t" + totalSymlinks);
            out.println("\nReplicated Blocks:");
            out.println(replRes);
            out.println("\nErasure Coded Block Groups:");
            out.println(ecRes);
            if (this.showStoragePolcies) {
                out.print(storageTypeSummary);
            }
            out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds"));
            // If there were internal errors during the fsck operation, we want
            // to return FAILURE_STATUS, even if those errors were not
            // immediately fatal. Otherwise many unit tests will pass even when
            // there are bugs.
            if (internalError) {
                throw new IOException("fsck encountered internal errors!");
            }
            // DFSck client scans for the string HEALTHY/CORRUPT to check the
            // status of the file system. Changing the output string might break
            // testcases. Also note this must be the last line of the report.
            if (replRes.isHealthy() && ecRes.isHealthy()) {
                out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS);
            } else {
                out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS);
            }
        } else {
            out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS);
        }
    } catch (Exception e) {
        String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
        LOG.warn(errMsg, e);
        out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds"));
        out.println(e.getMessage());
        out.print("\n\n" + errMsg);
    } finally {
        out.close();
    }
}
Also used : IOException(java.io.IOException) Date(java.util.Date) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) AccessControlException(org.apache.hadoop.security.AccessControlException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)
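
The snapshottableDirs list consulted above is only non-null when fsck runs with the -includeSnapshots flag. A hedged sketch of driving the same code path programmatically through the DFSck tool (the flag and entry point exist in Hadoop; the target path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckWithSnapshots {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // -includeSnapshots makes fsck traverse snapshottable directories too,
        // which is when NamenodeFsck fetches the snapshottable dir listing.
        int rc = ToolRunner.run(new DFSck(conf),
            new String[] {"/", "-includeSnapshots"});
        System.exit(rc);
    }
}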

Example 4 with SnapshottableDirectoryStatus

Use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in the Apache Hadoop project.

From the class TestFSImageWithSnapshot, method testSnapshotOnRoot:

/**
   * Test the case where a snapshot is taken on the root directory
   */
@Test
public void testSnapshotOnRoot() throws Exception {
    final Path root = new Path("/");
    hdfs.allowSnapshot(root);
    hdfs.createSnapshot(root, "s1");
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
    // save namespace and restart cluster
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
    INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString()).asDirectory();
    assertTrue("The children list of root should be empty", rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
    // one snapshot on root: s1
    List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
    assertEquals(1, diffList.size());
    Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
    // check SnapshotManager's snapshottable directory list
    assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
    SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager().getSnapshottableDirListing(null);
    assertEquals(root, sdirs[0].getFullPath());
    // save namespace and restart cluster
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
}
Also used : Path(org.apache.hadoop.fs.Path) Snapshot(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test(org.junit.Test)
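
The test repeats the same save-namespace-and-restart sequence three times. A refactoring sketch that extracts it into a helper, using only calls the test already makes (the helper class and method names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

class RestartHelper {
    // Checkpoint the namespace (persisting snapshot state into the fsimage)
    // and restart the cluster on top of that image.
    static MiniDFSCluster saveNamespaceAndRestart(MiniDFSCluster cluster,
            Configuration conf, int numDataNodes) throws Exception {
        DistributedFileSystem fs = cluster.getFileSystem();
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); // saveNamespace requires safe mode
        fs.saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        cluster.shutdown();
        cluster = new MiniDFSCluster.Builder(conf).format(false)
            .numDataNodes(numDataNodes).build();       // reload the saved fsimage
        cluster.waitActive();
        return cluster;
    }
}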

Example 5 with SnapshottableDirectoryStatus

Use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in the Apache Hadoop project.

From the class TestSetQuotaWithSnapshot, method testClearQuota:

/**
   * Test clearing the quota of a snapshottable dir or a dir with snapshots.
   */
@Test
public void testClearQuota() throws Exception {
    final Path dir = new Path("/TestSnapshot");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);
    hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_DONT_SET);
    INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(0, dirNode.getDiffs().asList().size());
    hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1, HdfsConstants.QUOTA_DONT_SET - 1);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(0, dirNode.getDiffs().asList().size());
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(0, dirNode.getDiffs().asList().size());
    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    // clear quota of dir
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    // dir should still be a snapshottable directory
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(1, dirNode.getDiffs().asList().size());
    SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
    assertEquals(1, status.length);
    assertEquals(dir, status[0].getFullPath());
    final Path subDir = new Path(dir, "sub");
    hdfs.mkdirs(subDir);
    hdfs.createSnapshot(dir, "s2");
    final Path file = new Path(subDir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    INode subNode = fsdir.getINode4Write(subDir.toString());
    assertTrue(subNode.asDirectory().isWithSnapshot());
    List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
    assertEquals(1, diffList.size());
    Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
    assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
    List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
    assertEquals(1, createdList.size());
    assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
}
Also used : Path(org.apache.hadoop.fs.Path) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test(org.junit.Test)
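
For context on the constants exercised above: HdfsConstants.QUOTA_DONT_SET leaves a quota value unchanged and HdfsConstants.QUOTA_RESET clears it, which is why the directory keeps its snapshottable flag through every setQuota call. A minimal sketch, assuming an existing DistributedFileSystem handle (the class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class QuotaSketch {
    static void adjustQuotas(DistributedFileSystem hdfs, Path dir) throws IOException {
        // Set only the namespace quota (100 names); QUOTA_DONT_SET keeps the
        // storage-space quota unchanged.
        hdfs.setQuota(dir, 100, HdfsConstants.QUOTA_DONT_SET);
        // Clear both quotas; the snapshottable flag is unaffected, as
        // testClearQuota verifies.
        hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    }
}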

Aggregations

SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) 11
Path (org.apache.hadoop.fs.Path) 7
Test (org.junit.Test) 7
IOException (java.io.IOException) 4
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 3
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory) 3
ArrayList (java.util.ArrayList) 2
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) 2
AccessControlException (org.apache.hadoop.security.AccessControlException) 2
EOFException (java.io.EOFException) 1
File (java.io.File) 1
FileNotFoundException (java.io.FileNotFoundException) 1
SocketException (java.net.SocketException) 1
SocketTimeoutException (java.net.SocketTimeoutException) 1
URISyntaxException (java.net.URISyntaxException) 1
Date (java.util.Date) 1
ServletException (javax.servlet.ServletException) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException) 1
SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)11 Path (org.apache.hadoop.fs.Path)7 Test (org.junit.Test)7 IOException (java.io.IOException)4 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)3 INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)3 ArrayList (java.util.ArrayList)2 DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff)2 AccessControlException (org.apache.hadoop.security.AccessControlException)2 EOFException (java.io.EOFException)1 File (java.io.File)1 FileNotFoundException (java.io.FileNotFoundException)1 SocketException (java.net.SocketException)1 SocketTimeoutException (java.net.SocketTimeoutException)1 URISyntaxException (java.net.URISyntaxException)1 Date (java.util.Date)1 ServletException (javax.servlet.ServletException)1 Configuration (org.apache.hadoop.conf.Configuration)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException)1