Example 26 with INode

Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.

The class DirectoryWithSnapshotFeature, method destroyDstSubtree.

/**
   * Destroy a subtree under a DstReference node.
   * @param reclaimContext Records the blocks and inodes to be reclaimed.
   * @param inode The root of the subtree to destroy.
   * @param snapshot The id of the snapshot being deleted, or
   *                 {@link Snapshot#CURRENT_STATE_ID} for the current state.
   * @param prior The id of the prior snapshot; must not be NO_SNAPSHOT_ID.
   */
public static void destroyDstSubtree(INode.ReclaimContext reclaimContext, INode inode, final int snapshot, final int prior) {
    Preconditions.checkArgument(prior != NO_SNAPSHOT_ID);
    if (inode.isReference()) {
        if (inode instanceof INodeReference.WithName && snapshot != Snapshot.CURRENT_STATE_ID) {
            // this inode has been renamed before the deletion of the DstReference
            // subtree
            inode.cleanSubtree(reclaimContext, snapshot, prior);
        } else {
            // for DstReference node, continue this process to its subtree
            destroyDstSubtree(reclaimContext, inode.asReference().getReferredINode(), snapshot, prior);
        }
    } else if (inode.isFile()) {
        inode.cleanSubtree(reclaimContext, snapshot, prior);
    } else if (inode.isDirectory()) {
        Map<INode, INode> excludedNodes = null;
        INodeDirectory dir = inode.asDirectory();
        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
        if (sf != null) {
            DirectoryDiffList diffList = sf.getDiffs();
            DirectoryDiff priorDiff = diffList.getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
                excludedNodes = cloneDiffList(dList);
            }
            if (snapshot != Snapshot.CURRENT_STATE_ID) {
                diffList.deleteSnapshotDiff(reclaimContext, snapshot, prior, dir);
            }
            priorDiff = diffList.getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                priorDiff.diff.destroyCreatedList(reclaimContext, dir);
            }
        }
        for (INode child : inode.asDirectory().getChildrenList(prior)) {
            if (excludedNodes != null && excludedNodes.containsKey(child)) {
                continue;
            }
            destroyDstSubtree(reclaimContext, child, snapshot, prior);
        }
    }
}
Also used: INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INode (org.apache.hadoop.hdfs.server.namenode.INode), INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference)
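
The method's core is a three-way dispatch on the node kind. Below is a minimal standalone sketch of that pattern, with hypothetical Node/RefNode/DirNode types standing in for the Hadoop INode hierarchy: references are unwrapped and followed, files are cleaned directly, and directories recurse into every child not already covered by a prior snapshot diff.

import java.util.*;

abstract class Node {
    boolean isReference() { return false; }
    boolean isFile()      { return false; }
    boolean isDirectory() { return false; }
}

class FileNode extends Node {
    @Override boolean isFile() { return true; }
}

class RefNode extends Node {
    final Node referred;
    RefNode(Node referred) { this.referred = referred; }
    @Override boolean isReference() { return true; }
}

class DirNode extends Node {
    final List<Node> children = new ArrayList<>();
    // Children already recorded as deleted in the prior snapshot diff.
    final Set<Node> deletedInPriorDiff = new HashSet<>();
    @Override boolean isDirectory() { return true; }
}

public class DestroySubtreeSketch {
    static void destroySubtree(Node node) {
        if (node.isReference()) {
            // Follow the reference to the node it points at, as the real
            // method does with inode.asReference().getReferredINode().
            destroySubtree(((RefNode) node).referred);
        } else if (node.isFile()) {
            System.out.println("clean file " + node);
        } else if (node.isDirectory()) {
            DirNode dir = (DirNode) node;
            for (Node child : dir.children) {
                // Skip children the prior diff already accounts for,
                // mirroring the excludedNodes check above.
                if (dir.deletedInPriorDiff.contains(child)) {
                    continue;
                }
                destroySubtree(child);
            }
        }
    }

    public static void main(String[] args) {
        DirNode root = new DirNode();
        root.children.add(new FileNode());
        root.children.add(new RefNode(new FileNode()));
        destroySubtree(root);
    }
}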

Example 27 with INode

Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.

The class SnapshotFSImageFormat, method loadDirectoryDiff.

/**
   * Load {@link DirectoryDiff} from fsimage.
   * @param parent The directory that the SnapshotDiff belongs to.
   * @param in The {@link DataInput} instance to read.
   * @param loader The {@link Loader} instance that this loading procedure is 
   *               using.
   * @return A {@link DirectoryDiff}.
   */
private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent, DataInput in, FSImageFormat.Loader loader) throws IOException {
    // 1. Read the full path of the Snapshot root to identify the Snapshot
    final Snapshot snapshot = loader.getSnapshot(in);
    // 2. Load DirectoryDiff#childrenSize
    int childrenSize = in.readInt();
    // 3. Load DirectoryDiff#snapshotINode 
    INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff(snapshot, in, loader);
    // 4. Load the created list in SnapshotDiff#Diff
    List<INode> createdList = loadCreatedList(parent, in);
    // 5. Load the deleted list in SnapshotDiff#Diff
    List<INode> deletedList = loadDeletedList(parent, createdList, in, loader);
    // 6. Compose the SnapshotDiff
    List<DirectoryDiff> diffs = parent.getDiffs().asList();
    DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode, diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList, deletedList, snapshotINode == snapshot.getRoot());
    return sdiff;
}
Also used: INodeDirectoryAttributes (org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes), INode (org.apache.hadoop.hdfs.server.namenode.INode), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff)
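
Steps 1 through 6 read the fields back in exactly the order the image saver wrote them, with each list preceded by its length. A self-contained sketch of that length-prefixed read pattern using plain java.io (the strings and field layout here are made up for illustration, not the actual fsimage format):

import java.io.*;
import java.util.*;

public class LengthPrefixedReadSketch {
    // Read a list that was written as (int size, then size elements),
    // the same shape as the created/deleted lists in a DirectoryDiff.
    static List<String> loadList(DataInput in) throws IOException {
        int size = in.readInt();
        List<String> items = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            items.add(in.readUTF());
        }
        return items;
    }

    public static void main(String[] args) throws IOException {
        // Write a record, then read it back in the same field order.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeInt(2);
        out.writeUTF("created-a");
        out.writeUTF("created-b");
        DataInput in = new DataInputStream(
                new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(loadList(in)); // [created-a, created-b]
    }
}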

Example 28 with INode

Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.

The class SnapshotFSImageFormat, method loadDeletedList.

/**
   * Load the deleted list from the fsimage.
   * 
   * @param parent The directory that the deleted list belongs to.
   * @param createdList The created list associated with the deleted list in 
   *                    the same Diff.
   * @param in The {@link DataInput} to read.
   * @param loader The {@link Loader} instance.
   * @return The deleted list.
   */
private static List<INode> loadDeletedList(INodeDirectory parent, List<INode> createdList, DataInput in, FSImageFormat.Loader loader) throws IOException {
    int deletedSize = in.readInt();
    List<INode> deletedList = new ArrayList<INode>(deletedSize);
    for (int i = 0; i < deletedSize; i++) {
        final INode deleted = loader.loadINodeWithLocalName(true, in, true);
        deletedList.add(deleted);
        // set parent: the parent field of an INode in the deleted list is not 
        // useful, but set the parent here to be consistent with the original 
        // fsdir tree.
        deleted.setParent(parent);
        if (deleted.isFile()) {
            loader.updateBlocksMap(deleted.asFile());
        }
    }
    return deletedList;
}
Also used: INode (org.apache.hadoop.hdfs.server.namenode.INode), ArrayList (java.util.ArrayList)
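
The loop above deserializes each entry and then immediately repairs state the on-disk form does not carry (the parent pointer, the blocks map). A small standalone sketch of the same load-then-fix-up pattern, with hypothetical Item and Dir types standing in for INode and INodeDirectory:

import java.util.*;

public class DeletedListFixupSketch {
    static class Dir {
        final String name;
        Dir(String name) { this.name = name; }
    }

    static class Item {
        final String name;
        final boolean isFile;
        Dir parent; // not serialized; restored after loading
        Item(String name, boolean isFile) { this.name = name; this.isFile = isFile; }
    }

    // Stand-in for loader.loadINodeWithLocalName(...).
    static Item deserialize(Iterator<String> raw) {
        String token = raw.next();
        return new Item(token, token.endsWith(".dat"));
    }

    static List<Item> loadDeletedList(Dir parent, Iterator<String> raw, int size) {
        List<Item> deleted = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            Item item = deserialize(raw);
            deleted.add(item);
            // Fix up state the serialized form omits, just as the real code
            // calls setParent() and updateBlocksMap() per element.
            item.parent = parent;
            if (item.isFile) {
                System.out.println("register blocks for " + item.name
                        + " under " + parent.name);
            }
        }
        return deleted;
    }

    public static void main(String[] args) {
        List<Item> out = loadDeletedList(new Dir("/snapdir"),
                Arrays.asList("a.dat", "subdir").iterator(), 2);
        System.out.println(out.size() + " deleted entries restored");
    }
}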

Example 29 with INode

Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.

The class TestErasureCodingPolicies, method testBasicSetECPolicy.

@Test
public void testBasicSetECPolicy() throws IOException, InterruptedException {
    final Path testDir = new Path("/ec");
    fs.mkdir(testDir, FsPermission.getDirDefault());
    /* Normal creation of an erasure coding directory */
    fs.setErasureCodingPolicy(testDir, EC_POLICY.getName());
    /* Verify files under the directory are striped */
    final Path ECFilePath = new Path(testDir, "foo");
    fs.create(ECFilePath);
    INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString());
    assertTrue(inode.asFile().isStriped());
    /**
     * Verify that setting EC policy on non-empty directory only affects
     * newly created files under the directory.
     */
    final Path notEmpty = new Path("/nonEmpty");
    fs.mkdir(notEmpty, FsPermission.getDirDefault());
    final Path oldFile = new Path(notEmpty, "old");
    fs.create(oldFile);
    fs.setErasureCodingPolicy(notEmpty, EC_POLICY.getName());
    final Path newFile = new Path(notEmpty, "new");
    fs.create(newFile);
    INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString());
    assertFalse(oldInode.asFile().isStriped());
    INode newInode = namesystem.getFSDirectory().getINode(newFile.toString());
    assertTrue(newInode.asFile().isStriped());
    /* Verify that nested EC policies are supported */
    final Path dir1 = new Path("/dir1");
    final Path dir2 = new Path(dir1, "dir2");
    fs.mkdir(dir1, FsPermission.getDirDefault());
    fs.setErasureCodingPolicy(dir1, EC_POLICY.getName());
    fs.mkdir(dir2, FsPermission.getDirDefault());
    try {
        fs.setErasureCodingPolicy(dir2, EC_POLICY.getName());
    } catch (IOException e) {
        fail("Nested erasure coding policies are supported");
    }
    /* Verify that EC policy cannot be set on a file */
    final Path fPath = new Path("/file");
    fs.create(fPath);
    try {
        fs.setErasureCodingPolicy(fPath, EC_POLICY.getName());
        fail("Erasure coding policy on file");
    } catch (IOException e) {
        assertExceptionContains("erasure coding policy for a file", e);
    }
    // Verify that policies are successfully loaded even when policies
    // are disabled
    cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, "");
    cluster.restartNameNodes();
    cluster.waitActive();
    // No policies should be enabled after restart
    Assert.assertTrue("No policies should be enabled after restart", fs.getAllErasureCodingPolicies().isEmpty());
    // Already set directory-level policies should still be in effect
    Path disabledPolicy = new Path(dir1, "afterDisabled");
    Assert.assertEquals("Dir does not have policy set", EC_POLICY, fs.getErasureCodingPolicy(dir1));
    fs.create(disabledPolicy).close();
    Assert.assertEquals("File did not inherit dir's policy", EC_POLICY, fs.getErasureCodingPolicy(disabledPolicy));
    // Also check loading disabled EC policies from fsimage
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNodes();
    Assert.assertEquals("Dir does not have policy set", EC_POLICY, fs.getErasureCodingPolicy(dir1));
    Assert.assertEquals("File does not have policy set", EC_POLICY, fs.getErasureCodingPolicy(disabledPolicy));
}
Also used: Path (org.apache.hadoop.fs.Path), INode (org.apache.hadoop.hdfs.server.namenode.INode), IOException (java.io.IOException), Test (org.junit.Test)
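
The try/fail/catch blocks in this test are the classic pre-assertThrows idiom. On JUnit 4.13 or later, the file-rejection check could be expressed with Assert.assertThrows instead; a self-contained sketch (setPolicyOnFile is a dummy stand-in, since the real call needs a running MiniDFSCluster):

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.junit.Test;

public class AssertThrowsSketch {
    // Dummy stand-in for fs.setErasureCodingPolicy(filePath, ...) on a file.
    static void setPolicyOnFile() throws IOException {
        throw new IOException("Attempt to set an erasure coding policy for a file");
    }

    @Test
    public void rejectsPolicyOnFile() {
        IOException e = assertThrows(IOException.class,
                AssertThrowsSketch::setPolicyOnFile);
        assertTrue(e.getMessage().contains("erasure coding policy for a file"));
    }
}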

Example 30 with INode

Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.

The class TestRollingUpgradeRollback, method testRollbackCommand.

@Test
public void testRollbackCommand() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final DFSAdmin dfsadmin = new DFSAdmin(conf);
        dfs.mkdirs(foo);
        // start rolling upgrade
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        Assert.assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // create new directory
        dfs.mkdirs(bar);
        // check NNStorage
        NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
        // (startSegment, mkdir, endSegment) 
        checkNNStorage(storage, 3, -1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    NameNode nn = null;
    try {
        nn = NameNode.createNameNode(new String[] { "-rollingUpgrade", "rollback" }, conf);
        // make sure /foo is still there, but /bar is not
        INode fooNode = nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString());
        Assert.assertNotNull(fooNode);
        INode barNode = nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString());
        Assert.assertNull(barNode);
        // check the details of NNStorage
        NNStorage storage = nn.getNamesystem().getFSImage().getStorage();
        // (startSegment, upgrade marker, mkdir, endSegment)
        checkNNStorage(storage, 3, 7);
    } finally {
        if (nn != null) {
            nn.stop();
            nn.join();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), INode (org.apache.hadoop.hdfs.server.namenode.INode), Configuration (org.apache.hadoop.conf.Configuration), NNStorage (org.apache.hadoop.hdfs.server.namenode.NNStorage), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
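
Both try/finally blocks use the same guarded-shutdown pattern: acquire the resource inside the try, null-check it in the finally so a failure during startup cannot turn into a NullPointerException during cleanup. A minimal sketch with a stand-in Resource type:

public class GuardedShutdownSketch {
    static class Resource {
        void use()      { System.out.println("working"); }
        void shutdown() { System.out.println("shut down"); }
    }

    public static void main(String[] args) {
        Resource r = null;
        try {
            r = new Resource(); // construction may fail before r is assigned
            r.use();
        } finally {
            if (r != null) {    // r stays null if startup failed, so guard it
                r.shutdown();
            }
        }
    }
}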

Aggregations

INode (org.apache.hadoop.hdfs.server.namenode.INode): 40
Test (org.junit.Test): 23
Path (org.apache.hadoop.fs.Path): 22
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 21
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 15
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 11
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 11
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 8
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 8
IOException (java.io.IOException): 6
ArrayList (java.util.ArrayList): 6
WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount): 5
ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff): 5
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 3
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 3
DiffReportEntry (org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry): 2
SnapshotAndINode (org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode): 2
ArrayDeque (java.util.ArrayDeque): 1
Date (java.util.Date): 1