Example 31 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestSnapshotReplication, method checkSnapshotFileReplication.

/**
   * Check the replication for both the current file and all of its prior
   * snapshots.
   *
   * @param currentFile
   *          the Path of the current file
   * @param snapshotRepMap
   *          a map from each snapshot path of the current file to the
   *          replication factor expected to be stored in its INode
   * @param expectedBlockRep
   *          the replication factor expected on every block; blocks are shared
   *          across snapshots, so this is the maximum over the file and its
   *          snapshots
   * @throws Exception
   */
private void checkSnapshotFileReplication(Path currentFile, Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
    // First check the replication stored on each block of currentFile's
    // INode; block-level replication is shared with the snapshots.
    final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
    for (BlockInfo b : inodeOfCurrentFile.getBlocks()) {
        assertEquals(expectedBlockRep, b.getReplication());
    }
    // Then check replication for every snapshot
    for (Path ss : snapshotRepMap.keySet()) {
        final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), DirOp.READ);
        final INodeFile ssInode = iip.getLastINode().asFile();
        // Shared blocks always report expectedBlockRep, regardless of snapshot
        for (BlockInfo b : ssInode.getBlocks()) {
            assertEquals(expectedBlockRep, b.getReplication());
        }
        // Also check the number derived from INodeFile#getFileReplication
        assertEquals(snapshotRepMap.get(ss).shortValue(), ssInode.getFileReplication(iip.getPathSnapshotId()));
    }
}
Also used: Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
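A hedged sketch of how a test might drive this helper, in the spirit of TestSnapshotReplication; hdfs, the snapshottable directory sub1, and the file file1 are assumed fixtures here, not quoted from the class. Because blocks are shared between the live file and its snapshots, every block reports the maximum replication, while each snapshot's INode keeps the factor recorded at snapshot time.

// Sketch only: hdfs, sub1 and file1 are assumed fixtures.
Map<Path, Short> snapshotRepMap = new HashMap<>();
short fileRep = 1;
for (int i = 1; i <= 3; i++) {
    // Take a snapshot while the live file still has replication fileRep ...
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
    Path snapshotFile = SnapshotTestHelper.getSnapshotPath(sub1, "s" + i, file1.getName());
    // ... so this snapshot should keep reporting fileRep.
    snapshotRepMap.put(snapshotFile, fileRep);
    // Then raise the replication of the live file.
    fileRep++;
    hdfs.setReplication(file1, fileRep);
}
// Blocks report the current (maximum) factor; snapshot INodes report the recorded ones.
checkSnapshotFileReplication(file1, snapshotRepMap, fileRep);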

Example 32 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameFileAcrossSnapshottableDirs.

/**
   * Rename a single file across snapshottable dirs.
   */
@Test(timeout = 60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir2, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    hdfs.createSnapshot(sdir1, "s3");
    final Path newfoo = new Path(sdir1, "foo");
    hdfs.rename(foo, newfoo);
    // change the replication factor of foo
    hdfs.setReplication(newfoo, REPL_1);
    // /dir2/.snapshot/s2/foo should still work
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
    assertTrue(hdfs.exists(foo_s2));
    FileStatus status = hdfs.getFileStatus(foo_s2);
    assertEquals(REPL, status.getReplication());
    final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
    assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
Also used: Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) FileStatus(org.apache.hadoop.fs.FileStatus) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)
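The test already verifies that the snapshot view keeps the old replication factor; the complementary check, that the live file reports the new one, would be a one-line hedged addition reusing the test's own variables:

// Hedged addition: the live file should now carry REPL_1, in contrast to
// the REPL still visible through /dir2/.snapshot/s2/foo.
assertEquals(REPL_1, hdfs.getFileStatus(newfoo).getReplication());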

Example 33 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestOfflineImageViewerWithStripedBlocks, method testFileSize.

private void testFileSize(int numBytes) throws IOException, UnresolvedLinkException, SnapshotAccessControlException {
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File orgFsimage = null;
    Path file = new Path("/eczone/striped");
    FSDataOutputStream out = fs.create(file, true);
    byte[] bytes = DFSTestUtil.generateSequentialBytes(0, numBytes);
    out.write(bytes);
    out.close();
    // Write results to the fsimage file
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    fs.saveNamespace();
    // Determine location of fsimage file
    orgFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (orgFsimage == null) {
        throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
    String fileStatus = loader.getFileStatus("/eczone/striped");
    long expectedFileSize = bytes.length;
    // Verify the space consumed as recorded in the striped block infos
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(), fileNode.getErasureCodingPolicyID());
    assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
    long actualFileSize = 0;
    for (BlockInfo blockInfo : fileNode.getBlocks()) {
        assertTrue("Didn't find block striped information", blockInfo instanceof BlockInfoStriped);
        actualFileSize += blockInfo.getNumBytes();
    }
    assertEquals("Wrongly computed file size contains striped blocks", expectedFileSize, actualFileSize);
    // Verify space consumed present in filestatus
    String EXPECTED_FILE_SIZE = "\"length\":" + String.valueOf(expectedFileSize);
    assertTrue("Wrongly computed file size contains striped blocks, file status:" + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE, fileStatus.contains(EXPECTED_FILE_SIZE));
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) File(java.io.File) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
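The helper is presumably driven with sizes around the erasure-coding cell and stripe boundaries. A hedged sketch of such a driver follows; the boundary choices and the method name are assumptions, though StripedFileTestUtil.getDefaultECPolicy() is used by the snippet itself:

@Test(timeout = 60000)
public void testFileSizeBoundaries() throws Exception {
    final int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
    final int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
    // Below one cell, exactly one cell, one full stripe, and just past a stripe.
    testFileSize(cellSize - 1);
    testFileSize(cellSize);
    testFileSize(cellSize * dataBlocks);
    testFileSize(cellSize * dataBlocks + 123);
}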

Example 34 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class DirectorySnapshottableFeature, method computeDiffRecursively.

/**
   * Recursively compute the difference between snapshots under a given
   * directory/file.
   * @param snapshotRoot The directory where snapshots were taken.
   * @param node The directory/file under which the diff is computed.
   * @param parentPath Relative path (with respect to the snapshot root) of
   *                   the node's parent.
   * @param diffReport data structure used to store the diff.
   */
private void computeDiffRecursively(final INodeDirectory snapshotRoot, INode node, List<byte[]> parentPath, SnapshotDiffInfo diffReport) {
    final Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.getFrom() : diffReport.getTo();
    final Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.getTo() : diffReport.getFrom();
    byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
    if (node.isDirectory()) {
        final ChildrenDiff diff = new ChildrenDiff();
        INodeDirectory dir = node.asDirectory();
        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
        if (sf != null) {
            boolean change = sf.computeDiffBetweenSnapshots(earlierSnapshot, laterSnapshot, diff, dir);
            if (change) {
                diffReport.addDirDiff(dir, relativePath, diff);
            }
        }
        ReadOnlyList<INode> children = dir.getChildrenList(earlierSnapshot.getId());
        for (INode child : children) {
            final byte[] name = child.getLocalNameBytes();
            // Skip a child already recorded in the deleted list of the diff,
            // unless it turns out to be the source of a rename.
            boolean toProcess = diff.searchIndex(ListType.DELETED, name) < 0;
            if (!toProcess && child instanceof INodeReference.WithName) {
                byte[][] renameTargetPath = findRenameTargetPath(snapshotRoot, (WithName) child, laterSnapshot == null ? Snapshot.CURRENT_STATE_ID : laterSnapshot.getId());
                if (renameTargetPath != null) {
                    toProcess = true;
                    diffReport.setRenameTarget(child.getId(), renameTargetPath);
                }
            }
            if (toProcess) {
                parentPath.add(name);
                computeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
                parentPath.remove(parentPath.size() - 1);
            }
        }
    } else if (node.isFile() && node.asFile().isWithSnapshot()) {
        INodeFile file = node.asFile();
        boolean change = file.getFileWithSnapshotFeature().changedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
        if (change) {
            diffReport.addFileDiff(file, relativePath);
        }
    }
}
Also used: INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotAndINode(org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode) INode(org.apache.hadoop.hdfs.server.namenode.INode) WithName(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
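computeDiffRecursively is private; the public entry point that reaches it is the snapshot-diff API on DistributedFileSystem. A hedged sketch of that call, where the directory and snapshot names are made up and dfs is assumed to come from a MiniDFSCluster; an empty toSnapshot argument means the current state:

DistributedFileSystem dfs = cluster.getFileSystem();
// Compare snapshot s1 of a snapshottable directory against the live tree.
SnapshotDiffReport report =
    dfs.getSnapshotDiffReport(new Path("/snapshottableDir"), "s1", "");
for (SnapshotDiffReport.DiffReportEntry entry : report.getDiffList()) {
    // Entries describe a modification, creation, deletion, or rename per path.
    System.out.println(entry);
}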

Example 35 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class DirectoryWithSnapshotFeature, method cleanDeletedINode.

/**
   * Clean an inode while we move it from the deleted list of post to the
   * deleted list of prior.
   * @param reclaimContext blocks and inodes that need to be reclaimed
   * @param inode The inode to clean.
   * @param post The id of the post snapshot.
   * @param prior The id of the prior snapshot.
   */
private static void cleanDeletedINode(INode.ReclaimContext reclaimContext, INode inode, final int post, final int prior) {
    Deque<INode> queue = new ArrayDeque<>();
    queue.addLast(inode);
    while (!queue.isEmpty()) {
        INode topNode = queue.pollFirst();
        if (topNode instanceof INodeReference.WithName) {
            INodeReference.WithName wn = (INodeReference.WithName) topNode;
            if (wn.getLastSnapshotId() >= post) {
                INodeReference.WithCount wc = (INodeReference.WithCount) wn.getReferredINode();
                if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
                    // This wn is the last WithName inside the wc, and the
                    // dstRef node has also been deleted, so treat the referred
                    // file/dir as a regular inode.
                    queue.add(wc.getReferredINode());
                } else {
                    wn.cleanSubtree(reclaimContext, post, prior);
                }
            }
        // For a DstReference node, the node is not in the created list of
        // prior, so treat it as a regular file/dir.
        } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
            INodeFile file = topNode.asFile();
            file.getDiffs().deleteSnapshotDiff(reclaimContext, post, prior, file);
        } else if (topNode.isDirectory()) {
            INodeDirectory dir = topNode.asDirectory();
            ChildrenDiff priorChildrenDiff = null;
            DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
            if (sf != null) {
                // Delete files/dirs created after prior. Note that these
                // files/dirs, along with the inode being processed, were
                // deleted right after post.
                DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
                if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                    priorChildrenDiff = priorDiff.getChildrenDiff();
                    priorChildrenDiff.destroyCreatedList(reclaimContext, dir);
                }
            }
            for (INode child : dir.getChildrenList(prior)) {
                if (priorChildrenDiff != null && priorChildrenDiff.search(ListType.DELETED, child.getLocalNameBytes()) != null) {
                    continue;
                }
                queue.addLast(child);
            }
        }
    }
}
Also used: INode(org.apache.hadoop.hdfs.server.namenode.INode) ArrayDeque(java.util.ArrayDeque) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory)
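cleanDeletedINode runs during snapshot deletion, when inodes recorded in the post snapshot's deleted list are reconciled against the prior snapshot. A hedged sketch of a user-visible sequence that exercises this path; dir and subFile are assumed to exist, with subFile inside dir:

// Hypothetical sequence; hdfs is a DistributedFileSystem handle.
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s1");
hdfs.createSnapshot(dir, "s2");
// Deleted after s2, so the file lands in s2's deleted list.
hdfs.delete(subFile, true);
// Deleting s2 moves that entry to s1's deleted list: post = s2, prior = s1.
hdfs.deleteSnapshot(dir, "s2");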

Aggregations

INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile) 35
Path (org.apache.hadoop.fs.Path) 24
Test (org.junit.Test) 23
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) 14
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory) 10
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath) 9
INode (org.apache.hadoop.hdfs.server.namenode.INode) 7
FileStatus (org.apache.hadoop.fs.FileStatus) 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 6
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference) 5
TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile) 5
IOException (java.io.IOException) 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 4
Block (org.apache.hadoop.hdfs.protocol.Block) 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 4
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem) 4
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) 4
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 3
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory) 3
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) 3