Example 36 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache, in class TestNestedSnapshots, method testIdCmp.

/**
   * Test {@link Snapshot#ID_COMPARATOR}.
   */
@Test(timeout = 300000)
public void testIdCmp() {
    final PermissionStatus perm = PermissionStatus.createImmutable("user", "group", FsPermission.createImmutable((short) 0));
    final INodeDirectory snapshottable = new INodeDirectory(0, DFSUtil.string2Bytes("foo"), perm, 0L);
    snapshottable.addSnapshottableFeature();
    final Snapshot[] snapshots = {
        new Snapshot(1, "s1", snapshottable), new Snapshot(1, "s1", snapshottable),
        new Snapshot(2, "s2", snapshottable), new Snapshot(2, "s2", snapshottable)
    };
    Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
    for (Snapshot s : snapshots) {
        Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) > 0);
        Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) < 0);
        for (Snapshot t : snapshots) {
            final int expected = s.getRoot().getLocalName().compareTo(t.getRoot().getLocalName());
            final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
            Assert.assertEquals(expected > 0, computed > 0);
            Assert.assertEquals(expected == 0, computed == 0);
            Assert.assertEquals(expected < 0, computed < 0);
        }
    }
}
Also used: INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus) Test(org.junit.Test)
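
The assertions above pin down the comparator's contract: compare(null, null) is 0, null is ordered after every non-null snapshot, and for non-null snapshots the ordering by id agrees with the ordering by root name used as the expected value. A minimal standalone sketch of a comparator with the same contract, written against a hypothetical SimpleSnapshot class rather than the Hadoop Snapshot, might look like this:

import java.util.Comparator;

// Hypothetical stand-in illustrating the ordering contract asserted above
// (this is not the Hadoop implementation).
public class SnapshotIdComparatorSketch {

    static final class SimpleSnapshot {
        final int id;
        SimpleSnapshot(int id) { this.id = id; }
    }

    // Orders snapshots by id; null sorts after every non-null snapshot.
    static final Comparator<SimpleSnapshot> ID_COMPARATOR = (a, b) -> {
        if (a == b) return 0;       // covers compare(null, null) == 0
        if (a == null) return 1;    // null is treated as the largest value
        if (b == null) return -1;
        return Integer.compare(a.id, b.id);
    };

    public static void main(String[] args) {
        SimpleSnapshot s1 = new SimpleSnapshot(1);
        SimpleSnapshot s2 = new SimpleSnapshot(2);
        System.out.println(ID_COMPARATOR.compare(null, s1) > 0);  // true
        System.out.println(ID_COMPARATOR.compare(s1, null) < 0);  // true
        System.out.println(ID_COMPARATOR.compare(s1, s2) < 0);    // true
    }
}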

Example 37 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache, in class TestRenameWithSnapshots, method testRenameFileAcrossSnapshottableDirs.

/**
   * Rename a single file across snapshottable dirs.
   */
@Test(timeout = 60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir2, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    hdfs.createSnapshot(sdir1, "s3");
    final Path newfoo = new Path(sdir1, "foo");
    hdfs.rename(foo, newfoo);
    // change the replication factor of foo
    hdfs.setReplication(newfoo, REPL_1);
    // /dir2/.snapshot/s2/foo should still work
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
    assertTrue(hdfs.exists(foo_s2));
    FileStatus status = hdfs.getFileStatus(foo_s2);
    assertEquals(REPL, status.getReplication());
    final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
    assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
Also used: Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) FileStatus(org.apache.hadoop.fs.FileStatus) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)
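
The test verifies that a file renamed out of a snapshottable directory stays visible, with the replication factor it had at snapshot time, through the snapshot taken before the rename. A client-side sketch of the same scenario is shown below; it is only a sketch and assumes fs.defaultFS points at a running HDFS NameNode, that /dir1 exists, that /dir2 has been made snapshottable (for example with hdfs dfsadmin -allowSnapshot /dir2), and that /dir2/foo already exists:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameAcrossSnapshottableDirsSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        // Snapshot /dir2 before moving the file out of it.
        fs.createSnapshot(new Path("/dir2"), "s2");
        fs.rename(new Path("/dir2/foo"), new Path("/dir1/foo"));

        // The renamed file is still reachable through the snapshot path,
        // with the attributes it had when the snapshot was taken.
        FileStatus status = fs.getFileStatus(new Path("/dir2/.snapshot/s2/foo"));
        System.out.println("replication at snapshot time: " + status.getReplication());
    }
}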

Example 38 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache, in class TestRenameWithSnapshots, method testRenameFromNonSDir2SDir.

/**
   * Test rename from a non-snapshottable dir to a snapshottable dir
   */
@Test(timeout = 60000)
public void testRenameFromNonSDir2SDir() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir1, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, snap1);
    final Path newfoo = new Path(sdir2, "foo");
    hdfs.rename(foo, newfoo);
    INode fooNode = fsdir.getINode4Write(newfoo.toString());
    assertTrue(fooNode instanceof INodeDirectory);
}
Also used: Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) Test(org.junit.Test)
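
Here the rename moves an entire subtree from a non-snapshottable directory into a snapshottable one; once inside, snapshots taken on /dir2 record it. A hedged client-side sketch that makes this visible through a snapshot diff report follows; it assumes a running cluster where /dir2 already exists and is snapshottable and where /dir1/foo exists:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class RenameIntoSnapshottableDirSketch {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path sdir2 = new Path("/dir2");

        dfs.createSnapshot(sdir2, "before-rename");
        dfs.rename(new Path("/dir1/foo"), new Path("/dir2/foo"));
        dfs.createSnapshot(sdir2, "after-rename");

        // The diff between the two snapshots should report "foo" as created under /dir2.
        SnapshotDiffReport report =
            dfs.getSnapshotDiffReport(sdir2, "before-rename", "after-rename");
        System.out.println(report);
    }
}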

Example 39 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache, in class DirectorySnapshottableFeature, method computeDiffRecursively.

/**
   * Recursively compute the difference between snapshots under a given
   * directory/file.
   * @param snapshotRoot The directory where snapshots were taken.
   * @param node The directory/file under which the diff is computed.
   * @param parentPath Path of the node's parent, relative to the snapshot
   *                   root.
   * @param diffReport data structure used to store the diff.
   */
private void computeDiffRecursively(final INodeDirectory snapshotRoot, INode node, List<byte[]> parentPath, SnapshotDiffInfo diffReport) {
    final Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.getFrom() : diffReport.getTo();
    final Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.getTo() : diffReport.getFrom();
    byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
    if (node.isDirectory()) {
        final ChildrenDiff diff = new ChildrenDiff();
        INodeDirectory dir = node.asDirectory();
        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
        if (sf != null) {
            boolean change = sf.computeDiffBetweenSnapshots(earlierSnapshot, laterSnapshot, diff, dir);
            if (change) {
                diffReport.addDirDiff(dir, relativePath, diff);
            }
        }
        ReadOnlyList<INode> children = dir.getChildrenList(earlierSnapshot.getId());
        for (INode child : children) {
            final byte[] name = child.getLocalNameBytes();
            boolean toProcess = diff.searchIndex(ListType.DELETED, name) < 0;
            if (!toProcess && child instanceof INodeReference.WithName) {
                byte[][] renameTargetPath = findRenameTargetPath(snapshotRoot, (WithName) child, laterSnapshot == null ? Snapshot.CURRENT_STATE_ID : laterSnapshot.getId());
                if (renameTargetPath != null) {
                    toProcess = true;
                    diffReport.setRenameTarget(child.getId(), renameTargetPath);
                }
            }
            if (toProcess) {
                parentPath.add(name);
                computeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
                parentPath.remove(parentPath.size() - 1);
            }
        }
    } else if (node.isFile() && node.asFile().isWithSnapshot()) {
        INodeFile file = node.asFile();
        boolean change = file.getFileWithSnapshotFeature().changedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
        if (change) {
            diffReport.addFileDiff(file, relativePath);
        }
    }
}
Also used: INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotAndINode(org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode) INode(org.apache.hadoop.hdfs.server.namenode.INode) WithName(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
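
The key detail in the traversal is how the relative path is maintained: the child's name is appended to parentPath just before the recursive call and removed right after it, so every node sees its own path without any per-node array copying. The simplified sketch below shows the same pattern over a hypothetical Map-backed tree, independent of the HDFS inode and snapshot classes:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PathAccumulatingWalkSketch {

    // Hypothetical stand-in for an inode: a node with named children.
    static final class Node {
        final Map<String, Node> children = new LinkedHashMap<>();
        Node child(String name) {
            return children.computeIfAbsent(name, n -> new Node());
        }
    }

    // Visits every node, building its relative path incrementally,
    // analogous to how computeDiffRecursively builds relativePath.
    static void visit(Node node, List<String> parentPath) {
        System.out.println("/" + String.join("/", parentPath));
        for (Map.Entry<String, Node> e : node.children.entrySet()) {
            parentPath.add(e.getKey());                 // push child name
            visit(e.getValue(), parentPath);            // recurse with the extended path
            parentPath.remove(parentPath.size() - 1);   // pop on the way back up
        }
    }

    public static void main(String[] args) {
        Node root = new Node();
        root.child("dir1").child("foo");
        root.child("dir2");
        visit(root, new ArrayList<>());
    }
}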

Example 40 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache, in class DirectorySnapshottableFeature, method renameSnapshot.

/**
   * Rename a snapshot
   * @param path
   *          The directory path where the snapshot was taken. Used for
   *          generating exception message.
   * @param oldName
   *          Old name of the snapshot
   * @param newName
   *          New name the snapshot will be renamed to
   * @throws SnapshotException
   *           Thrown when either the snapshot with the old name does not
   *           exist or a snapshot with the new name already exists.
   */
public void renameSnapshot(String path, String oldName, String newName) throws SnapshotException {
    if (newName.equals(oldName)) {
        return;
    }
    final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(oldName));
    if (indexOfOld < 0) {
        throw new SnapshotException("The snapshot " + oldName + " does not exist for directory " + path);
    } else {
        final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
        int indexOfNew = searchSnapshot(newNameBytes);
        if (indexOfNew >= 0) {
            throw new SnapshotException("The snapshot " + newName + " already exists for directory " + path);
        }
        // remove the one with old name from snapshotsByNames
        Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
        final INodeDirectory ssRoot = snapshot.getRoot();
        ssRoot.setLocalName(newNameBytes);
        indexOfNew = -indexOfNew - 1;
        if (indexOfNew <= indexOfOld) {
            snapshotsByNames.add(indexOfNew, snapshot);
        } else {
            // indexOfNew > indexOfOld
            snapshotsByNames.add(indexOfNew - 1, snapshot);
        }
    }
}
Also used: INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)
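
The indexOfNew = -indexOfNew - 1 step decodes the usual binary-search convention for a missing key (the negated insertion point minus one), and the final branch shifts that insertion point left by one when it falls after the slot freed by removing the old entry. A small standalone sketch of the same rename-in-a-sorted-list pattern, using java.util.Collections.binarySearch over plain strings instead of snapshot objects, might look like this:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortedRenameSketch {

    // Renames an entry in a sorted list while keeping the list sorted.
    static void rename(List<String> names, String oldName, String newName) {
        int indexOfOld = Collections.binarySearch(names, oldName);
        if (indexOfOld < 0) {
            throw new IllegalArgumentException(oldName + " does not exist");
        }
        int indexOfNew = Collections.binarySearch(names, newName);
        if (indexOfNew >= 0) {
            throw new IllegalArgumentException(newName + " already exists");
        }
        names.remove(indexOfOld);
        // binarySearch encodes "not found" as -(insertionPoint) - 1.
        indexOfNew = -indexOfNew - 1;
        // If the insertion point was past the removed entry, it has shifted left by one.
        names.add(indexOfNew <= indexOfOld ? indexOfNew : indexOfNew - 1, newName);
    }

    public static void main(String[] args) {
        List<String> snapshots = new ArrayList<>(List.of("s1", "s2", "s3"));
        rename(snapshots, "s2", "s9");
        System.out.println(snapshots);  // [s1, s3, s9]
    }
}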

Aggregations

INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 46 uses
Test (org.junit.Test): 29 uses
Path (org.apache.hadoop.fs.Path): 26 uses
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 22 uses
INode (org.apache.hadoop.hdfs.server.namenode.INode): 21 uses
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 14 uses
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 12 uses
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 11 uses
WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount): 7 uses
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 6 uses
IOException (java.io.IOException): 5 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 5 uses
SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException): 5 uses
ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff): 5 uses
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 4 uses
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 3 uses
SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus): 3 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2 uses
SnapshotAndINode (org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 1 use