Search in sources:

Example 6 with SnapshottableDirectoryStatus

use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameAndUpdateSnapshottableDirs:

/**
   * Test rename where the src/dst directories are both snapshottable 
   * directories without snapshots. In such case we need to update the 
   * snapshottable dir list in SnapshotManager.
   */
@Test(timeout = 60000)
public void testRenameAndUpdateSnapshottableDirs() throws Exception {
    // NOTE(review): hdfs, fsn, fsdir and snap1 are fields of the enclosing
    // test class (not visible in this chunk) — presumably the mini-cluster's
    // DistributedFileSystem, FSNamesystem, FSDirectory and a snapshot name;
    // confirm against the class setup.
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path foo = new Path(sdir1, "foo");
    final Path bar = new Path(sdir2, "bar");
    hdfs.mkdirs(foo);
    hdfs.mkdirs(bar);
    // Make foo snapshottable with NO snapshots; make bar snapshottable
    // WITH one snapshot (snap1). Both should show up in the listing.
    hdfs.allowSnapshot(foo);
    SnapshotTestHelper.createSnapshot(hdfs, bar, snap1);
    assertEquals(2, fsn.getSnapshottableDirListing().length);
    // Remember foo's inode id so we can verify identity after the rename.
    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
    long fooId = fooNode.getId();
    // Overwriting a snapshottable directory that still holds snapshots
    // must be rejected.
    try {
        hdfs.rename(foo, bar, Rename.OVERWRITE);
        fail("Expect exception since " + bar + " is snapshottable and already has snapshots");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains(bar.toString() + " is snapshottable and already has snapshots", e);
    }
    // After deleting bar's only snapshot the overwrite rename is allowed.
    hdfs.deleteSnapshot(bar, snap1);
    hdfs.rename(foo, bar, Rename.OVERWRITE);
    // The snapshottable dir list must now contain a single entry: foo's
    // inode (same id) listed under its new full path, bar.
    SnapshottableDirectoryStatus[] dirs = fsn.getSnapshottableDirListing();
    assertEquals(1, dirs.length);
    assertEquals(bar, dirs[0].getFullPath());
    assertEquals(fooId, dirs[0].getDirStatus().getFileId());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) IOException(java.io.IOException) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test(org.junit.Test)

Example 7 with SnapshottableDirectoryStatus

use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in project hadoop by apache.

From the class LsSnapshottableDir, method run:

/**
 * List the snapshottable directories that are owned by the current user
 * (all snapshottable directories when the caller is a superuser), printing
 * the listing to stdout.
 *
 * @param argv expected to be empty; any argument prints usage and fails
 * @return 0 on success, 1 on usage error, non-DFS filesystem, or I/O failure
 */
@Override
public int run(String[] argv) throws Exception {
    String description = "hdfs lsSnapshottableDir: \n" + "\tGet the list of snapshottable directories that are owned by the current user.\n" + "\tReturn all the snapshottable directories if the current user is a super user.\n";
    if (argv.length != 0) {
        System.err.println("Usage: \n" + description);
        return 1;
    }
    // Snapshot listing is an HDFS-specific operation; reject other
    // FileSystem implementations up front.
    FileSystem fs = FileSystem.get(getConf());
    if (!(fs instanceof DistributedFileSystem)) {
        System.err.println("LsSnapshottableDir can only be used in DistributedFileSystem");
        return 1;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
        SnapshottableDirectoryStatus[] stats = dfs.getSnapshottableDirListing();
        SnapshottableDirectoryStatus.print(stats, System.out);
    } catch (IOException e) {
        // Throwable.getLocalizedMessage() may return null (an exception
        // constructed without a message), which previously caused an NPE on
        // split(); fall back to the exception's toString() in that case and
        // report only the first line of the message.
        String message = e.getLocalizedMessage();
        String firstLine = (message != null) ? message.split("\n")[0] : e.toString();
        System.err.println("lsSnapshottableDir: " + firstLine);
        return 1;
    }
    return 0;
}
Also used : DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)

Example 8 with SnapshottableDirectoryStatus

use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in project hadoop by apache.

From the class SnapshotManager, method getSnapshottableDirListing:

/**
   * List all the snapshottable directories that are owned by the current user.
   * @param userName Current user name.
   * @return Snapshottable directories that are owned by the current user,
   *         represented as an array of {@link SnapshottableDirectoryStatus}. If
   *         {@code userName} is null, return all the snapshottable dirs.
   */
public SnapshottableDirectoryStatus[] getSnapshottableDirListing(String userName) {
    // Preserve the historical contract: null (not an empty array) when no
    // directory is snapshottable at all.
    if (snapshottables.isEmpty()) {
        return null;
    }
    List<SnapshottableDirectoryStatus> result = new ArrayList<SnapshottableDirectoryStatus>();
    for (INodeDirectory dir : snapshottables.values()) {
        // A null userName means "no ownership filter": include every dir.
        boolean visible = userName == null || userName.equals(dir.getUserName());
        if (!visible) {
            continue;
        }
        // Root has no parent; represent its parent path as the empty byte
        // array rather than a full path string.
        byte[] parentPath = dir.getParent() == null ? DFSUtilClient.EMPTY_BYTES : DFSUtil.string2Bytes(dir.getParent().getFullPathName());
        result.add(new SnapshottableDirectoryStatus(dir.getModificationTime(), dir.getAccessTime(), dir.getFsPermission(), dir.getUserName(), dir.getGroupName(), dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(Snapshot.CURRENT_STATE_ID), dir.getDirectorySnapshottableFeature().getNumSnapshots(), dir.getDirectorySnapshottableFeature().getSnapshotQuota(), parentPath));
    }
    // Stable, comparator-defined ordering for the client-facing listing.
    Collections.sort(result, SnapshottableDirectoryStatus.COMPARATOR);
    return result.toArray(new SnapshottableDirectoryStatus[result.size()]);
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) ArrayList(java.util.ArrayList) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)

Example 9 with SnapshottableDirectoryStatus

use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in project hadoop by apache.

From the class TestFSImageWithSnapshot, method checkImage:

/**
 * Verify that the namespace tree and the snapshot-related state survive an
 * fsimage save / cluster reformat / image reload round trip.
 *
 * NOTE(review): fsn, hdfs, cluster, conf and the dumpTree2File /
 * saveFSImageToTempFile / loadFSImageFromTempFile helpers are members of the
 * enclosing test class, outside this chunk.
 *
 * @param s sequence number used only to name the dumped tree files
 */
void checkImage(int s) throws IOException {
    final String name = "s" + s;
    // dump the fsdir tree
    File fsnBefore = dumpTree2File(name + "_before");
    // save the namesystem to a temp file
    File imageFile = saveFSImageToTempFile();
    // Capture the snapshot state to compare against after the reload.
    long numSdirBefore = fsn.getNumSnapshottableDirs();
    long numSnapshotBefore = fsn.getNumSnapshots();
    SnapshottableDirectoryStatus[] dirBefore = hdfs.getSnapshottableDirListing();
    // shutdown the cluster
    cluster.shutdown();
    // dump the fsdir tree
    File fsnBetween = dumpTree2File(name + "_between");
    // Sanity check: shutting down must not have changed the in-memory tree.
    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnBetween, true);
    // restart the cluster, and format the cluster
    cluster = new MiniDFSCluster.Builder(conf).format(true).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    // Re-acquire handles: the old fsn/hdfs belong to the dead cluster.
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
    // load the namesystem from the temp file
    loadFSImageFromTempFile(imageFile);
    // dump the fsdir tree again
    File fsnAfter = dumpTree2File(name + "_after");
    // compare two dumped tree
    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
    // Snapshot counts must be preserved exactly by the image round trip.
    long numSdirAfter = fsn.getNumSnapshottableDirs();
    long numSnapshotAfter = fsn.getNumSnapshots();
    SnapshottableDirectoryStatus[] dirAfter = hdfs.getSnapshottableDirListing();
    Assert.assertEquals(numSdirBefore, numSdirAfter);
    Assert.assertEquals(numSnapshotBefore, numSnapshotAfter);
    Assert.assertEquals(dirBefore.length, dirAfter.length);
    // Same-length arrays plus subset containment implies the path sets are
    // equal (order is not asserted).
    List<String> pathListBefore = new ArrayList<String>();
    for (SnapshottableDirectoryStatus sBefore : dirBefore) {
        pathListBefore.add(sBefore.getFullPath().toString());
    }
    for (SnapshottableDirectoryStatus sAfter : dirAfter) {
        Assert.assertTrue(pathListBefore.contains(sAfter.getFullPath().toString()));
    }
}
Also used : ArrayList(java.util.ArrayList) File(java.io.File) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)

Example 10 with SnapshottableDirectoryStatus

use of org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus in project hadoop by apache.

From the class TestSnapshottableDirListing, method testListWithDifferentUser:

/**
   * Test the listing with different user names to make sure only directories
   * that are owned by the user are listed.
   */
@Test(timeout = 60000)
public void testListWithDifferentUser() throws Exception {
    // NOTE(review): cluster, hdfs, conf, root, dir1 and dir2 are fields of
    // the enclosing test class (not visible in this chunk); hdfs is
    // presumably a superuser DistributedFileSystem handle.
    // Nested snapshottable dirs are needed below (dir_user2/subdir).
    cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
    // first make dir1 and dir2 snapshottable
    hdfs.allowSnapshot(dir1);
    hdfs.allowSnapshot(dir2);
    // World-writable root so the non-super test users can mkdir under it.
    hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));
    // create two dirs and make them snapshottable under the name of user1
    UserGroupInformation ugi1 = UserGroupInformation.createUserForTesting("user1", new String[] { "group1" });
    DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(ugi1, conf);
    Path dir1_user1 = new Path("/dir1_user1");
    Path dir2_user1 = new Path("/dir2_user1");
    fs1.mkdirs(dir1_user1);
    fs1.mkdirs(dir2_user1);
    hdfs.allowSnapshot(dir1_user1);
    hdfs.allowSnapshot(dir2_user1);
    // user2
    UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting("user2", new String[] { "group2" });
    DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(ugi2, conf);
    Path dir_user2 = new Path("/dir_user2");
    Path subdir_user2 = new Path(dir_user2, "subdir");
    fs2.mkdirs(dir_user2);
    fs2.mkdirs(subdir_user2);
    hdfs.allowSnapshot(dir_user2);
    hdfs.allowSnapshot(subdir_user2);
    // super user
    String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
    UserGroupInformation superUgi = UserGroupInformation.createUserForTesting("superuser", new String[] { supergroup });
    DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(superUgi, conf);
    // list the snapshottable dirs for superuser
    SnapshottableDirectoryStatus[] dirs = fs3.getSnapshottableDirListing();
    // 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
    // subdir_user2
    assertEquals(6, dirs.length);
    // list the snapshottable dirs for user1
    dirs = fs1.getSnapshottableDirListing();
    // 2 dirs owned by user1: dir1_user1 and dir2_user1
    assertEquals(2, dirs.length);
    // Index-based asserts rely on the listing's sorted order.
    assertEquals(dir1_user1, dirs[0].getFullPath());
    assertEquals(dir2_user1, dirs[1].getFullPath());
    // list the snapshottable dirs for user2
    dirs = fs2.getSnapshottableDirListing();
    // 2 dirs owned by user2: dir_user2 and subdir_user2
    assertEquals(2, dirs.length);
    assertEquals(dir_user2, dirs[0].getFullPath());
    assertEquals(subdir_user2, dirs[1].getFullPath());
}
Also used : Path(org.apache.hadoop.fs.Path) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test(org.junit.Test)

Aggregations

SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)11 Path (org.apache.hadoop.fs.Path)7 Test (org.junit.Test)7 IOException (java.io.IOException)4 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)3 INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)3 ArrayList (java.util.ArrayList)2 DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff)2 AccessControlException (org.apache.hadoop.security.AccessControlException)2 EOFException (java.io.EOFException)1 File (java.io.File)1 FileNotFoundException (java.io.FileNotFoundException)1 SocketException (java.net.SocketException)1 SocketTimeoutException (java.net.SocketTimeoutException)1 URISyntaxException (java.net.URISyntaxException)1 Date (java.util.Date)1 ServletException (javax.servlet.ServletException)1 Configuration (org.apache.hadoop.conf.Configuration)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException)1