Search in sources :

Example 1 with Trash

use of org.apache.hadoop.fs.Trash in project hadoop by apache.

the class TestHDFSTrash method testDeleteTrash.

@Test
public void testDeleteTrash() throws Exception {
    Configuration testConf = new Configuration(conf);
    testConf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
    Path user1Tmp = new Path(TEST_ROOT, "test-del-u1");
    Path user2Tmp = new Path(TEST_ROOT, "test-del-u2");
    // login as user1, move something to trash
    // verify user1 can remove its own trash dir
    fs = DFSTestUtil.login(fs, testConf, user1);
    fs.mkdirs(user1Tmp);
    Trash u1Trash = getPerUserTrash(user1, fs, testConf);
    Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
    assertTrue(String.format("Failed to move %s to trash", user1Tmp), u1Trash.moveToTrash(user1Tmp));
    assertTrue(String.format("%s should be allowed to remove its own trash directory %s", user1.getUserName(), u1t), fs.delete(u1t, true));
    assertFalse(fs.exists(u1t));
    // login as user2, move something to trash
    fs = DFSTestUtil.login(fs, testConf, user2);
    fs.mkdirs(user2Tmp);
    Trash u2Trash = getPerUserTrash(user2, fs, testConf);
    // resolve the trash dir before moving, mirroring the user1 flow above
    Path u2t = u2Trash.getCurrentTrashDir(user2Tmp);
    // assert the move succeeded rather than silently ignoring a failure;
    // otherwise the delete below could "pass" against a non-existent dir
    assertTrue(String.format("Failed to move %s to trash", user2Tmp), u2Trash.moveToTrash(user2Tmp));
    try {
        // user1 should not be able to remove user2's trash dir
        fs = DFSTestUtil.login(fs, testConf, user1);
        fs.delete(u2t, true);
        fail(String.format("%s should not be able to remove %s trash directory", USER1_NAME, USER2_NAME));
    } catch (AccessControlException e) {
        // the catch clause already guarantees the exception type, so only
        // the message content needs checking here
        assertTrue("Permission denied messages must carry the username", e.getMessage().contains(USER1_NAME));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) AccessControlException(org.apache.hadoop.security.AccessControlException) Trash(org.apache.hadoop.fs.Trash) TestTrash(org.apache.hadoop.fs.TestTrash) Test(org.junit.Test)

Example 2 with Trash

use of org.apache.hadoop.fs.Trash in project hadoop by apache.

the class TestDFSPermission method testTrashPermission.

/**
 * Verifies that HDFS permission checks apply both to direct deletion and
 * to moving a directory to trash: user2, who lacks write access to a
 * sub-entry of /BSS/user1, must fail both operations, and the directory
 * must still exist afterwards.
 */
@Test(timeout = 30000)
public void testTrashPermission() throws Exception {
    //  /BSS                  user1:group2 777
    //   /BSS/user1            user1:group2 755
    //   /BSS/user1/test       user1:group1 600
    Path rootDir = new Path("/BSS");
    Path user1Dir = new Path("/BSS/user1");
    Path user1File = new Path("/BSS/user1/test");
    try {
        conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
        fs = FileSystem.get(conf);
        fs.mkdirs(rootDir);
        fs.setPermission(rootDir, new FsPermission((short) 0777));
        fs = DFSTestUtil.login(fs, conf, USER1);
        fs.mkdirs(user1Dir);
        fs.setPermission(user1Dir, new FsPermission((short) 0755));
        fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
        create(OpType.CREATE, user1File);
        fs.setOwner(user1File, USER1.getShortUserName(), GROUP1_NAME);
        fs.setPermission(user1File, new FsPermission((short) 0600));
        try {
            // login as user2, attempt to delete /BSS/user1
            // this should fail because user2 has no permission to
            // its sub directory.
            fs = DFSTestUtil.login(fs, conf, USER2);
            fs.delete(user1Dir, true);
            fail("User2 should not be allowed to delete user1's dir.");
        } catch (AccessControlException e) {
            e.printStackTrace();
            assertTrue("Permission denied messages must carry the username", e.getMessage().contains(USER2_NAME));
        }
        // ensure the /BSS/user1 still exists
        assertTrue(fs.exists(user1Dir));
        try {
            // pre-create the parent of user1's trash dir as superuser and
            // open it up, so the moveToTrash below fails on the source
            // directory's permissions rather than on the trash path
            fs = DFSTestUtil.login(fs, conf, SUPERUSER);
            Trash trash = new Trash(fs, conf);
            Path trashRoot = trash.getCurrentTrashDir(user1Dir);
            while (true) {
                trashRoot = trashRoot.getParent();
                if (trashRoot.getParent().isRoot()) {
                    break;
                }
            }
            fs.mkdirs(trashRoot);
            fs.setPermission(trashRoot, new FsPermission((short) 0777));
            // login as user2, attempt to move /BSS/user1 to trash
            // this should also fail otherwise the directory will be
            // removed by trash emptier (emptier is running by superuser)
            fs = DFSTestUtil.login(fs, conf, USER2);
            Trash userTrash = new Trash(fs, conf);
            assertTrue(userTrash.isEnabled());
            userTrash.moveToTrash(user1Dir);
            fail("User2 should not be allowed to move" + "user1's dir to trash");
        } catch (IOException e) {
            // expect the exception is caused by permission denied
            assertTrue(e.getCause() instanceof AccessControlException);
            e.printStackTrace();
            assertTrue("Permission denied messages must carry the username", e.getCause().getMessage().contains(USER2_NAME));
        }
        // ensure /BSS/user1 still exists; use assertTrue rather than the
        // reversed-argument assertEquals(actual, expected) form
        assertTrue(fs.exists(user1Dir));
    } finally {
        // always clean up as superuser and disable trash again
        fs = DFSTestUtil.login(fs, conf, SUPERUSER);
        fs.delete(rootDir, true);
        conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) Trash(org.apache.hadoop.fs.Trash) Test(org.junit.Test)

Example 3 with Trash

use of org.apache.hadoop.fs.Trash in project hadoop by apache.

the class ViewFileSystemBaseTest method testTrashRoot.

/**
 * Verifies that ViewFileSystem resolves trash roots through to the
 * mounted target file system, rejects non-mountpoint and null paths
 * with {@link NotInMountpointException}, and reports trash roots after
 * a file has actually been moved to trash.
 */
@Test
public void testTrashRoot() throws IOException {
    Path mountDataRootPath = new Path("/data");
    Path fsTargetFilePath = new Path("debug.log");
    Path mountDataFilePath = new Path(mountDataRootPath, fsTargetFilePath);
    Path mountDataNonExistingFilePath = new Path(mountDataRootPath, "no.log");
    fileSystemTestHelper.createFile(fsTarget, fsTargetFilePath);
    // Get Trash roots for paths via ViewFileSystem handle
    Path mountDataRootTrashPath = fsView.getTrashRoot(mountDataRootPath);
    Path mountDataFileTrashPath = fsView.getTrashRoot(mountDataFilePath);
    // Get Trash roots for the same set of paths via the mounted filesystem
    Path fsTargetRootTrashRoot = fsTarget.getTrashRoot(mountDataRootPath);
    Path fsTargetFileTrashPath = fsTarget.getTrashRoot(mountDataFilePath);
    // Verify if Trash roots from ViewFileSystem matches that of the ones
    // from the target mounted FileSystem.
    assertEquals(mountDataRootTrashPath.toUri().getPath(), fsTargetRootTrashRoot.toUri().getPath());
    assertEquals(mountDataFileTrashPath.toUri().getPath(), fsTargetFileTrashPath.toUri().getPath());
    assertEquals(mountDataRootTrashPath.toUri().getPath(), mountDataFileTrashPath.toUri().getPath());
    // Verify trash root for an non-existing file but on a valid mountpoint.
    Path trashRoot = fsView.getTrashRoot(mountDataNonExistingFilePath);
    assertEquals(mountDataRootTrashPath.toUri().getPath(), trashRoot.toUri().getPath());
    // Verify trash root for invalid mounts.
    Path invalidMountRootPath = new Path("/invalid_mount");
    Path invalidMountFilePath = new Path(invalidMountRootPath, "debug.log");
    try {
        fsView.getTrashRoot(invalidMountRootPath);
        fail("ViewFileSystem getTrashRoot should fail for non-mountpoint paths.");
    } catch (NotInMountpointException e) {
    //expected exception
    }
    try {
        fsView.getTrashRoot(invalidMountFilePath);
        fail("ViewFileSystem getTrashRoot should fail for non-mountpoint paths.");
    } catch (NotInMountpointException e) {
    //expected exception
    }
    try {
        fsView.getTrashRoot(null);
        fail("ViewFileSystem getTrashRoot should fail for empty paths.");
    } catch (NotInMountpointException e) {
    //expected exception
    }
    // Move the file to trash
    FileStatus fileStatus = fsTarget.getFileStatus(fsTargetFilePath);
    Configuration newConf = new Configuration(conf);
    newConf.setLong("fs.trash.interval", 1000);
    Trash lTrash = new Trash(fsTarget, newConf);
    boolean trashed = lTrash.moveToTrash(fsTargetFilePath);
    Assert.assertTrue("File " + fileStatus + " move to " + "trash failed.", trashed);
    // Verify ViewFileSystem trash roots shows the ones from
    // target mounted FileSystem.
    Assert.assertTrue("ViewFileSystem should report at least one trash root after a move to trash", fsView.getTrashRoots(true).size() > 0);
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Configuration(org.apache.hadoop.conf.Configuration) Trash(org.apache.hadoop.fs.Trash) Test(org.junit.Test)

Example 4 with Trash

use of org.apache.hadoop.fs.Trash in project hadoop by apache.

the class NameNode method startTrashEmptier.

/**
 * Starts the trash emptier daemon thread if trash is enabled.
 * A zero {@code fs.trash.interval} disables the emptier; a negative
 * value is rejected with an {@link IOException}.
 */
private void startTrashEmptier(final Configuration conf) throws IOException {
    final long trashInterval =
        conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
    // Zero interval means trash is disabled: nothing to start.
    if (trashInterval == 0) {
        return;
    }
    if (trashInterval < 0) {
        throw new IOException("Cannot start trash emptier with negative interval." + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
    }
    // This may be called from the transitionToActive code path, in which
    // case the current user is the administrator, not the NN. The trash
    // emptier needs to run as the NN. See HDFS-3972.
    FileSystem loginUserFs =
        SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction<FileSystem>() {

            @Override
            public FileSystem run() throws IOException {
                return FileSystem.get(conf);
            }
        });
    Runnable emptierTask = new Trash(loginUserFs, conf).getEmptier();
    this.emptier = new Thread(emptierTask, "Trash Emptier");
    this.emptier.setDaemon(true);
    this.emptier.start();
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) Trash(org.apache.hadoop.fs.Trash)

Example 5 with Trash

use of org.apache.hadoop.fs.Trash in project hadoop by apache.

the class TestHDFSTrash method getPerUserTrash.

/**
 * Return a {@link Trash} instance using the given configuration.
 * The trash root directory is set to a unique directory under
 * {@link #TRASH_ROOT}. Use this method to isolate trash
 * directories for different users.
 *
 * @param ugi the user whose trash instance is requested
 * @param fileSystem the underlying file system to spy on
 * @param config configuration used to construct the Trash instance
 * @return a Trash whose reported trash root is unique to this call
 * @throws IOException if the Trash instance cannot be created
 */
private Trash getPerUserTrash(UserGroupInformation ugi, FileSystem fileSystem, Configuration config) throws IOException {
    // generate a unique path per invocation: <user>-<random-uuid>;
    // plain concatenation suffices here — no need for a synchronized
    // StringBuffer
    String dirName = ugi.getUserName() + "-" + UUID.randomUUID();
    Path userTrashRoot = new Path(TRASH_ROOT, dirName);
    // spy the file system so getTrashRoot() reports the isolated root
    FileSystem spyUserFs = Mockito.spy(fileSystem);
    Mockito.when(spyUserFs.getTrashRoot(Mockito.any())).thenReturn(userTrashRoot);
    return new Trash(spyUserFs, config);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) UUID(java.util.UUID) Trash(org.apache.hadoop.fs.Trash) TestTrash(org.apache.hadoop.fs.TestTrash)

Aggregations

Trash (org.apache.hadoop.fs.Trash)5 Path (org.apache.hadoop.fs.Path)4 Test (org.junit.Test)3 IOException (java.io.IOException)2 Configuration (org.apache.hadoop.conf.Configuration)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 TestTrash (org.apache.hadoop.fs.TestTrash)2 AccessControlException (org.apache.hadoop.security.AccessControlException)2 UUID (java.util.UUID)1 FileStatus (org.apache.hadoop.fs.FileStatus)1 LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus)1 FsPermission (org.apache.hadoop.fs.permission.FsPermission)1