
Example 91 with AclEntry

use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.

the class FSAclBaseTest method testModifyAclEntriesStickyBit.

@Test
public void testModifyAclEntriesStickyBit() throws IOException {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, OTHER, NONE), aclEntry(DEFAULT, USER, "foo", ALL));
    fs.setAcl(path, aclSpec);
    aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
    fs.modifyAclEntries(path, aclSpec);
    AclStatus s = fs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned);
    assertPermission((short) 011750);
    assertAclFeature(true);
}
Also used : AclStatus(org.apache.hadoop.fs.permission.AclStatus) AclEntry(org.apache.hadoop.fs.permission.AclEntry) Test(org.junit.Test)
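The aclEntry(...) calls in these examples are static helpers from the Hadoop test code (AclTestHelpers); they are thin wrappers around the public AclEntry.Builder API. A minimal sketch of what such helpers look like, with names and overloads kept illustrative:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

// Sketch of the aclEntry(...) test helpers used above; modeled on
// AclTestHelpers, but the exact overloads here are illustrative.
final class AclEntrySketch {

    // Named entry, e.g. aclEntry(ACCESS, USER, "foo", ALL)
    static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, String name, FsAction permission) {
        return new AclEntry.Builder().setScope(scope).setType(type).setName(name).setPermission(permission).build();
    }

    // Unnamed entry, e.g. aclEntry(ACCESS, GROUP, READ_EXECUTE)
    static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, FsAction permission) {
        return new AclEntry.Builder().setScope(scope).setType(type).setPermission(permission).build();
    }
}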

Example 92 with AclEntry

use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.

the class FSAclBaseTest method testRemoveAclEntriesMinimalDefault.

@Test
public void testRemoveAclEntriesMinimalDefault() throws IOException {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, OTHER, NONE), aclEntry(DEFAULT, USER, "foo", ALL));
    fs.setAcl(path, aclSpec);
    aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo"), aclEntry(ACCESS, MASK), aclEntry(DEFAULT, USER, "foo"), aclEntry(DEFAULT, MASK));
    fs.removeAclEntries(path, aclSpec);
    AclStatus s = fs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned);
    assertPermission((short) 010750);
    assertAclFeature(true);
}
Also used : AclStatus(org.apache.hadoop.fs.permission.AclStatus) AclEntry(org.apache.hadoop.fs.permission.AclEntry) Test(org.junit.Test)
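Note that the removal spec passed to removeAclEntries above names entries without permissions: existing entries are matched by scope, type, and name only, so a bare builder call is enough to describe what to remove. A small illustrative sketch:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;

// Illustrative: a removal spec entry carries no FsAction; scope, type and
// (optional) name identify the existing entry to remove.
final class RemovalSpecSketch {
    static AclEntry defaultUserFoo() {
        return new AclEntry.Builder().setScope(AclEntryScope.DEFAULT).setType(AclEntryType.USER).setName("foo").build();
    }
}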

Example 93 with AclEntry

use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.

the class FSAclBaseTest method testDeDuplication.

/**
   * Verify the de-duplication of AclFeatures with same entries.
   *
   * @throws Exception
   */
@Test
public void testDeDuplication() throws Exception {
    // This test verifies reference counts held in a static data structure, so
    // the entire cluster is shut down first to start from a clean state.
    shutdown();
    AclStorage.getUniqueAclFeatures().clear();
    startCluster();
    setUp();
    int currentSize = 0;
    Path p1 = new Path("/testDeduplication");
    {
        // unique default AclEntries for this test
        List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "testdeduplicateuser", ALL), aclEntry(DEFAULT, GROUP, "testdeduplicategroup", ALL));
        fs.mkdirs(p1);
        fs.modifyAclEntries(p1, aclSpec);
        assertEquals("One more ACL feature should be unique", currentSize + 1, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
        currentSize++;
    }
    Path child1 = new Path(p1, "child1");
    AclFeature child1AclFeature;
    {
        // new child dir should copy entries from its parent.
        fs.mkdirs(child1);
        assertEquals("One more ACL feature should be unique", currentSize + 1, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
        child1AclFeature = getAclFeature(child1, cluster);
        assertEquals("Reference count should be 1", 1, child1AclFeature.getRefCount());
        currentSize++;
    }
    Path child2 = new Path(p1, "child2");
    {
        // The new child dir copies entries from its parent; with no further ACL
        // changes they match its sibling's, so the existing AclFeature is re-used.
        fs.mkdirs(child2);
        assertEquals("existing AclFeature should be re-used", currentSize, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
        AclFeature child2AclFeature = getAclFeature(child2, cluster);
        assertSame("Same AclFeature should be re-used", child1AclFeature, child2AclFeature);
        assertEquals("Reference count should be 2", 2, child2AclFeature.getRefCount());
    }
    {
        // Modifying the ACL on child1 should decrement the original AclFeature's
        // reference count and increment the new one's.
        List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "user1", ALL));
        fs.modifyAclEntries(child1, aclSpec);
        AclFeature modifiedAclFeature = getAclFeature(child1, cluster);
        assertEquals("Old Reference count should be 1", 1, child1AclFeature.getRefCount());
        assertEquals("New Reference count should be 1", 1, modifiedAclFeature.getRefCount());
        // Removing the newly added ACL entry should make child1 refer to the old AclFeature again.
        AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER).setName("user1").build();
        fs.removeAclEntries(child1, Lists.newArrayList(aclEntry));
        assertEquals("Old Reference count should be 2 again", 2, child1AclFeature.getRefCount());
        assertEquals("New Reference count should be 0", 0, modifiedAclFeature.getRefCount());
    }
    {
        // verify the reference count on deletion of Acls
        fs.removeAcl(child2);
        assertEquals("Reference count should be 1", 1, child1AclFeature.getRefCount());
    }
    {
        // verify the reference count on deletion of dir with ACL
        fs.delete(child1, true);
        assertEquals("Reference count should be 0", 0, child1AclFeature.getRefCount());
    }
    Path file1 = new Path(p1, "file1");
    Path file2 = new Path(p1, "file2");
    AclFeature fileAclFeature;
    {
        // Files created under p1 inherit its default ACL and share one AclFeature reference.
        fs.create(file1).close();
        fileAclFeature = getAclFeature(file1, cluster);
        assertEquals("Reference count should be 1", 1, fileAclFeature.getRefCount());
        fs.create(file2).close();
        assertEquals("Reference count should be 2", 2, fileAclFeature.getRefCount());
    }
    {
        // modifying ACLs on file should decrease the reference count on old
        // instance and increase on the new instance
        List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "user1", ALL));
        // adding new ACL entry
        fs.modifyAclEntries(file1, aclSpec);
        AclFeature modifiedFileAcl = getAclFeature(file1, cluster);
        assertEquals("Old Reference count should be 1", 1, fileAclFeature.getRefCount());
        assertEquals("New Reference count should be 1", 1, modifiedFileAcl.getRefCount());
        // Removing the newly added ACL entry should make file1 refer to the old AclFeature again.
        AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER).setName("user1").build();
        fs.removeAclEntries(file1, Lists.newArrayList(aclEntry));
        assertEquals("Old Reference count should be 2", 2, fileAclFeature.getRefCount());
        assertEquals("New Reference count should be 0", 0, modifiedFileAcl.getRefCount());
    }
    {
        // reference count should be decreased on deletion of files with ACLs
        fs.delete(file2, true);
        assertEquals("Reference count should be decreased on delete of the file", 1, fileAclFeature.getRefCount());
        fs.delete(file1, true);
        assertEquals("Reference count should be decreased on delete of the file", 0, fileAclFeature.getRefCount());
        // Once the reference count reaches 0, the instance should be removed from the map.
        fs.create(file1).close();
        AclFeature newFileAclFeature = getAclFeature(file1, cluster);
        assertNotSame("Instance should be different on reference count 0", fileAclFeature, newFileAclFeature);
        fileAclFeature = newFileAclFeature;
    }
    Map<AclFeature, Integer> restartRefCounter = new HashMap<>();
    // Restart the NameNode to check the references.
    // Reference counts will not match after the restart because shutting down
    // the NameNode does not remove any AclFeature references, so reloading adds
    // new ones on top. This only matters in tests; in a real cluster the
    // restarted NameNode runs in a fresh JVM.
    List<AclFeature> entriesBeforeRestart = AclStorage.getUniqueAclFeatures().getEntries();
    {
        // restart by loading edits
        for (AclFeature aclFeature : entriesBeforeRestart) {
            restartRefCounter.put(aclFeature, aclFeature.getRefCount());
        }
        cluster.restartNameNode(true);
        List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures().getEntries();
        assertEquals("Entries before and after should be same", entriesBeforeRestart, entriesAfterRestart);
        for (AclFeature aclFeature : entriesAfterRestart) {
            int before = restartRefCounter.get(aclFeature);
            assertEquals("ReferenceCount After Restart should be doubled", before * 2, aclFeature.getRefCount());
        }
    }
    {
        // restart by loading fsimage
        cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        cluster.getNameNodeRpc().saveNamespace(0, 0);
        cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        cluster.restartNameNode(true);
        List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures().getEntries();
        assertEquals("Entries before and after should be same", entriesBeforeRestart, entriesAfterRestart);
        for (AclFeature aclFeature : entriesAfterRestart) {
            int before = restartRefCounter.get(aclFeature);
            assertEquals("ReferenceCount After 2 Restarts should be tripled", before * 3, aclFeature.getRefCount());
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) AclEntry(org.apache.hadoop.fs.permission.AclEntry) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) Test(org.junit.Test)
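The de-duplication exercised here relies on a reference-counting map of identical AclFeature instances; in Hadoop this is org.apache.hadoop.util.ReferenceCountMap, used by AclStorage, with AclFeature itself carrying the count returned by getRefCount(). A simplified, generic sketch of the idea (not the actual Hadoop implementation):

import java.util.HashMap;
import java.util.Map;

// Simplified sketch of reference-counted de-duplication. In Hadoop the
// elements themselves (AclFeature) hold the count; here a wrapper does.
final class DeDupPool<E> {

    private static final class Entry<E> {
        final E canonical;
        int refCount;
        Entry(E canonical) { this.canonical = canonical; }
    }

    private final Map<E, Entry<E>> pool = new HashMap<>();

    // Returns the shared canonical instance for e, adding it on first use.
    synchronized E put(E e) {
        Entry<E> entry = pool.computeIfAbsent(e, k -> new Entry<>(k));
        entry.refCount++;
        return entry.canonical;
    }

    // Drops one reference; the canonical instance is removed at zero.
    synchronized void remove(E e) {
        Entry<E> entry = pool.get(e);
        if (entry != null && --entry.refCount == 0) {
            pool.remove(e);
        }
    }

    synchronized int getUniqueElementsSize() {
        return pool.size();
    }
}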

Example 94 with AclEntry

use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.

the class FSAclBaseTest method testModifyAclEntriesMustBeOwnerOrSuper.

@Test
public void testModifyAclEntriesMustBeOwnerOrSuper() throws Exception {
    Path bruceDir = new Path(path, "bruce");
    Path bruceFile = new Path(bruceDir, "file");
    fs.mkdirs(bruceDir);
    fs.setOwner(bruceDir, "bruce", null);
    fsAsBruce.create(bruceFile).close();
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "diana", ALL));
    fsAsBruce.modifyAclEntries(bruceFile, aclSpec);
    fs.modifyAclEntries(bruceFile, aclSpec);
    fsAsSupergroupMember.modifyAclEntries(bruceFile, aclSpec);
    exception.expect(AccessControlException.class);
    fsAsDiana.modifyAclEntries(bruceFile, aclSpec);
}
Also used : Path(org.apache.hadoop.fs.Path) AclEntry(org.apache.hadoop.fs.permission.AclEntry) Test(org.junit.Test)
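The fsAsBruce, fsAsDiana, and fsAsSupergroupMember handles are FileSystem instances bound to different test users, created during the test's setup. A minimal sketch of how such a handle can be obtained (the user and group names here are illustrative, not necessarily the ones the test fixture uses):

import java.net.URI;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

// Illustrative: obtain a FileSystem that performs operations as a test user.
final class FileSystemAsUser {
    static FileSystem asUser(String user, String[] groups, final URI fsUri, final Configuration conf) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, groups);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(fsUri, conf);
            }
        });
    }
}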

Example 95 with AclEntry

use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.

the class FSAclBaseTest method testRemoveDefaultAcl.

@Test
public void testRemoveDefaultAcl() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, OTHER, NONE), aclEntry(DEFAULT, USER, "foo", ALL));
    fs.setAcl(path, aclSpec);
    fs.removeDefaultAcl(path);
    AclStatus s = fs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
    assertPermission((short) 010770);
    assertAclFeature(true);
    // restart of the cluster
    restartCluster();
    s = fs.getAclStatus(path);
    AclEntry[] afterRestart = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(returned, afterRestart);
}
Also used : AclStatus(org.apache.hadoop.fs.permission.AclStatus) AclEntry(org.apache.hadoop.fs.permission.AclEntry) Test(org.junit.Test)
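Outside of assertions, the AclStatus returned by getAclStatus can be inspected directly; a short sketch, using only the public getters, that prints roughly what hdfs dfs -getfacl shows:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

// Illustrative: dump the ACL of a path via the AclStatus getters.
final class AclDump {
    static void printAcl(FileSystem fs, Path p) throws IOException {
        AclStatus status = fs.getAclStatus(p);
        System.out.println("# owner: " + status.getOwner());
        System.out.println("# group: " + status.getGroup());
        for (AclEntry entry : status.getEntries()) {
            // AclEntry.toString() renders entries like "user:foo:rwx"
            System.out.println(entry);
        }
    }
}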

Aggregations

AclEntry (org.apache.hadoop.fs.permission.AclEntry): 137 usages
Test (org.junit.Test): 90 usages
AclStatus (org.apache.hadoop.fs.permission.AclStatus): 81 usages
Path (org.apache.hadoop.fs.Path): 52 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25 usages
ArrayList (java.util.ArrayList): 11 usages
FSAclBaseTest (org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest): 11 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 10 usages
Configuration (org.apache.hadoop.conf.Configuration): 7 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 6 usages
MockResponse (com.squareup.okhttp.mockwebserver.MockResponse): 5 usages
ScopedAclEntries (org.apache.hadoop.fs.permission.ScopedAclEntries): 5 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 usages
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 5 usages
IOException (java.io.IOException): 4 usages
List (java.util.List): 4 usages
AclEntryScope (org.apache.hadoop.fs.permission.AclEntryScope): 4 usages
AclEntryProto (org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto): 4 usages
URI (java.net.URI): 3 usages
AclEntryType (org.apache.hadoop.fs.permission.AclEntryType): 3 usages