Example 46 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestCacheDirectives, method testCacheManagerRestart.

@Test(timeout = 60000)
public void testCacheManagerRestart() throws Exception {
    SecondaryNameNode secondary = null;
    try {
        // Start a secondary namenode
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        secondary = new SecondaryNameNode(conf);
        // Create and validate a pool
        final String pool = "poolparty";
        String groupName = "partygroup";
        FsPermission mode = new FsPermission((short) 0777);
        long limit = 747;
        dfs.addCachePool(new CachePoolInfo(pool)
                .setGroupName(groupName)
                .setMode(mode)
                .setLimit(limit));
        RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        CachePoolInfo info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        // Create some cache entries
        int numEntries = 10;
        String entryPrefix = "/party-";
        long prevId = -1;
        final Date expiry = new Date();
        for (int i = 0; i < numEntries; i++) {
            prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
                    .setPath(new Path(entryPrefix + i))
                    .setPool(pool)
                    .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime()))
                    .build());
        }
        RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        // Checkpoint once to set some cache pools and directives on 2NN side
        secondary.doCheckpoint();
        // Add some more CacheManager state
        final String imagePool = "imagePool";
        dfs.addCachePool(new CachePoolInfo(imagePool));
        prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
                .setPath(new Path("/image"))
                .setPool(imagePool)
                .build());
        // Save a new image to force a fresh fsimage download
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Checkpoint again forcing a reload of FSN state
        boolean fetchImage = secondary.doCheckpoint();
        assertTrue("Secondary should have fetched a new fsimage from NameNode", fetchImage);
        // Remove temp pool and directive
        dfs.removeCachePool(imagePool);
        // Restart namenode
        cluster.restartNameNode();
        // Check that state came back up
        pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
            assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
                .setPath(new Path("/foobar"))
                .setPool(pool)
                .build());
        assertEquals(prevId + 1, nextId);
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Date (java.util.Date), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
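
The key detail above is FsPermission's short-valued constructor: the octal literal 0777 sets rwx for user, group, and other. A minimal standalone sketch (not part of the Hadoop test; the class name is illustrative) showing how the bits round-trip:

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDemo {
    public static void main(String[] args) {
        // 0777 is an octal literal: rwx for user, group, and other.
        FsPermission mode = new FsPermission((short) 0777);
        System.out.println(mode);           // rwxrwxrwx
        System.out.println(mode.toShort()); // 511, the decimal value of 0777
    }
}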

Example 47 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testRemoveCachePool.

@Test
public void testRemoveCachePool() throws Exception {
    removeExistingCachePools(null);
    CachePoolInfo cacheInfo = new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
    ((DistributedFileSystem) fs).addCachePool(cacheInfo);
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).removeCachePool("pool1");
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // Expected: user1 has no permission on the pool.
    }
    String aceRemoveCachePoolPattern = ".*allowed=false.*ugi=theDoctor.*cmd=removeCachePool.*";
    int length = verifyAuditLogs(aceRemoveCachePoolPattern);
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).removeCachePool("pool1");
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // Expected: the FileSystem has already been closed.
    }
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used: AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
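
verifyAuditLogs is a helper defined elsewhere in the test class; at its core it matches a regex such as aceRemoveCachePoolPattern against the captured audit output. A minimal sketch of that style of check, using a hypothetical audit line shaped like the ones HDFS emits for denied calls:

import java.util.regex.Pattern;

public class AuditPatternDemo {
    public static void main(String[] args) {
        String pattern = ".*allowed=false.*ugi=theDoctor.*cmd=removeCachePool.*";
        // Hypothetical audit line; field order mirrors the HDFS audit log format.
        String line = "allowed=false ugi=theDoctor (auth:SIMPLE) ip=/127.0.0.1 "
                + "cmd=removeCachePool src=pool1 dst=null perm=null proto=rpc";
        System.out.println(Pattern.matches(pattern, line)); // true
    }
}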

Example 48 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testAddCachePool.

@Test
public void testAddCachePool() throws Exception {
    removeExistingCachePools(null);
    CachePoolInfo cacheInfo = new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).addCachePool(cacheInfo);
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // Expected: user1 may not create cache pools.
    }
    String aceAddCachePoolPattern = ".*allowed=false.*ugi=theDoctor.*cmd=addCachePool.*";
    int length = verifyAuditLogs(aceAddCachePoolPattern);
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).addCachePool(cacheInfo);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // Expected: the FileSystem has already been closed.
    }
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used: AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
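
DFSTestUtil.getFileSystemAs returns a FileSystem handle bound to another user's credentials, which is what makes the addCachePool call fail for user1. Outside the HDFS test utilities, the same effect can be had with UserGroupInformation.doAs; a minimal sketch, assuming a suitable Configuration is in scope (the group name is illustrative):

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class FileSystemAsUserDemo {
    static FileSystem getFileSystemAs(String user, final Configuration conf)
            throws Exception {
        UserGroupInformation ugi =
                UserGroupInformation.createUserForTesting(user, new String[] { "users" });
        // Every call made through the returned handle runs as "user".
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(conf);
            }
        });
    }
}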

Example 49 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testGetContentSummary.

@Test
public void testGetContentSummary() throws IOException {
    Path dir1 = new Path("/dir1");
    Path dir2 = new Path("/dir2");
    String acePattern = ".*allowed=false.*ugi=theEngineer.*cmd=contentSummary.*";
    fs.mkdirs(dir1, new FsPermission((short) 0600));
    fs.mkdirs(dir2, new FsPermission((short) 0600));
    fs.setOwner(dir1, user1.getUserName(), user1.getPrimaryGroupName());
    fs.setOwner(dir2, user2.getUserName(), user2.getPrimaryGroupName());
    try {
        fs2.getContentSummary(new Path("/"));
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // Expected: theEngineer cannot read the owner-only directories.
    }
    int length = verifyAuditLogs(acePattern);
    try {
        fs2.getContentSummary(new Path("/dir3"));
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // Expected: /dir3 does not exist.
    }
    assertTrue("Unexpected log from getContentSummary", length == auditlog.getOutput().split("\n").length);
}
Also used: Path (org.apache.hadoop.fs.Path), AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), Test (org.junit.Test)
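
The 0600 mode in the mkdirs calls grants read and write to the owner only, so fs2 (running as theEngineer) is denied when the content summary recurses into dir1 and dir2. A minimal sketch of what that short value expands to:

import org.apache.hadoop.fs.permission.FsPermission;

public class ModeBitsDemo {
    public static void main(String[] args) {
        FsPermission mode = new FsPermission((short) 0600);
        System.out.println(mode);                  // rw-------
        System.out.println(mode.getUserAction());  // READ_WRITE
        System.out.println(mode.getGroupAction()); // NONE
        System.out.println(mode.getOtherAction()); // NONE
    }
}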

Example 50 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testRemoveCacheDirective.

@Test
public void testRemoveCacheDirective() throws Exception {
    removeExistingCachePools(null);
    proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
            .setPath(new Path("/alpha"))
            .setPool("pool1")
            .build();
    String aceRemoveCachePattern = ".*allowed=false.*ugi=theDoctor.*cmd=removeCache.*";
    int length = -1;
    Long id = ((DistributedFileSystem) fs).addCacheDirective(alpha);
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).removeCacheDirective(id);
        fail("It should have failed with an AccessControlException");
    } catch (AccessControlException ace) {
        length = verifyAuditLogs(aceRemoveCachePattern);
    }
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).removeCacheDirective(id);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // Expected: the FileSystem has already been closed.
    }
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
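
Every directive in these examples goes through CacheDirectiveInfo.Builder. A minimal sketch of constructing one with an explicit expiration, mirroring the absolute-expiry variant in Example 46 (the pool name and path are illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class DirectiveBuilderDemo {
    public static void main(String[] args) {
        CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
                .setPath(new Path("/alpha"))
                .setPool("pool1")
                // newRelative takes a millisecond offset from "now";
                // newAbsolute (used in Example 46) takes an epoch timestamp.
                .setExpiration(CacheDirectiveInfo.Expiration.newRelative(60 * 60 * 1000L))
                .build();
        System.out.println(directive);
    }
}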

Aggregations

Types most often used together with FsPermission across the indexed examples (occurrence counts):

FsPermission (org.apache.hadoop.fs.permission.FsPermission): 427
Path (org.apache.hadoop.fs.Path): 267
Test (org.junit.Test): 180
IOException (java.io.IOException): 120
FileSystem (org.apache.hadoop.fs.FileSystem): 93
Configuration (org.apache.hadoop.conf.Configuration): 89
FileStatus (org.apache.hadoop.fs.FileStatus): 87
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 52
AccessControlException (org.apache.hadoop.security.AccessControlException): 43
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 36
FileNotFoundException (java.io.FileNotFoundException): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 29
File (java.io.File): 26
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 26
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 26
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 25
ArrayList (java.util.ArrayList): 22
HashMap (java.util.HashMap): 19
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 16
URI (java.net.URI): 15