
Example 21 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testBasicPoolOperations.

@Test(timeout = 60000)
public void testBasicPoolOperations() throws Exception {
    final String poolName = "pool1";
    CachePoolInfo info = new CachePoolInfo(poolName)
        .setOwnerName("bob")
        .setGroupName("bobgroup")
        .setMode(new FsPermission((short) 0755))
        .setLimit(150L);
    // Add a pool
    dfs.addCachePool(info);
    // Do some bad addCachePools
    try {
        dfs.addCachePool(info);
        fail("added the pool with the same name twice");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("pool1 already exists", ioe);
    }
    try {
        dfs.addCachePool(new CachePoolInfo(""));
        fail("added empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        dfs.addCachePool(null);
        fail("added null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    try {
        proto.addCachePool(new CachePoolInfo(""));
        fail("added empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.addCachePool(null);
        fail("added null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    // Modify the pool
    info.setOwnerName("jane")
        .setGroupName("janegroup")
        .setMode(new FsPermission((short) 0700))
        .setLimit(314L);
    dfs.modifyCachePool(info);
    // Do some invalid modify pools
    try {
        dfs.modifyCachePool(new CachePoolInfo("fool"));
        fail("modified non-existent cache pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("fool does not exist", ioe);
    }
    try {
        dfs.modifyCachePool(new CachePoolInfo(""));
        fail("modified empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        dfs.modifyCachePool(null);
        fail("modified null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    try {
        proto.modifyCachePool(new CachePoolInfo(""));
        fail("modified empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.modifyCachePool(null);
        fail("modified null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    // Remove the pool
    dfs.removeCachePool(poolName);
    // Do some bad removePools
    try {
        dfs.removeCachePool("pool99");
        fail("expected to get an exception when removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent cache pool", ioe);
    }
    try {
        dfs.removeCachePool(poolName);
        fail("expected to get an exception when removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent cache pool", ioe);
    }
    try {
        dfs.removeCachePool("");
        fail("removed empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        dfs.removeCachePool(null);
        fail("removed null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.removeCachePool("");
        fail("removed empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.removeCachePool(null);
        fail("removed null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    info = new CachePoolInfo("pool2");
    dfs.addCachePool(info);
    // Perform cache pool operations using a closed file system.
    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster.getNewFileSystemInstance(0);
    dfs1.close();
    try {
        dfs1.listCachePools();
        fail("listCachePools using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.addCachePool(info);
        fail("addCachePool using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.modifyCachePool(info);
        fail("modifyCachePool using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.removeCachePool(poolName);
        fail("removeCachePool using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
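
Distilled from the test above, a minimal standalone sketch of the pool lifecycle (add, modify, remove) against a live cluster. The pool name, owner/group values, and the fs.defaultFS address are hypothetical placeholders, not taken from the Hadoop source.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachePoolLifecycle {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address; adjust for your cluster.
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        // The cast holds when fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

        // Create a pool; all setters are optional and chainable.
        CachePoolInfo info = new CachePoolInfo("demo-pool")
            .setOwnerName("bob")
            .setGroupName("bobgroup")
            .setMode(new FsPermission((short) 0755))
            .setLimit(150L);
        dfs.addCachePool(info);

        // Adding the same pool twice raises an IOException, as the test asserts.
        try {
            dfs.addCachePool(info);
        } catch (IOException expected) {
            System.out.println("duplicate add rejected: " + expected.getMessage());
        }

        // Modify, then remove by name.
        dfs.modifyCachePool(new CachePoolInfo("demo-pool").setLimit(314L));
        dfs.removeCachePool("demo-pool");
    }
}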

Example 22 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testNoBackingReplica.

@Test(timeout = 60000)
public void testNoBackingReplica() throws Exception {
    // Cache all three replicas for a file.
    final Path filename = new Path("/noback");
    final short replication = (short) 3;
    DFSTestUtil.createFile(dfs, filename, 1, replication, 0x0BAC);
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(filename).setReplication(replication).build());
    waitForCachedBlocks(namenode, 1, replication, "testNoBackingReplica:1");
    // Pause cache reports while we change the replication factor.
    // This will orphan some cached replicas.
    DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, true);
    try {
        dfs.setReplication(filename, (short) 1);
        DFSTestUtil.waitForReplication(dfs, filename, (short) 1, 30000);
        // The cache locations should drop down to 1 even without cache reports.
        waitForCachedBlocks(namenode, 1, (short) 1, "testNoBackingReplica:2");
    } finally {
        DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
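
For the directive half of this outside a test harness, a small sketch under the assumption of a reachable HDFS client configuration; the path and pool name are hypothetical. It pins all replicas of a file via CacheDirectiveInfo.Builder and then lowers the file's replication, which is the situation the test exercises.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheAllReplicas {
    public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS in the loaded configuration points at HDFS.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path file = new Path("/data/hot-file");  // hypothetical path

        dfs.addCachePool(new CachePoolInfo("pool"));
        // Ask the NameNode to cache as many replicas as the file has.
        long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPool("pool")
            .setPath(file)
            .setReplication((short) 3)
            .build());

        // Dropping the file's replication also drops the number of
        // replicas that can stay cached, as the test above verifies.
        dfs.setReplication(file, (short) 1);

        // Clean up the directive when done.
        dfs.removeCacheDirective(id);
    }
}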

Example 23 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testListCachePoolPermissions.

@Test(timeout = 60000)
public void testListCachePoolPermissions() throws Exception {
    final UserGroupInformation myUser = UserGroupInformation.createRemoteUser("myuser");
    final DistributedFileSystem myDfs = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(myUser, conf);
    final String poolName = "poolparty";
    dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short) 0700)));
    // Should only see partial info
    RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
    CachePoolInfo info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertNull("Unexpected owner name", info.getOwnerName());
    assertNull("Unexpected group name", info.getGroupName());
    assertNull("Unexpected mode", info.getMode());
    assertNull("Unexpected limit", info.getLimit());
    // Modify the pool so myuser is now the owner
    final long limit = 99;
    dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit));
    // Should see full info
    it = myDfs.listCachePools();
    info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertEquals("Mismatched owner name", myUser.getShortUserName(), info.getOwnerName());
    assertNotNull("Expected group name", info.getGroupName());
    assertEquals("Mismatched mode", (short) 0700, info.getMode().toShort());
    assertEquals("Mismatched limit", limit, (long) info.getLimit());
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
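
A sketch of the listing side on its own, assuming a configured HDFS client: as the test shows, listCachePools returns every pool, but owner, group, mode, and limit come back null when the caller lacks read access to a pool, so callers should guard those fields.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class ListPools {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
        while (it.hasNext()) {
            CachePoolInfo info = it.next().getInfo();
            // Owner, group, mode, and limit are null for pools the caller
            // cannot read, so each field needs a null check.
            System.out.printf("pool=%s owner=%s limit=%s%n",
                info.getPoolName(),
                info.getOwnerName() == null ? "<hidden>" : info.getOwnerName(),
                info.getLimit() == null ? "<hidden>" : info.getLimit());
        }
    }
}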

Example 24 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testLimit.

@Test(timeout = 120000)
public void testLimit() throws Exception {
    try {
        dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
        fail("Should not be able to set a negative limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("negative", e);
    }
    final String destiny = "poolofdestiny";
    final Path path1 = new Path("/destiny");
    DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
    // Start off with a limit that is too small
    final CachePoolInfo poolInfo = new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
    dfs.addCachePool(poolInfo);
    final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path1).build();
    try {
        dfs.addCacheDirective(info1);
        fail("Should not be able to cache when there is no more limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Raise the limit up to fit and it should work this time
    poolInfo.setLimit(2 * BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    long id1 = dfs.addCacheDirective(info1);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo, "testLimit:1");
    // Adding another file, it shouldn't be cached
    final Path path2 = new Path("/failure");
    DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
    try {
        dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path2).build(), EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to add another cached file");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Bring the limit down, the first file should get uncached
    poolInfo.setLimit(BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    assertTrue("Expected a cache pool", it.hasNext());
    CachePoolStats stats = it.next().getStats();
    assertEquals("Overlimit bytes should be difference of needed and limit", BLOCK_SIZE, stats.getBytesOverlimit());
    // Moving a directive to a pool without enough limit should fail
    CachePoolInfo inadequate = new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
    dfs.addCachePool(inadequate);
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(), EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to move a directive to a pool without enough remaining capacity");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Succeeds when force=true
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(), EnumSet.of(CacheFlag.FORCE));
    // Also can add with force=true
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName()).setPath(path1).build(), EnumSet.of(CacheFlag.FORCE));
}
Also used: Path (org.apache.hadoop.fs.Path), CacheFlag (org.apache.hadoop.fs.CacheFlag), CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
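
A short sketch of the force path in isolation, assuming a configured client; the pool name, limit, and path are hypothetical. CacheFlag.FORCE tells the NameNode to skip the remaining-capacity check that otherwise rejects a directive larger than the pool's limit.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class ForceOverLimit {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        // A pool whose limit is smaller than the file we want cached.
        dfs.addCachePool(new CachePoolInfo("small-pool").setLimit(1024L));

        CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
            .setPool("small-pool")
            .setPath(new Path("/data/big-file"))  // hypothetical path
            .build();

        // Without FORCE the NameNode rejects a directive that exceeds the
        // pool's remaining capacity; FORCE skips that check.
        long id = dfs.addCacheDirective(directive, EnumSet.of(CacheFlag.FORCE));
        System.out.println("directive added with id " + id);
    }
}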

Example 25 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testCreateAndModifyPools.

@Test(timeout = 60000)
public void testCreateAndModifyPools() throws Exception {
    String poolName = "pool1";
    String ownerName = "abc";
    String groupName = "123";
    FsPermission mode = new FsPermission((short) 0755);
    long limit = 150;
    dfs.addCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
    RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
    CachePoolInfo info = iter.next().getInfo();
    assertEquals(poolName, info.getPoolName());
    assertEquals(ownerName, info.getOwnerName());
    assertEquals(groupName, info.getGroupName());
    ownerName = "def";
    groupName = "456";
    mode = new FsPermission((short) 0700);
    limit = 151;
    dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
    iter = dfs.listCachePools();
    info = iter.next().getInfo();
    assertEquals(poolName, info.getPoolName());
    assertEquals(ownerName, info.getOwnerName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long) info.getLimit());
    dfs.removeCachePool(poolName);
    iter = dfs.listCachePools();
    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
    proto.listCachePools(null);
    try {
        proto.removeCachePool("pool99");
        fail("expected to get an exception when removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
    }
    try {
        proto.removeCachePool(poolName);
        fail("expected to get an exception when removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
    }
    iter = dfs.listCachePools();
    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
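
One behavior this test relies on, sketched below under a hypothetical pool name: CachePoolInfo setters left unset stay null, and modifyCachePool appears to update only the non-null fields, so a pool can be patched one attribute at a time.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class PatchPool {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        dfs.addCachePool(new CachePoolInfo("pool1")
            .setOwnerName("abc")
            .setMode(new FsPermission((short) 0755))
            .setLimit(150L));

        // Only the limit is set here; owner and mode are left null, so the
        // NameNode keeps their existing values.
        dfs.modifyCachePool(new CachePoolInfo("pool1").setLimit(151L));

        dfs.removeCachePool("pool1");
    }
}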

Aggregations

CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 36 uses
Test (org.junit.Test): 26 uses
Path (org.apache.hadoop.fs.Path): 20 uses
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 20 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 14 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14 uses
IOException (java.io.IOException): 11 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 8 uses
CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 7 uses
CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 6 uses
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 4 uses
ByteBuffer (java.nio.ByteBuffer): 3 uses
Date (java.util.Date): 3 uses
LinkedList (java.util.LinkedList): 3 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 uses
HashSet (java.util.HashSet): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
CacheFlag (org.apache.hadoop.fs.CacheFlag): 2 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2 uses