Example 6 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache: class TestCacheDirectives, method testListCachePoolPermissions. The test checks that a user with no access to a pool sees only the pool name when listing cache pools, and sees the full metadata once made the pool's owner.

@Test(timeout = 60000)
public void testListCachePoolPermissions() throws Exception {
    final UserGroupInformation myUser = UserGroupInformation.createRemoteUser("myuser");
    final DistributedFileSystem myDfs = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(myUser, conf);
    final String poolName = "poolparty";
    dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short) 0700)));
    // Should only see partial info
    RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
    CachePoolInfo info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertNull("Unexpected owner name", info.getOwnerName());
    assertNull("Unexpected group name", info.getGroupName());
    assertNull("Unexpected mode", info.getMode());
    assertNull("Unexpected limit", info.getLimit());
    // Modify the pool so myuser is now the owner
    final long limit = 99;
    dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit));
    // Should see full info
    it = myDfs.listCachePools();
    info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertEquals("Mismatched owner name", myUser.getShortUserName(), info.getOwnerName());
    assertNotNull("Expected group name", info.getGroupName());
    assertEquals("Mismatched mode", (short) 0700, info.getMode().toShort());
    assertEquals("Mismatched limit", limit, (long) info.getLimit());
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
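
These test methods reference fields that are created in the test fixture rather than in the methods themselves: conf, dfs, and (in a later example) proto. A minimal sketch of such a fixture, assuming a MiniDFSCluster; the real TestCacheDirectives setup also tunes additional caching-related keys:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Before;

private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem dfs;

@Before
public void setup() throws Exception {
    conf = new HdfsConfiguration();
    // Give the DataNode some locked memory so blocks can actually be cached.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, 64 * 1024);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    dfs = cluster.getFileSystem();
}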

Example 7 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache: class TestCacheDirectives, method testLimit. The test exercises cache pool byte limits: negative limits are rejected, directives exceeding a pool's remaining capacity fail, lowering the limit uncaches data, and CacheFlag.FORCE bypasses the capacity check.

@Test(timeout = 120000)
public void testLimit() throws Exception {
    try {
        dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
        fail("Should not be able to set a negative limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("negative", e);
    }
    final String destiny = "poolofdestiny";
    final Path path1 = new Path("/destiny");
    DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
    // Start off with a limit that is too small
    final CachePoolInfo poolInfo = new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
    dfs.addCachePool(poolInfo);
    final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path1).build();
    try {
        dfs.addCacheDirective(info1);
        fail("Should not be able to cache when there is no more limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Raise the limit up to fit and it should work this time
    poolInfo.setLimit(2 * BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    long id1 = dfs.addCacheDirective(info1);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo, "testLimit:1");
    // Adding another file, it shouldn't be cached
    final Path path2 = new Path("/failure");
    DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
    try {
        dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPool(destiny)
            .setPath(path2)
            .build(), EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to add another cached file");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Bring the limit down, the first file should get uncached
    poolInfo.setLimit(BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    assertTrue("Expected a cache pool", it.hasNext());
    CachePoolStats stats = it.next().getStats();
    assertEquals("Overlimit bytes should be difference of needed and limit", BLOCK_SIZE, stats.getBytesOverlimit());
    // Moving a directive to a pool without enough limit should fail
    CachePoolInfo inadequate = new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
    dfs.addCachePool(inadequate);
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
            .setId(id1)
            .setPool(inadequate.getPoolName())
            .build(), EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to move a directive to a pool that lacks capacity");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Succeeds when force=true
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
        .setId(id1)
        .setPool(inadequate.getPoolName())
        .build(), EnumSet.of(CacheFlag.FORCE));
    // Also can add with force=true
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPool(inadequate.getPoolName())
        .setPath(path1)
        .build(), EnumSet.of(CacheFlag.FORCE));
}
Also used: Path (org.apache.hadoop.fs.Path), CacheFlag (org.apache.hadoop.fs.CacheFlag), CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
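
waitForCachePoolStats is a private helper of TestCacheDirectives and is not shown on this page. A plausible reconstruction, assuming Guava's Supplier and GenericTestUtils.waitFor; the parameter order (bytes needed, bytes cached, files needed, files cached) is inferred from the call sites above:

private static void waitForCachePoolStats(final DistributedFileSystem dfs,
        final long bytesNeeded, final long bytesCached,
        final long filesNeeded, final long filesCached,
        final CachePoolInfo pool, final String infoString) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            try {
                // Poll the pool's live stats until they match the targets.
                RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
                while (it.hasNext()) {
                    CachePoolEntry entry = it.next();
                    if (!entry.getInfo().getPoolName().equals(pool.getPoolName())) {
                        continue;
                    }
                    CachePoolStats stats = entry.getStats();
                    return stats.getBytesNeeded() == bytesNeeded
                        && stats.getBytesCached() == bytesCached
                        && stats.getFilesNeeded() == filesNeeded
                        && stats.getFilesCached() == filesCached;
                }
            } catch (IOException e) {
                fail(infoString + ": unexpected exception: " + e);
            }
            return false;
        }
    }, 500, 60000);
}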

Example 8 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache: class TestCacheDirectives, method testCreateAndModifyPools. The test creates a pool, modifies its owner, group, mode, and limit, removes it, and verifies that removing a non-existent pool raises an exception.

@Test(timeout = 60000)
public void testCreateAndModifyPools() throws Exception {
    String poolName = "pool1";
    String ownerName = "abc";
    String groupName = "123";
    FsPermission mode = new FsPermission((short) 0755);
    long limit = 150;
    dfs.addCachePool(new CachePoolInfo(poolName)
        .setOwnerName(ownerName)
        .setGroupName(groupName)
        .setMode(mode)
        .setLimit(limit));
    RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
    CachePoolInfo info = iter.next().getInfo();
    assertEquals(poolName, info.getPoolName());
    assertEquals(ownerName, info.getOwnerName());
    assertEquals(groupName, info.getGroupName());
    ownerName = "def";
    groupName = "456";
    mode = new FsPermission((short) 0700);
    limit = 151;
    dfs.modifyCachePool(new CachePoolInfo(poolName)
        .setOwnerName(ownerName)
        .setGroupName(groupName)
        .setMode(mode)
        .setLimit(limit));
    iter = dfs.listCachePools();
    info = iter.next().getInfo();
    assertEquals(poolName, info.getPoolName());
    assertEquals(ownerName, info.getOwnerName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long) info.getLimit());
    dfs.removeCachePool(poolName);
    iter = dfs.listCachePools();
    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
    proto.listCachePools(null);
    try {
        proto.removeCachePool("pool99");
        fail("expected to get an exception when " + "removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
    }
    try {
        proto.removeCachePool(poolName);
        fail("expected to get an exception when " + "removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
    }
    iter = dfs.listCachePools();
    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
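
The proto field used above is assumed to be the NameNode's raw RPC interface (NamenodeProtocols), presumably obtained from the mini cluster. Unlike DistributedFileSystem#listCachePools(), which wraps iteration in a RemoteIterator, the protocol method returns one batch at a time:

// Sketch (assumed fixture wiring, not shown on this page):
NamenodeProtocols proto = cluster.getNameNodeRpc();
// Pass the last pool name seen, or the empty string for the first call.
BatchedEntries<CachePoolEntry> batch = proto.listCachePools("");
for (int i = 0; i < batch.size(); i++) {
    System.out.println(batch.get(i).getInfo().getPoolName());
}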

Example 9 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache: class TestRetryCacheWithHA, method listCachePools. This helper verifies that every expected pool is returned while the active NameNode is failed over between entries, exercising cache pool listing across HA transitions.

@SuppressWarnings("unchecked")
private void listCachePools(HashSet<String> poolNames, int active) throws Exception {
    HashSet<String> tmpNames = (HashSet<String>) poolNames.clone();
    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    int poolCount = poolNames.size();
    for (int i = 0; i < poolCount; i++) {
        CachePoolEntry pool = pools.next();
        String poolName = pool.getInfo().getPoolName();
        assertTrue("Unexpected pool name " + poolName, tmpNames.remove(poolName));
        if (i % 2 == 0) {
            int standby = active;
            active = (standby == 0) ? 1 : 0;
            cluster.transitionToStandby(standby);
            cluster.transitionToActive(active);
            cluster.waitActive(active);
        }
    }
    assertTrue("All pools must be found", tmpNames.isEmpty());
}
Also used: CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), HashSet (java.util.HashSet)
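
This helper assumes an HA mini cluster with two NameNodes (indices 0 and 1), so it can flip the active NameNode partway through the iteration. A sketch of the fixture it presumes, using the HDFS HA test utilities (the real TestRetryCacheWithHA setup also configures the retry cache):

cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    // No DataNodes are needed just to list cache pools.
    .numDataNodes(0)
    .build();
cluster.waitActive();
cluster.transitionToActive(0);
// A failover-aware client, so listing keeps working across transitions.
dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);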

Aggregations

CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 9 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 7 uses
Test (org.junit.Test): 5 uses
Path (org.apache.hadoop.fs.Path): 3 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3 uses
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 3 uses
IOException (java.io.IOException): 2 uses
Date (java.util.Date): 2 uses
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 2 uses
CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 2 uses
CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats): 2 uses
ServiceException (com.google.protobuf.ServiceException): 1 use
HashSet (java.util.HashSet): 1 use
CacheFlag (org.apache.hadoop.fs.CacheFlag): 1 use
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 use
ListCachePoolsResponseProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto): 1 use
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 1 use