
Example 1 with CachePoolStats

Use of org.apache.hadoop.hdfs.protocol.CachePoolStats in project hadoop by apache.

From the class PBHelperClient, the method convert:

public static CachePoolEntry convert(CachePoolEntryProto proto) {
    // Translate the wire-format protobuf message into client-side types.
    CachePoolInfo info = convert(proto.getInfo());
    CachePoolStats stats = convert(proto.getStats());
    // A CachePoolEntry pairs a pool's configuration with its live statistics.
    return new CachePoolEntry(info, stats);
}
Also used: CachePoolStats(org.apache.hadoop.hdfs.protocol.CachePoolStats) CachePoolInfo(org.apache.hadoop.hdfs.protocol.CachePoolInfo) CachePoolEntry(org.apache.hadoop.hdfs.protocol.CachePoolEntry)
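The CachePoolEntry produced by this conversion is the unit a client receives when enumerating cache pools. As a minimal usage sketch (the printPoolStats helper below is hypothetical, and it assumes an already-initialized DistributedFileSystem named dfs), each entry pairs the pool's CachePoolInfo with its CachePoolStats counters:

import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;

// Hypothetical helper: report caching progress for every pool.
static void printPoolStats(DistributedFileSystem dfs) throws IOException {
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
        CachePoolEntry entry = it.next();
        CachePoolStats stats = entry.getStats();
        System.out.printf("%s: %d of %d bytes cached, %d bytes over limit%n",
            entry.getInfo().getPoolName(), stats.getBytesCached(),
            stats.getBytesNeeded(), stats.getBytesOverlimit());
    }
}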

Example 2 with CachePoolStats

Use of org.apache.hadoop.hdfs.protocol.CachePoolStats in project hadoop by apache.

From the class TestCacheDirectives, the method testLimit:

@Test(timeout = 120000)
public void testLimit() throws Exception {
    try {
        dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99L));
        fail("Should not be able to set a negative limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("negative", e);
    }
    final String destiny = "poolofdestiny";
    final Path path1 = new Path("/destiny");
    DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
    // Start off with a limit that is too small
    final CachePoolInfo poolInfo = new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
    dfs.addCachePool(poolInfo);
    final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path1).build();
    try {
        dfs.addCacheDirective(info1);
        fail("Should not be able to cache when there is no more limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Raise the limit up to fit and it should work this time
    poolInfo.setLimit(2 * BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    long id1 = dfs.addCacheDirective(info1);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo, "testLimit:1");
    // Adding another file, it shouldn't be cached
    final Path path2 = new Path("/failure");
    DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
    try {
        dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPool(destiny).setPath(path2).build(),
            EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to add another cached file");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Bring the limit down, the first file should get uncached
    poolInfo.setLimit(BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    assertTrue("Expected a cache pool", it.hasNext());
    CachePoolStats stats = it.next().getStats();
    assertEquals("Overlimit bytes should be difference of needed and limit", BLOCK_SIZE, stats.getBytesOverlimit());
    // Moving a directive to a pool without enough limit should fail
    CachePoolInfo inadequate = new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
    dfs.addCachePool(inadequate);
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
            .setId(id1).setPool(inadequate.getPoolName()).build(),
            EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to move a directive to a pool without enough capacity");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Succeeds when force=true
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
        .setId(id1).setPool(inadequate.getPoolName()).build(),
        EnumSet.of(CacheFlag.FORCE));
    // Also can add with force=true
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPool(inadequate.getPoolName()).setPath(path1).build(),
        EnumSet.of(CacheFlag.FORCE));
}
Also used: Path(org.apache.hadoop.fs.Path) CacheFlag(org.apache.hadoop.fs.CacheFlag) CachePoolStats(org.apache.hadoop.hdfs.protocol.CachePoolStats) CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) InvalidRequestException(org.apache.hadoop.fs.InvalidRequestException) CachePoolInfo(org.apache.hadoop.hdfs.protocol.CachePoolInfo) CachePoolEntry(org.apache.hadoop.hdfs.protocol.CachePoolEntry) Test(org.junit.Test)
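testLimit relies on a waitForCachePoolStats helper, defined elsewhere in TestCacheDirectives, to block until the NameNode's cache rescanner brings the pool's stats to the expected values. A minimal sketch of such a poller, assuming Hadoop's GenericTestUtils.waitFor and a Guava Supplier (the real helper's signature and logging may differ), could look like:

import java.io.IOException;
import com.google.common.base.Supplier;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.test.GenericTestUtils;

// Sketch, not the test's actual helper: poll listCachePools() until the
// named pool reports the expected byte and file counters.
private static void waitForCachePoolStats(final DistributedFileSystem dfs,
        final long bytesNeeded, final long bytesCached,
        final long filesNeeded, final long filesCached,
        final CachePoolInfo pool, final String context) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            try {
                RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
                while (it.hasNext()) {
                    CachePoolEntry entry = it.next();
                    if (!entry.getInfo().getPoolName().equals(pool.getPoolName())) {
                        continue;
                    }
                    CachePoolStats stats = entry.getStats();
                    return stats.getBytesNeeded() == bytesNeeded
                        && stats.getBytesCached() == bytesCached
                        && stats.getFilesNeeded() == filesNeeded
                        && stats.getFilesCached() == filesCached;
                }
            } catch (IOException e) {
                // Transient RPC failure; keep polling until the timeout.
            }
            return false;
        }
    }, 500, 60000); // check every 500 ms, time out after 60 s (context)
}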

Aggregations

CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 2 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 2 uses
CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats): 2 uses
CacheFlag (org.apache.hadoop.fs.CacheFlag): 1 use
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 1 use
Test (org.junit.Test): 1 use