Example 1 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache.

From class PBHelperClient, method convert:

public static CachePoolEntry convert(CachePoolEntryProto proto) {
    // Convert the nested protobuf messages to their Java protocol classes,
    // then pair them into a single CachePoolEntry.
    CachePoolInfo info = convert(proto.getInfo());
    CachePoolStats stats = convert(proto.getStats());
    return new CachePoolEntry(info, stats);
}
Also used: CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry)
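
The conversion pairs a CachePoolInfo (the pool's static configuration: name, owner, mode, limit) with a CachePoolStats (live counters). Below is a minimal sketch of consuming such entries on the client side; it assumes an already-initialized DistributedFileSystem named dfs, and the class name ListPoolsSketch is hypothetical:

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;

public class ListPoolsSketch {
    // Print every pool's name and cache utilization. `dfs` is assumed to be
    // connected to a running NameNode.
    static void printPools(DistributedFileSystem dfs) throws Exception {
        RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
        while (it.hasNext()) {
            CachePoolEntry entry = it.next();
            CachePoolInfo info = entry.getInfo();    // static configuration
            CachePoolStats stats = entry.getStats(); // live counters
            System.out.println(info.getPoolName() + ": " + stats.getBytesCached()
                + " of " + stats.getBytesNeeded() + " bytes cached");
        }
    }
}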

Example 2 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache.

From class TestCacheDirectives, method testCacheManagerRestart:

@Test(timeout = 60000)
public void testCacheManagerRestart() throws Exception {
    SecondaryNameNode secondary = null;
    try {
        // Start a secondary namenode
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        secondary = new SecondaryNameNode(conf);
        // Create and validate a pool
        final String pool = "poolparty";
        String groupName = "partygroup";
        FsPermission mode = new FsPermission((short) 0777);
        long limit = 747;
        dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName).setMode(mode).setLimit(limit));
        RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        CachePoolInfo info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        // Create some cache entries
        int numEntries = 10;
        String entryPrefix = "/party-";
        long prevId = -1;
        final Date expiry = new Date();
        for (int i = 0; i < numEntries; i++) {
            prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path(entryPrefix + i)).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).build());
        }
        RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        // Checkpoint once to set some cache pools and directives on 2NN side
        secondary.doCheckpoint();
        // Add some more CacheManager state
        final String imagePool = "imagePool";
        dfs.addCachePool(new CachePoolInfo(imagePool));
        prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/image")).setPool(imagePool).build());
        // Save a new image to force a fresh fsimage download
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Checkpoint again forcing a reload of FSN state
        boolean fetchImage = secondary.doCheckpoint();
        assertTrue("Secondary should have fetched a new fsimage from NameNode", fetchImage);
        // Remove temp pool and directive
        dfs.removeCachePool(imagePool);
        // Restart namenode
        cluster.restartNameNode();
        // Check that state came back up
        pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
            assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foobar")).setPool(pool).build());
        assertEquals(prevId + 1, nextId);
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Date (java.util.Date), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
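
The test lists directives with a null filter, which returns every directive in the namespace. A CacheDirectiveInfo can also serve as a filter to restrict the listing to a single pool; a short sketch under that assumption (the class name is hypothetical):

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ListDirectivesSketch {
    // List only the directives that belong to `pool`; the test above passes
    // null instead, which lists everything.
    static void printDirectives(DistributedFileSystem dfs, String pool) throws Exception {
        CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().setPool(pool).build();
        RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
        while (it.hasNext()) {
            CacheDirectiveInfo cd = it.next().getInfo();
            System.out.println(cd.getId() + " -> " + cd.getPath());
        }
    }
}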

Example 3 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project SSM by Intel-bigdata.

From class CacheFileAction, method createCachePool:

private void createCachePool() throws Exception {
    RemoteIterator<CachePoolEntry> poolEntries = dfsClient.listCachePools();
    while (poolEntries.hasNext()) {
        CachePoolEntry poolEntry = poolEntries.next();
        if (poolEntry.getInfo().getPoolName().equals(SSMPOOL)) {
            return;
        }
    }
    dfsClient.addCachePool(new CachePoolInfo(SSMPOOL));
}
Also used: CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry)
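
Once createCachePool guarantees the pool exists, the action can submit a directive through the same DFSClient. A sketch of that follow-up step, assuming the pool name and target file path are supplied by the caller (the helper class below is hypothetical):

import java.util.EnumSet;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class CacheFileSketch {
    // Ask the NameNode to cache one file in an existing pool; returns the
    // id of the newly created directive.
    static long cacheFile(DFSClient dfsClient, String poolName, String file) throws Exception {
        CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
            .setPath(new Path(file))
            .setPool(poolName)
            .build();
        // No flags here; EnumSet.of(CacheFlag.FORCE) would bypass the pool's
        // resource limit check.
        return dfsClient.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    }
}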

Example 4 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache.

From class ClientNamenodeProtocolServerSideTranslatorPB, method listCachePools:

@Override
public ListCachePoolsResponseProto listCachePools(RpcController controller, ListCachePoolsRequestProto request) throws ServiceException {
    try {
        BatchedEntries<CachePoolEntry> entries = server.listCachePools(request.getPrevPoolName());
        ListCachePoolsResponseProto.Builder responseBuilder = ListCachePoolsResponseProto.newBuilder();
        responseBuilder.setHasMore(entries.hasMore());
        for (int i = 0, n = entries.size(); i < n; i++) {
            responseBuilder.addEntries(PBHelperClient.convert(entries.get(i)));
        }
        return responseBuilder.build();
    } catch (IOException e) {
        throw new ServiceException(e);
    }
}
Also used: ServiceException (com.google.protobuf.ServiceException), ListCachePoolsResponseProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto), IOException (java.io.IOException), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry)
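
listCachePools is a batched RPC: the response carries a hasMore flag, and the client pages through results by passing the last pool name it saw as prevPoolName. A minimal client-side paging loop against a ClientProtocol proxy (a sketch; proxy setup is omitted and the class name is hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class PoolPagingSketch {
    // Drain all pools across batches. An empty prevPoolName starts the
    // listing from the beginning.
    static void printAllPools(ClientProtocol namenode) throws IOException {
        String prevPool = "";
        boolean hasMore = true;
        while (hasMore) {
            BatchedEntries<CachePoolEntry> batch = namenode.listCachePools(prevPool);
            for (int i = 0, n = batch.size(); i < n; i++) {
                prevPool = batch.get(i).getInfo().getPoolName();
                System.out.println(prevPool);
            }
            hasMore = batch.hasMore();
        }
    }
}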

Example 5 with CachePoolEntry

Use of org.apache.hadoop.hdfs.protocol.CachePoolEntry in project hadoop by apache.

From class TestCacheDirectives, method testMaxRelativeExpiry:

@Test(timeout = 30000)
public void testMaxRelativeExpiry() throws Exception {
    // Test that negative and really big max expirations can't be set during add
    try {
        dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
        fail("Added a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("negative", e);
    }
    try {
        dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
        fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("too big", e);
    }
    // Test that setting a max relative expiry on a pool works
    CachePoolInfo coolPool = new CachePoolInfo("coolPool");
    final long poolExpiration = 1000 * 60 * 10L; // ten minutes
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
    RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
    CachePoolInfo listPool = poolIt.next().getInfo();
    assertFalse("Should only be one pool", poolIt.hasNext());
    assertEquals("Expected max relative expiry to match set value", poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
    // Test that negative and really big max expirations can't be modified
    try {
        dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1L));
        fail("Added a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
        assertExceptionContains("negative", e);
    }
    try {
        dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
        fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
        assertExceptionContains("too big", e);
    }
    // Test that adding a directive without an expiration uses the pool's max
    CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder().setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build();
    dfs.addCacheDirective(defaultExpiry);
    RemoteIterator<CacheDirectiveEntry> dirIt = dfs.listCacheDirectives(defaultExpiry);
    CacheDirectiveInfo listInfo = dirIt.next().getInfo();
    assertFalse("Should only have one entry in listing", dirIt.hasNext());
    long listExpiration = listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
    assertTrue("Directive expiry should be approximately the pool's max expiry", Math.abs(listExpiration - poolExpiration) < 10 * 1000);
    // Test that the max is enforced on add for relative and absolute
    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
        fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
        fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test that max is enforced on modify for relative and absolute Expirations
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test some giant limit values with add
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
        fail("Added a directive with a gigantic max value");
    } catch (IllegalArgumentException e) {
        assertExceptionContains("is too far in the future", e);
    }
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
        fail("Added a directive with a gigantic max value");
    } catch (InvalidRequestException e) {
        assertExceptionContains("is too far in the future", e);
    }
    // Test some giant limit values with modify
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.NEVER).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("is too far in the future", e);
    }
    // Test that the max is enforced on modify correctly when changing pools
    CachePoolInfo destPool = new CachePoolInfo("destPool");
    dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).build());
        fail("Modified a directive to a pool with a lower max expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).setExpiration(Expiration.newRelative(poolExpiration / 2)).build());
    dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build());
    listInfo = dirIt.next().getInfo();
    listExpiration = listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
    assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately " + poolExpiration / 2, Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
    // Test that cache pool and directive expiry can be modified back to never
    dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
    poolIt = dfs.listCachePools();
    listPool = poolIt.next().getInfo();
    while (!listPool.getPoolName().equals(destPool.getPoolName())) {
        listPool = poolIt.next().getInfo();
    }
    assertEquals("Expected max relative expiry to match set value", CachePoolInfo.RELATIVE_EXPIRY_NEVER, listPool.getMaxRelativeExpiryMs().longValue());
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER)).build());
    // Test modifying close to the limit
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1)).build());
}
Also used: Path (org.apache.hadoop.fs.Path), Date (java.util.Date), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
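
The two expiration flavors exercised above differ only in their reference point: newRelative counts from the moment the directive is added or modified, while newAbsolute names a wall-clock time; both must stay under the pool's maxRelativeExpiryMs. A condensed sketch of the happy path (the pool name and paths are hypothetical):

import java.util.Date;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class ExpirySketch {
    static void demo(DistributedFileSystem dfs) throws Exception {
        final long hourMs = 60 * 60 * 1000L;
        // Pool caps every directive's effective lifetime at one hour.
        dfs.addCachePool(new CachePoolInfo("demoPool").setMaxRelativeExpiryMs(hourMs));
        // Relative: expires 30 minutes after the directive is added.
        dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPath(new Path("/demo-a")).setPool("demoPool")
            .setExpiration(Expiration.newRelative(hourMs / 2)).build());
        // Absolute: expires at a fixed wall-clock time, still under the cap.
        dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPath(new Path("/demo-b")).setPool("demoPool")
            .setExpiration(Expiration.newAbsolute(new Date().getTime() + hourMs / 2)).build());
    }
}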

Aggregations

CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 9 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 7 uses
Test (org.junit.Test): 5 uses
Path (org.apache.hadoop.fs.Path): 3 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3 uses
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 3 uses
IOException (java.io.IOException): 2 uses
Date (java.util.Date): 2 uses
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 2 uses
CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 2 uses
CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats): 2 uses
ServiceException (com.google.protobuf.ServiceException): 1 use
HashSet (java.util.HashSet): 1 use
CacheFlag (org.apache.hadoop.fs.CacheFlag): 1 use
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 use
ListCachePoolsResponseProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto): 1 use
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 1 use