Example 6 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in the Apache Hadoop project.

From the class PBHelperClient, the method convert:

public static CachePoolEntry convert(CachePoolEntryProto proto) {
    CachePoolInfo info = convert(proto.getInfo());
    CachePoolStats stats = convert(proto.getStats());
    return new CachePoolEntry(info, stats);
}
Also used: CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry)
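
The conversion also runs in the opposite direction when the NameNode answers a listCachePools RPC. A minimal round-trip sketch, assuming the inverse PBHelperClient.convert(CachePoolEntry) overload present in Hadoop trunk; the method name, pool name, and stats values below are illustrative:

// Needs org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.
public static void roundTripEntryDemo() {
    CachePoolInfo info = new CachePoolInfo("demoPool"); // illustrative pool name
    CachePoolStats stats = new CachePoolStats.Builder()
        .setBytesNeeded(1024).setBytesCached(512).build();
    CachePoolEntry entry = new CachePoolEntry(info, stats);
    // Serialize to protobuf and back; the copy should match field for field.
    CachePoolEntryProto proto = PBHelperClient.convert(entry);
    CachePoolEntry copy = PBHelperClient.convert(proto);
    assert copy.getInfo().getPoolName().equals("demoPool");
}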

Example 7 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in the Apache Hadoop project.

From the class PBHelperClient, the method convert:

public static CachePoolInfo convert(CachePoolInfoProto proto) {
    // Pool name is a required field, the rest are optional
    String poolName = Preconditions.checkNotNull(proto.getPoolName());
    CachePoolInfo info = new CachePoolInfo(poolName);
    if (proto.hasOwnerName()) {
        info.setOwnerName(proto.getOwnerName());
    }
    if (proto.hasGroupName()) {
        info.setGroupName(proto.getGroupName());
    }
    if (proto.hasMode()) {
        info.setMode(new FsPermission((short) proto.getMode()));
    }
    if (proto.hasLimit()) {
        info.setLimit(proto.getLimit());
    }
    if (proto.hasDefaultReplication()) {
        info.setDefaultReplication(Shorts.checkedCast(proto.getDefaultReplication()));
    }
    if (proto.hasMaxRelativeExpiry()) {
        info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
    }
    return info;
}
Also used: ByteString (com.google.protobuf.ByteString), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo)
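
Because only the pool name is required, a protobuf round trip preserves exactly the fields that were set. A short sketch, assuming the inverse PBHelperClient.convert(CachePoolInfo) overload; the pool name, owner, and limit are illustrative:

public static void roundTripInfoDemo() {
    // Only the pool name is mandatory; leave group, mode, and expiry unset.
    CachePoolInfo original = new CachePoolInfo("sparsePool")
        .setOwnerName("hdfs")
        .setLimit(64L * 1024 * 1024);
    CachePoolInfoProto proto = PBHelperClient.convert(original);
    CachePoolInfo copy = PBHelperClient.convert(proto);
    // Fields that were set survive the trip; unset ones come back null.
    assert "hdfs".equals(copy.getOwnerName());
    assert copy.getGroupName() == null;
}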

Example 8 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in the Apache Hadoop project.

From the class TestCacheDirectives, the method testExceedsCapacity:

@Test(timeout = 60000)
public void testExceedsCapacity() throws Exception {
    // Create a giant file
    final Path fileName = new Path("/exceeds");
    final long fileLen = CACHE_CAPACITY * (NUM_DATANODES * 2);
    int numCachedReplicas = (int) ((CACHE_CAPACITY * NUM_DATANODES) / BLOCK_SIZE);
    DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES, 0xFADED);
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas, "testExceeds:1");
    checkPendingCachedEmpty(cluster);
    // Check again after a pause to make sure nothing became pending meanwhile.
    Thread.sleep(1000);
    checkPendingCachedEmpty(cluster);
    // Try creating a file with giant-sized blocks that exceed cache capacity
    dfs.delete(fileName, false);
    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2, (short) 1, 0xFADED);
    checkPendingCachedEmpty(cluster);
    // Check again after a pause to make sure nothing became pending meanwhile.
    Thread.sleep(1000);
    checkPendingCachedEmpty(cluster);
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
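
A capacity shortfall like the one this test provokes does not raise an error; it shows up as a gap between bytesNeeded and bytesCached in the pool statistics. A hedged helper sketch (the method name and output format are illustrative) that reads those stats through the public listCachePools() API:

// Needs org.apache.hadoop.fs.RemoteIterator and java.io.IOException.
public static void printPoolBacklog(DistributedFileSystem dfs) throws IOException {
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
        CachePoolEntry entry = it.next();
        CachePoolStats stats = entry.getStats();
        // bytesNeeded larger than bytesCached means some replicas never got cached.
        System.out.printf("pool=%s needed=%d cached=%d%n",
            entry.getInfo().getPoolName(),
            stats.getBytesNeeded(), stats.getBytesCached());
    }
}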

Example 9 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in the Apache Hadoop project.

From the class TestCacheDirectives, the method testWaitForCachedReplicasInDirectory:

@Test(timeout = 120000)
public void testWaitForCachedReplicasInDirectory() throws Exception {
    // Create the pool
    final String pool = "friendlyPool";
    final CachePoolInfo poolInfo = new CachePoolInfo(pool);
    dfs.addCachePool(poolInfo);
    // Create some test files
    final List<Path> paths = new LinkedList<Path>();
    paths.add(new Path("/foo/bar"));
    paths.add(new Path("/foo/baz"));
    paths.add(new Path("/foo2/bar2"));
    paths.add(new Path("/foo2/baz2"));
    dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault());
    dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault());
    final int numBlocksPerFile = 2;
    for (Path path : paths) {
        FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile, (int) BLOCK_SIZE, (short) 3, false);
    }
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicasInDirectory:0");
    // cache entire directory
    long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foo")).setReplication((short) 2).setPool(pool).build());
    waitForCachedBlocks(namenode, 4, 8, "testWaitForCachedReplicasInDirectory:1:blocks");
    // Verify that listDirectives gives the stats we want.
    waitForCacheDirectiveStats(dfs, 4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE, 2, 2, new CacheDirectiveInfo.Builder().setPath(new Path("/foo")).build(), "testWaitForCachedReplicasInDirectory:1:directive");
    waitForCachePoolStats(dfs, 4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE, 2, 2, poolInfo, "testWaitForCachedReplicasInDirectory:1:pool");
    long id2 = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foo/bar")).setReplication((short) 4).setPool(pool).build());
    // wait for an additional 2 cached replicas to come up
    waitForCachedBlocks(namenode, 4, 10, "testWaitForCachedReplicasInDirectory:2:blocks");
    // the directory directive's stats are unchanged
    waitForCacheDirectiveStats(dfs, 4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE, 2, 2, new CacheDirectiveInfo.Builder().setPath(new Path("/foo")).build(), "testWaitForCachedReplicasInDirectory:2:directive-1");
    // verify /foo/bar's stats
    waitForCacheDirectiveStats(dfs,
        4 * numBlocksPerFile * BLOCK_SIZE,
        // only 3 because the file only has 3 replicas, not 4 as requested
        3 * numBlocksPerFile * BLOCK_SIZE,
        1,
        // only 0 because the file can't be fully cached
        0,
        new CacheDirectiveInfo.Builder().setPath(new Path("/foo/bar")).build(),
        "testWaitForCachedReplicasInDirectory:2:directive-2");
    waitForCachePoolStats(dfs, (4 + 4) * numBlocksPerFile * BLOCK_SIZE, (4 + 3) * numBlocksPerFile * BLOCK_SIZE, 3, 2, poolInfo, "testWaitForCachedReplicasInDirectory:2:pool");
    // remove and watch numCached go to 0
    dfs.removeCacheDirective(id);
    dfs.removeCacheDirective(id2);
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicasInDirectory:3:blocks");
    waitForCachePoolStats(dfs, 0, 0, 0, 0, poolInfo, "testWaitForCachedReplicasInDirectory:3:pool");
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), LinkedList (java.util.LinkedList), Test (org.junit.Test)
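
The per-directive stats that waitForCacheDirectiveStats polls are available directly from listCacheDirectives, which takes a CacheDirectiveInfo as a filter (a null filter lists everything). A short sketch (the helper name is illustrative) that reports caching progress for directives under one path:

public static void printDirectiveProgress(DistributedFileSystem dfs, Path filterPath)
        throws IOException {
    CacheDirectiveInfo filter =
        new CacheDirectiveInfo.Builder().setPath(filterPath).build();
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
    while (it.hasNext()) {
        CacheDirectiveEntry entry = it.next();
        // Compare what is cached against what the directive asked for.
        System.out.printf("id=%d cached=%d of %d bytes%n",
            entry.getInfo().getId(),
            entry.getStats().getBytesCached(),
            entry.getStats().getBytesNeeded());
    }
}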

Example 10 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in the Apache Hadoop project.

From the class TestCacheDirectives, the method testCacheManagerRestart:

@Test(timeout = 60000)
public void testCacheManagerRestart() throws Exception {
    SecondaryNameNode secondary = null;
    try {
        // Start a secondary namenode
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        secondary = new SecondaryNameNode(conf);
        // Create and validate a pool
        final String pool = "poolparty";
        String groupName = "partygroup";
        FsPermission mode = new FsPermission((short) 0777);
        long limit = 747;
        dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName).setMode(mode).setLimit(limit));
        RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        CachePoolInfo info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        // Create some cache entries
        int numEntries = 10;
        String entryPrefix = "/party-";
        long prevId = -1;
        final Date expiry = new Date();
        for (int i = 0; i < numEntries; i++) {
            prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path(entryPrefix + i)).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).build());
        }
        RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        // Checkpoint once to set some cache pools and directives on 2NN side
        secondary.doCheckpoint();
        // Add some more CacheManager state
        final String imagePool = "imagePool";
        dfs.addCachePool(new CachePoolInfo(imagePool));
        prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/image")).setPool(imagePool).build());
        // Save a new image to force a fresh fsimage download
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Checkpoint again forcing a reload of FSN state
        boolean fetchImage = secondary.doCheckpoint();
        assertTrue("Secondary should have fetched a new fsimage from NameNode", fetchImage);
        // Remove temp pool and directive
        dfs.removeCachePool(imagePool);
        // Restart namenode
        cluster.restartNameNode();
        // Check that state came back up
        pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
            assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foobar")).setPool(pool).build());
        assertEquals(prevId + 1, nextId);
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Date (java.util.Date), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
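
This test pins its directives with Expiration.newAbsolute so the expiry survives the restart unchanged; for TTL-style caching the companion factory newRelative is the usual choice. A sketch (the pool name and path are illustrative) that caches a path for one hour from the moment the directive is added:

public static long addExpiringDirective(DistributedFileSystem dfs) throws IOException {
    dfs.addCachePool(new CachePoolInfo("ttlPool")); // illustrative pool name
    // The relative expiry becomes an absolute deadline when the NameNode accepts it.
    return dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/hot-data"))
        .setPool("ttlPool")
        .setExpiration(CacheDirectiveInfo.Expiration.newRelative(60 * 60 * 1000L))
        .build());
}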

Aggregations

CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 36
Test (org.junit.Test): 26
Path (org.apache.hadoop.fs.Path): 20
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 20
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 14
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14
IOException (java.io.IOException): 11
AccessControlException (org.apache.hadoop.security.AccessControlException): 8
CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 7
CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 6
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 4
ByteBuffer (java.nio.ByteBuffer): 3
Date (java.util.Date): 3
LinkedList (java.util.LinkedList): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
HashSet (java.util.HashSet): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
CacheFlag (org.apache.hadoop.fs.CacheFlag): 2
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2