
Example 26 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testNoBackingReplica.

@Test(timeout = 60000)
public void testNoBackingReplica() throws Exception {
    // Cache all three replicas for a file.
    final Path filename = new Path("/noback");
    final short replication = (short) 3;
    DFSTestUtil.createFile(dfs, filename, 1, replication, 0x0BAC);
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPool("pool").setPath(filename).setReplication(replication).build());
    waitForCachedBlocks(namenode, 1, replication, "testNoBackingReplica:1");
    // Pause cache reports while we change the replication factor.
    // This will orphan some cached replicas.
    DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, true);
    try {
        dfs.setReplication(filename, (short) 1);
        DFSTestUtil.waitForReplication(dfs, filename, (short) 1, 30000);
        // The cache locations should drop down to 1 even without cache reports.
        waitForCachedBlocks(namenode, 1, (short) 1, "testNoBackingReplica:2");
    } finally {
        DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)

Example 27 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testListCachePoolPermissions.

@Test(timeout = 60000)
public void testListCachePoolPermissions() throws Exception {
    final UserGroupInformation myUser = UserGroupInformation.createRemoteUser("myuser");
    final DistributedFileSystem myDfs = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(myUser, conf);
    final String poolName = "poolparty";
    dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short) 0700)));
    // Should only see partial info
    RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
    CachePoolInfo info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertNull("Unexpected owner name", info.getOwnerName());
    assertNull("Unexpected group name", info.getGroupName());
    assertNull("Unexpected mode", info.getMode());
    assertNull("Unexpected limit", info.getLimit());
    // Modify the pool so myuser is now the owner
    final long limit = 99;
    dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit));
    // Should see full info
    it = myDfs.listCachePools();
    info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertEquals("Mismatched owner name", myUser.getShortUserName(), info.getOwnerName());
    assertNotNull("Expected group name", info.getGroupName());
    assertEquals("Mismatched mode", (short) 0700, info.getMode().toShort());
    assertEquals("Mismatched limit", limit, (long) info.getLimit());
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)

Example 28 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testLimit.

@Test(timeout = 120000)
public void testLimit() throws Exception {
    try {
        dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
        fail("Should not be able to set a negative limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("negative", e);
    }
    final String destiny = "poolofdestiny";
    final Path path1 = new Path("/destiny");
    DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
    // Start off with a limit that is too small
    final CachePoolInfo poolInfo = new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
    dfs.addCachePool(poolInfo);
    final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path1).build();
    try {
        dfs.addCacheDirective(info1);
        fail("Should not be able to cache when there is no more limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Raise the limit up to fit and it should work this time
    poolInfo.setLimit(2 * BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    long id1 = dfs.addCacheDirective(info1);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo, "testLimit:1");
    // Adding another file, it shouldn't be cached
    final Path path2 = new Path("/failure");
    DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
    try {
        dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPool(destiny).setPath(path2).build(),
            EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to add another cached file");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Bring the limit down, the first file should get uncached
    poolInfo.setLimit(BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    assertTrue("Expected a cache pool", it.hasNext());
    CachePoolStats stats = it.next().getStats();
    assertEquals("Overlimit bytes should be difference of needed and limit", BLOCK_SIZE, stats.getBytesOverlimit());
    // Moving a directive to a pool without enough limit should fail
    CachePoolInfo inadequate = new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
    dfs.addCachePool(inadequate);
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
            .setId(id1).setPool(inadequate.getPoolName()).build(),
            EnumSet.noneOf(CacheFlag.class));
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Succeeds when force=true
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
        .setId(id1).setPool(inadequate.getPoolName()).build(),
        EnumSet.of(CacheFlag.FORCE));
    // Also can add with force=true
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPool(inadequate.getPoolName()).setPath(path1).build(),
        EnumSet.of(CacheFlag.FORCE));
}
Also used: Path (org.apache.hadoop.fs.Path), CacheFlag (org.apache.hadoop.fs.CacheFlag), CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)

Example 29 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testCreateAndModifyPools.

@Test(timeout = 60000)
public void testCreateAndModifyPools() throws Exception {
    String poolName = "pool1";
    String ownerName = "abc";
    String groupName = "123";
    FsPermission mode = new FsPermission((short) 0755);
    long limit = 150;
    dfs.addCachePool(new CachePoolInfo(poolName)
        .setOwnerName(ownerName).setGroupName(groupName)
        .setMode(mode).setLimit(limit));
    RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
    CachePoolInfo info = iter.next().getInfo();
    assertEquals(poolName, info.getPoolName());
    assertEquals(ownerName, info.getOwnerName());
    assertEquals(groupName, info.getGroupName());
    ownerName = "def";
    groupName = "456";
    mode = new FsPermission((short) 0700);
    limit = 151;
    dfs.modifyCachePool(new CachePoolInfo(poolName)
        .setOwnerName(ownerName).setGroupName(groupName)
        .setMode(mode).setLimit(limit));
    iter = dfs.listCachePools();
    info = iter.next().getInfo();
    assertEquals(poolName, info.getPoolName());
    assertEquals(ownerName, info.getOwnerName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long) info.getLimit());
    dfs.removeCachePool(poolName);
    iter = dfs.listCachePools();
    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
    proto.listCachePools(null);
    try {
        proto.removeCachePool("pool99");
        fail("expected to get an exception when " + "removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
    }
    try {
        proto.removeCachePool(poolName);
        fail("expected to get an exception when " + "removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
    }
    iter = dfs.listCachePools();
    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)

Example 30 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testReplicationFactor.

/**
   * Tests stepping the cache replication factor up and down, checking the
   * number of cached replicas and blocks as well as the advertised locations.
   * @throws Exception
   */
@Test(timeout = 120000)
public void testReplicationFactor() throws Exception {
    // Create the pool
    final String pool = "friendlyPool";
    dfs.addCachePool(new CachePoolInfo(pool));
    // Create some test files
    final List<Path> paths = new LinkedList<Path>();
    paths.add(new Path("/foo/bar"));
    paths.add(new Path("/foo/baz"));
    paths.add(new Path("/foo2/bar2"));
    paths.add(new Path("/foo2/baz2"));
    dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault());
    dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault());
    final int numBlocksPerFile = 2;
    for (Path path : paths) {
        FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile, (int) BLOCK_SIZE, (short) 3, false);
    }
    waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:0");
    checkNumCachedReplicas(dfs, paths, 0, 0);
    // cache directory
    long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/foo")).setReplication((short) 1).setPool(pool).build());
    waitForCachedBlocks(namenode, 4, 4, "testReplicationFactor:1");
    checkNumCachedReplicas(dfs, paths, 4, 4);
    // step up the replication factor
    for (int i = 2; i <= 3; i++) {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setReplication((short) i).build());
        waitForCachedBlocks(namenode, 4, 4 * i, "testReplicationFactor:2");
        checkNumCachedReplicas(dfs, paths, 4, 4 * i);
    }
    // step it down
    for (int i = 2; i >= 1; i--) {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setReplication((short) i).build());
        waitForCachedBlocks(namenode, 4, 4 * i, "testReplicationFactor:3");
        checkNumCachedReplicas(dfs, paths, 4, 4 * i);
    }
    // remove and watch numCached go to 0
    dfs.removeCacheDirective(id);
    waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:4");
    checkNumCachedReplicas(dfs, paths, 0, 0);
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), LinkedList (java.util.LinkedList), Test (org.junit.Test)

Aggregations

CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 36 uses
Test (org.junit.Test): 26 uses
Path (org.apache.hadoop.fs.Path): 20 uses
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 20 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 14 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14 uses
IOException (java.io.IOException): 11 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 8 uses
CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 7 uses
CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 6 uses
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 4 uses
ByteBuffer (java.nio.ByteBuffer): 3 uses
Date (java.util.Date): 3 uses
LinkedList (java.util.LinkedList): 3 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 uses
HashSet (java.util.HashSet): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
CacheFlag (org.apache.hadoop.fs.CacheFlag): 2 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2 uses
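
For quick reference, the pool-management calls exercised across these examples can be combined into a single lifecycle: create a pool, list and inspect it, modify it, and remove it. The sketch below is a minimal illustration of that flow, not code from the Hadoop project: the pool name "examplePool", the owner and group names, and the limit values are made up for illustration, and it assumes an already-initialized DistributedFileSystem handle named dfs (for example, one obtained from a MiniDFSCluster as in the tests above).

import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

// Minimal sketch of the CachePoolInfo lifecycle; names and limits are illustrative.
void cachePoolLifecycle(DistributedFileSystem dfs) throws IOException {
    // Create a pool with an owner, group, mode, and byte limit.
    dfs.addCachePool(new CachePoolInfo("examplePool")
        .setOwnerName("exampleOwner")
        .setGroupName("exampleGroup")
        .setMode(new FsPermission((short) 0755))
        .setLimit(1024L * 1024L));
    // List pools; callers without read permission on a pool see only its name
    // (as in testListCachePoolPermissions above).
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
        CachePoolInfo info = it.next().getInfo();
        System.out.println(info.getPoolName() + " limit=" + info.getLimit());
    }
    // Modify the pool; fields left unset keep their current values
    // (testListCachePoolPermissions changes only the owner and limit).
    dfs.modifyCachePool(new CachePoolInfo("examplePool").setLimit(2L * 1024L * 1024L));
    // Remove the pool by name; removing it again fails with
    // "Cannot remove non-existent..." (see testCreateAndModifyPools).
    dfs.removeCachePool("examplePool");
}

As testLimit shows, adding or moving a CacheDirectiveInfo into a pool whose remaining capacity is too small fails with InvalidRequestException unless CacheFlag.FORCE is passed.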