Example 31 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testWaitForCachedReplicas.

@Test(timeout = 120000)
public void testWaitForCachedReplicas() throws Exception {
    FileSystemTestHelper helper = new FileSystemTestHelper();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            return ((namenode.getNamesystem().getCacheCapacity() == (NUM_DATANODES * CACHE_CAPACITY)) && (namenode.getNamesystem().getCacheUsed() == 0));
        }
    }, 500, 60000);
    // Send a cache report referring to a bogus block.  It is important that
    // the NameNode be robust against this.
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    DataNode dn0 = cluster.getDataNodes().get(0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
    bogusBlockIds.add(999999L);
    nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);
    Path rootDir = helper.getDefaultWorkingDirectory(dfs);
    // Create the pool
    final String pool = "friendlyPool";
    nnRpc.addCachePool(new CachePoolInfo(pool));
    // Create some test files
    final int numFiles = 2;
    final int numBlocksPerFile = 2;
    final List<String> paths = new ArrayList<String>(numFiles);
    for (int i = 0; i < numFiles; i++) {
        Path p = new Path(rootDir, "testCachePaths-" + i);
        FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile, (int) BLOCK_SIZE);
        paths.add(p.toUri().getPath());
    }
    // Check the initial statistics at the namenode
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
    // Cache and check each path in sequence
    int expected = 0;
    for (int i = 0; i < numFiles; i++) {
        CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().setPath(new Path(paths.get(i))).setPool(pool).build();
        nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
        expected += numBlocksPerFile;
        waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:1");
    }
    // Check that the datanodes have the right cache values
    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
    assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
    long totalUsed = 0;
    for (DatanodeInfo dn : live) {
        final long cacheCapacity = dn.getCacheCapacity();
        final long cacheUsed = dn.getCacheUsed();
        final long cacheRemaining = dn.getCacheRemaining();
        assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
        assertEquals("Capacity not equal to used + remaining", cacheCapacity, cacheUsed + cacheRemaining);
        assertEquals("Remaining not equal to capacity - used", cacheCapacity - cacheUsed, cacheRemaining);
        totalUsed += cacheUsed;
    }
    assertEquals(expected * BLOCK_SIZE, totalUsed);
    // Uncache and check each path in sequence
    RemoteIterator<CacheDirectiveEntry> entries = new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
    for (int i = 0; i < numFiles; i++) {
        CacheDirectiveEntry entry = entries.next();
        nnRpc.removeCacheDirective(entry.getInfo().getId());
        expected -= numBlocksPerFile;
        waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:2");
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), CacheFlag (org.apache.hadoop.fs.CacheFlag), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ArrayList (java.util.ArrayList), LinkedList (java.util.LinkedList), FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), CacheDirectiveIterator (org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
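
The waitForCachedBlocks helper that this test calls repeatedly is not part of this excerpt. As a rough sketch only (the helper name waitForCachedBytes and its exact semantics are assumptions, not the real implementation), cached progress can be polled from the client side by summing per-directive stats, reusing the GenericTestUtils.waitFor pattern from the top of the test:

// Hypothetical polling helper: waits until the total bytesCached reported
// across all cache directives reaches the expected value.
private static void waitForCachedBytes(final DistributedFileSystem dfs, final long expectedBytes) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                long cached = 0;
                RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
                while (it.hasNext()) {
                    // CacheDirectiveStats tracks bytesNeeded/bytesCached per directive.
                    cached += it.next().getStats().getBytesCached();
                }
                return cached == expectedBytes;
            } catch (IOException e) {
                // Treat transient RPC failures as "not cached yet".
                return false;
            }
        }
    }, 500, 60000);
}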

Example 32 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testAddRemoveDirectives.

@Test(timeout = 60000)
public void testAddRemoveDirectives() throws Exception {
    proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0777)));
    proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short) 0777)));
    proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short) 0777)));
    proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short) 0)));
    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
    CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder().setPath(new Path("/beta")).setPool("pool2").build();
    CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder().setPath(new Path("/delta")).setPool("pool1").build();
    long alphaId = addAsUnprivileged(alpha);
    long alphaId2 = addAsUnprivileged(alpha);
    assertFalse("Expected to get unique directives when re-adding an " + "existing CacheDirectiveInfo", alphaId == alphaId2);
    long betaId = addAsUnprivileged(beta);
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/unicorn")).setPool("no_such_pool").build());
        fail("expected an error when adding to a non-existent pool.");
    } catch (InvalidRequestException ioe) {
        GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
    }
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/blackhole")).setPool("pool4").build());
        fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone).");
    } catch (AccessControlException e) {
        GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
    }
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/illegal:path/")).setPool("pool1").build());
        fail("expected an error when adding a malformed path " + "to the cache directives.");
    } catch (IllegalArgumentException e) {
        GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
    }
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/emptypoolname")).setReplication((short) 1).setPool("").build());
        fail("expected an error when adding a cache " + "directive with an empty pool name.");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
    }
    long deltaId = addAsUnprivileged(delta);
    // We expect the following to succeed, because DistributedFileSystem
    // qualifies the path.
    long relativeId = addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("relative")).setPool("pool1").build());
    RemoteIterator<CacheDirectiveEntry> iter;
    iter = dfs.listCacheDirectives(null);
    validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
    assertFalse(iter.hasNext());
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
    validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
    validateListAll(iter, betaId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
    validateListAll(iter, alphaId2);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
    validateListAll(iter, relativeId);
    dfs.removeCacheDirective(betaId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
    assertFalse(iter.hasNext());
    try {
        dfs.removeCacheDirective(betaId);
        fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }
    try {
        proto.removeCacheDirective(-42L);
        fail("expected an error when removing a negative ID");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("Invalid negative ID", e);
    }
    try {
        proto.removeCacheDirective(43L);
        fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }
    dfs.removeCacheDirective(alphaId);
    dfs.removeCacheDirective(alphaId2);
    dfs.removeCacheDirective(deltaId);
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(relativeId).setReplication((short) 555).build());
    iter = dfs.listCacheDirectives(null);
    assertTrue(iter.hasNext());
    CacheDirectiveInfo modified = iter.next().getInfo();
    assertEquals(relativeId, modified.getId().longValue());
    assertEquals((short) 555, modified.getReplication().shortValue());
    dfs.removeCacheDirective(relativeId);
    iter = dfs.listCacheDirectives(null);
    assertFalse(iter.hasNext());
    // Verify that cache directives with path "." work correctly
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().setPath(new Path(".")).setPool("pool1").build();
    long id = dfs.addCacheDirective(directive);
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive).setId(id).setReplication((short) 2).build());
    dfs.removeCacheDirective(id);
    // Perform cache directive operations using a closed file system.
    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster.getNewFileSystemInstance(0);
    dfs1.close();
    try {
        dfs1.listCacheDirectives(null);
        fail("listCacheDirectives using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.addCacheDirective(alpha);
        fail("addCacheDirective using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.modifyCacheDirective(alpha);
        fail("modifyCacheDirective using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.removeCacheDirective(alphaId);
        fail("removeCacheDirective using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), AccessControlException (org.apache.hadoop.security.AccessControlException), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
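
Taken together, the calls in this test reduce to a short pool/directive lifecycle on the public DistributedFileSystem API. A minimal sketch, assuming an initialized dfs handle and an enclosing method that throws IOException; the pool name, path, and replication values are placeholders:

// Create a pool, attach a directive, adjust it, then tear everything down.
CachePoolInfo poolInfo = new CachePoolInfo("examplePool").setMode(new FsPermission((short) 0755));
dfs.addCachePool(poolInfo);
long directiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/example")).setPool("examplePool").setReplication((short) 1).build());
// Raise the cache replication of the existing directive by id.
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(directiveId).setReplication((short) 2).build());
// List only the directives in this pool, then remove them and the pool.
RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("examplePool").build());
while (it.hasNext()) {
    dfs.removeCacheDirective(it.next().getInfo().getId());
}
dfs.removeCachePool("examplePool");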

Example 33 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestCacheDirectives, method testExpiry.

@Test(timeout = 120000)
public void testExpiry() throws Exception {
    String pool = "pool1";
    dfs.addCachePool(new CachePoolInfo(pool));
    Path p = new Path("/mypath");
    DFSTestUtil.createFile(dfs, p, BLOCK_SIZE * 2, (short) 2, 0x999);
    // Expire after test timeout
    Date start = new Date();
    Date expiry = DateUtils.addSeconds(start, 120);
    final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(p).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry)).setReplication((short) 2).build());
    waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
    // Change it to expire sooner
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(0)).build());
    waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
    CacheDirectiveEntry ent = it.next();
    assertFalse(it.hasNext());
    Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
    assertTrue("Directive should have expired", entryExpiry.before(new Date()));
    // Change it back to expire later
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(120000)).build());
    waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
    it = dfs.listCacheDirectives(null);
    ent = it.next();
    assertFalse(it.hasNext());
    entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
    assertTrue("Directive should not have expired", entryExpiry.after(new Date()));
    // Verify that setting a negative TTL throws an error
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(-1)).build());
        fail("expected an error when setting a negative expiration");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("Cannot set a negative expiration", e);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Date (java.util.Date), Test (org.junit.Test)
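
For reference, the two expiration flavours toggled above can be set explicitly when a directive is built. A small sketch with placeholder paths and pool names; DateUtils is the same Commons Lang utility used in the test, and TimeUnit (java.util.concurrent) is an extra import added here:

// Absolute expiry: the directive stops being honoured at a fixed wall-clock time.
CacheDirectiveInfo absolute = new CacheDirectiveInfo.Builder().setPath(new Path("/data/hot")).setPool("pool1").setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(DateUtils.addMinutes(new Date(), 10))).build();
// Relative expiry: a TTL measured from when the directive is added or modified.
CacheDirectiveInfo relative = new CacheDirectiveInfo.Builder().setPath(new Path("/data/warm")).setPool("pool1").setExpiration(CacheDirectiveInfo.Expiration.newRelative(TimeUnit.HOURS.toMillis(1))).build();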

Example 34 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testAddCacheDirective.

@Test
public void testAddCacheDirective() throws Exception {
    removeExistingCachePools(null);
    proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).addCacheDirective(alpha);
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
    }
    String aceAddCachePattern = ".*allowed=false.*ugi=theDoctor.*cmd=addCache.*";
    int length = verifyAuditLogs(aceAddCachePattern);
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).addCacheDirective(alpha);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
    }
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
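
Neither the verifyAuditLogs helper nor the auditlog field is shown in this excerpt. A hypothetical sketch of an equivalent check, assuming auditlog is a GenericTestUtils.LogCapturer attached to the NameNode audit logger; the method name and return convention are assumptions for illustration:

private int verifyLastAuditEntry(String regex) {
    // Inspect the captured audit output and check the most recent entry.
    String[] lines = auditlog.getOutput().split("\n");
    String last = lines[lines.length - 1];
    assertTrue("Unexpected audit log entry: " + last, last.matches(regex));
    // Return the current line count so a caller can later assert that a
    // failed operation (e.g. on a closed filesystem) added no new entries.
    return lines.length;
}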

Example 35 with CachePoolInfo

Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.

From the class TestAuditLoggerWithCommands, method testModifyCacheDirective.

@Test
public void testModifyCacheDirective() throws Exception {
    removeExistingCachePools(null);
    proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    Long id = ((DistributedFileSystem) fs).addCacheDirective(alpha);
    try {
        ((DistributedFileSystem) fileSys).modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 1).build());
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
    }
    String aceModifyCachePattern = ".*allowed=false.*ugi=theDoctor.*cmd=modifyCache.*";
    verifyAuditLogs(aceModifyCachePattern);
    fileSys.close();
    try {
        ((DistributedFileSystem) fileSys).modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 1).build());
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
    }
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
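
The audit tests above only ever set a name and a mode on CachePoolInfo. The same fluent style covers the remaining pool attributes; the sketch below uses made-up owner, group, and limit values, assumes an initialized dfs handle inside a method that throws IOException, and the exact setters available depend on the Hadoop version:

// A more fully specified pool: owner/group, restrictive mode, a byte limit,
// and a cap on how long any directive in the pool may stay cached.
CachePoolInfo info = new CachePoolInfo("reportsPool").setOwnerName("etl").setGroupName("analysts").setMode(new FsPermission((short) 0750)).setLimit(64L * 1024 * 1024 * 1024).setMaxRelativeExpiryMs(TimeUnit.DAYS.toMillis(7));
dfs.addCachePool(info);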

Aggregations

CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 36
Test (org.junit.Test): 26
Path (org.apache.hadoop.fs.Path): 20
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 20
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 14
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14
IOException (java.io.IOException): 11
AccessControlException (org.apache.hadoop.security.AccessControlException): 8
CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 7
CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 6
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 4
ByteBuffer (java.nio.ByteBuffer): 3
Date (java.util.Date): 3
LinkedList (java.util.LinkedList): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
HashSet (java.util.HashSet): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
CacheFlag (org.apache.hadoop.fs.CacheFlag): 2
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2