Example 36 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

the class TestReplication method assertNoReplicationWasPerformed.

private void assertNoReplicationWasPerformed(MiniDFSCluster cluster) {
    for (DataNode dn : cluster.getDataNodes()) {
        MetricsRecordBuilder rb = getMetrics(dn.getMetrics().name());
        assertCounter("BlocksReplicated", 0L, rb);
    }
}
Also used: DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)
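
For context, MetricsAsserts drives the source's getMetrics(...) call into a mocked MetricsRecordBuilder (the helper is built on Mockito) and then verifies the recorded interactions. Below is a minimal, self-contained sketch of that pattern against a hand-rolled MetricsSource; the class and counter names are illustrative, not taken from the test above.

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.Interns;
import org.junit.Test;

public class MetricsAssertsSketchTest {

    @Test
    public void counterIsVerifiedThroughMockedBuilder() {
        // A toy source that reports a single counter at zero.
        MetricsSource source = new MetricsSource() {
            @Override
            public void getMetrics(MetricsCollector collector, boolean all) {
                collector.addRecord("ExampleRecord").addCounter(Interns.info("BlocksReplicated", "test counter"), 0L);
            }
        };
        // getMetrics(source) snapshots the source into a mocked builder;
        // assertCounter then verifies the addCounter(...) interaction.
        MetricsRecordBuilder rb = getMetrics(source);
        assertCounter("BlocksReplicated", 0L, rb);
    }
}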

Example 37 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

the class TestFsDatasetCache method testReCacheAfterUncache.

@Test(timeout = 60000)
public void testReCacheAfterUncache() throws Exception {
    final int TOTAL_BLOCKS_PER_CACHE = Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
    BlockReaderTestUtil.enableHdfsCachingTracing();
    Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
    // Create a small file
    final Path SMALL_FILE = new Path("/smallFile");
    DFSTestUtil.createFile(fs, SMALL_FILE, BLOCK_SIZE, (short) 1, 0xcafe);
    // Create a file that will take up the whole cache
    final Path BIG_FILE = new Path("/bigFile");
    DFSTestUtil.createFile(fs, BIG_FILE, TOTAL_BLOCKS_PER_CACHE * BLOCK_SIZE, (short) 1, 0xbeef);
    final DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.addCachePool(new CachePoolInfo("pool"));
    final long bigCacheDirectiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(BIG_FILE).setReplication((short) 1).build());
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
            long blocksCached = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
            if (blocksCached != TOTAL_BLOCKS_PER_CACHE) {
                LOG.info("waiting for " + TOTAL_BLOCKS_PER_CACHE + " to " + "be cached.   Right now only " + blocksCached + " blocks are cached.");
                return false;
            }
            LOG.info(TOTAL_BLOCKS_PER_CACHE + " blocks are now cached.");
            return true;
        }
    }, 1000, 30000);
    // Try to cache a smaller file.  It should fail.
    final long shortCacheDirectiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(SMALL_FILE).setReplication((short) 1).build());
    // Give the DataNode time to act; the small file must remain uncached
    // while the big file's directive still occupies the whole cache.
    Thread.sleep(10000);
    MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
    Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE, MetricsAsserts.getLongCounter("BlocksCached", dnMetrics));
    // Uncache the big file and verify that the small file can now be
    // cached (regression test for HDFS-6107)
    dfs.removeCacheDirective(bigCacheDirectiveId);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            RemoteIterator<CacheDirectiveEntry> iter;
            try {
                iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().build());
                CacheDirectiveEntry entry;
                do {
                    entry = iter.next();
                } while (entry.getInfo().getId() != shortCacheDirectiveId);
                if (entry.getStats().getFilesCached() != 1) {
                    LOG.info("waiting for directive " + shortCacheDirectiveId + " to be cached.  stats = " + entry.getStats());
                    return false;
                }
                LOG.info("directive " + shortCacheDirectiveId + " has been cached.");
            } catch (IOException e) {
                Assert.fail("unexpected exception" + e.toString());
            }
            return true;
        }
    }, 1000, 30000);
    dfs.removeCacheDirective(shortCacheDirectiveId);
}
Also used: Path(org.apache.hadoop.fs.Path) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RemoteIterator(org.apache.hadoop.fs.RemoteIterator) CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) CacheDirectiveEntry(org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry) Matchers.anyBoolean(org.mockito.Matchers.anyBoolean) CachePoolInfo(org.apache.hadoop.hdfs.protocol.CachePoolInfo) Test(org.junit.Test)
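
One caveat in the second wait loop above: RemoteIterator.next() throws NoSuchElementException if the directive id is never seen, which would surface as an obscure error rather than a clean timeout. A defensive variant of the scan is sketched below, reusing the test's own names; it is a drop-in for the body of the try block inside Supplier.get().

RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().build());
CacheDirectiveEntry found = null;
while (iter.hasNext()) {
    CacheDirectiveEntry entry = iter.next();
    if (entry.getInfo().getId() == shortCacheDirectiveId) {
        found = entry;
        break;
    }
}
// Keep polling until the directive exists and reports one cached file.
if (found == null || found.getStats().getFilesCached() != 1) {
    return false;
}
return true;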

Example 38 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

the class TestFsDatasetCache method testCacheAndUncacheBlock.

private void testCacheAndUncacheBlock() throws Exception {
    LOG.info("beginning testCacheAndUncacheBlock");
    final int NUM_BLOCKS = 5;
    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
    assertEquals(0, fsd.getNumBlocksCached());
    // Write a test file
    final Path testFile = new Path("/testCacheBlock");
    final long testFileLen = BLOCK_SIZE * NUM_BLOCKS;
    DFSTestUtil.createFile(fs, testFile, testFileLen, (short) 1, 0xABBAL);
    // Get the details of the written file
    HdfsBlockLocation[] locs = (HdfsBlockLocation[]) fs.getFileBlockLocations(testFile, 0, testFileLen);
    assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length);
    final long[] blockSizes = getBlockSizes(locs);
    // Check initial state
    final long cacheCapacity = fsd.getCacheCapacity();
    long cacheUsed = fsd.getCacheUsed();
    long current = 0;
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Unexpected amount of cache used", current, cacheUsed);
    MetricsRecordBuilder dnMetrics;
    long numCacheCommands = 0;
    long numUncacheCommands = 0;
    // Cache each block in succession, checking each time
    for (int i = 0; i < NUM_BLOCKS; i++) {
        setHeartbeatResponse(cacheBlock(locs[i]));
        current = DFSTestUtil.verifyExpectedCacheUsage(current + blockSizes[i], i + 1, fsd);
        dnMetrics = getMetrics(dn.getMetrics().name());
        long cmds = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
        assertTrue("Expected more cache requests from the NN (" + cmds + " <= " + numCacheCommands + ")", cmds > numCacheCommands);
        numCacheCommands = cmds;
    }
    // Uncache each block in succession, again checking each time
    for (int i = 0; i < NUM_BLOCKS; i++) {
        setHeartbeatResponse(uncacheBlock(locs[i]));
        current = DFSTestUtil.verifyExpectedCacheUsage(current - blockSizes[i], NUM_BLOCKS - 1 - i, fsd);
        dnMetrics = getMetrics(dn.getMetrics().name());
        long cmds = MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
        assertTrue("Expected more uncache requests from the NN", cmds > numUncacheCommands);
        numUncacheCommands = cmds;
    }
    LOG.info("finishing testCacheAndUncacheBlock");
}
Also used: Path(org.apache.hadoop.fs.Path) HdfsBlockLocation(org.apache.hadoop.fs.HdfsBlockLocation) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)
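
The loops above assert a monotonically increasing counter rather than an exact value, because the NameNode may resend cache commands over several heartbeats. The same idiom in isolation (a sketch assuming the test's dn field and the MetricsAsserts helpers):

long before = MetricsAsserts.getLongCounter("BlocksCached", getMetrics(dn.getMetrics().name()));
// ... trigger one more cache command, e.g. via setHeartbeatResponse(cacheBlock(loc)) ...
long after = MetricsAsserts.getLongCounter("BlocksCached", getMetrics(dn.getMetrics().name()));
assertTrue("expected BlocksCached to increase", after > before);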

Example 39 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

the class TestFsDatasetCache method testUncacheQuiesces.

@Test(timeout = 60000)
public void testUncacheQuiesces() throws Exception {
    // Create a file
    Path fileName = new Path("/testUncacheQuiesces");
    int fileLen = 4096;
    DFSTestUtil.createFile(fs, fileName, fileLen, (short) 1, 0xFDFD);
    // Cache it
    DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(fileName).setReplication((short) 3).build());
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
            long blocksCached = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
            return blocksCached > 0;
        }
    }, 1000, 30000);
    // Uncache it
    // The directive created above is the first one issued, so its id is 1.
    dfs.removeCacheDirective(1);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
            long blocksUncached = MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
            return blocksUncached > 0;
        }
    }, 1000, 30000);
    // Make sure that no additional messages were sent
    Thread.sleep(10000);
    MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
    MetricsAsserts.assertCounter("BlocksCached", 1l, dnMetrics);
    MetricsAsserts.assertCounter("BlocksUncached", 1l, dnMetrics);
}
Also used: Path(org.apache.hadoop.fs.Path) CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Matchers.anyBoolean(org.mockito.Matchers.anyBoolean) CachePoolInfo(org.apache.hadoop.hdfs.protocol.CachePoolInfo) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)
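
Distilled, the quiesce check is a three-step idiom: wait until the counter first moves, sleep long enough for any duplicate commands to land, then assert the exact count. A compressed sketch using the same helpers and dn field as above:

GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        return MetricsAsserts.getLongCounter("BlocksUncached", getMetrics(dn.getMetrics().name())) > 0;
    }
}, 1000, 30000);
// Settle window: any duplicate uncache commands would arrive here.
Thread.sleep(10000);
MetricsAsserts.assertCounter("BlocksUncached", 1L, getMetrics(dn.getMetrics().name()));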

Example 40 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

the class TestProtoBufRpc method testProtoBufRpc2.

@Test(timeout = 5000)
public void testProtoBufRpc2() throws Exception {
    TestRpcService2 client = getClient2();
    // Test ping method
    client.ping2(null, newEmptyRequest());
    // Test echo method
    EchoResponseProto echoResponse = client.echo2(null, newEchoRequest("hello"));
    Assert.assertEquals("hello", echoResponse.getMessage());
    // Ensure RPC metrics are updated
    MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
    assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics);
    assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics);
    MetricsRecordBuilder rpcDetailedMetrics = getMetrics(server.getRpcDetailedMetrics().name());
    assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics);
}
Also used: EchoResponseProto(org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)
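
assertCounterGt is effectively a one-liner: it reads the named long counter from the record builder and asserts strict inequality. Spelled out against the same rpcMetrics (a sketch, not the helper's exact source):

long numOps = MetricsAsserts.getLongCounter("RpcQueueTimeNumOps", rpcMetrics);
assertTrue("expected RpcQueueTimeNumOps > 0, got " + numOps, numOps > 0L);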

Aggregations

MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 99
Test (org.junit.Test): 47
Path (org.apache.hadoop.fs.Path): 20
Configuration (org.apache.hadoop.conf.Configuration): 14
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 11
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11
FileSystem (org.apache.hadoop.fs.FileSystem): 8
MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo): 7
IOException (java.io.IOException): 6
MetricsCollector (org.apache.hadoop.metrics2.MetricsCollector): 6
MetricsSource (org.apache.hadoop.metrics2.MetricsSource): 5
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
Quantile (org.apache.hadoop.metrics2.util.Quantile): 4
ServiceException (com.google.protobuf.ServiceException): 3
InterruptedIOException (java.io.InterruptedIOException): 2
GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean): 2
Map (java.util.Map): 2
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 2
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 2