
Example 21 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class MetricsAsserts, method mockMetricsRecordBuilder.

public static MetricsRecordBuilder mockMetricsRecordBuilder() {
    final MetricsCollector mc = mock(MetricsCollector.class);
    MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class, new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocation) {
            Object[] args = invocation.getArguments();
            StringBuilder sb = new StringBuilder();
            for (Object o : args) {
                if (sb.length() > 0)
                    sb.append(", ");
                sb.append(String.valueOf(o));
            }
            String methodName = invocation.getMethod().getName();
            LOG.debug(methodName + ": " + sb);
            // parent() and endRecord() hand back the collector; every other
            // call returns the builder mock itself so calls can be chained.
            return methodName.equals("parent") || methodName.equals("endRecord") ? mc : invocation.getMock();
        }
    });
    when(mc.addRecord(anyString())).thenReturn(rb);
    when(mc.addRecord(anyInfo())).thenReturn(rb);
    return rb;
}
Also used : MetricsCollector(org.apache.hadoop.metrics2.MetricsCollector) InvocationOnMock(org.mockito.invocation.InvocationOnMock) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)
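
For context, a minimal sketch of how this mocked builder is typically driven: parent() on the mock returns the backing MetricsCollector (see the Answer above), so a MetricsSource can be snapshotted into the mock and then verified with Mockito. The class and helper names below are illustrative, not part of the example above; the flow mirrors what MetricsAsserts.getMetrics(MetricsSource, boolean) does.

import static org.apache.hadoop.test.MetricsAsserts.mockMetricsRecordBuilder;

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;

public class MockedBuilderSketch {

    // Snapshot a source into the mocked builder so a test can verify()
    // the addGauge/addCounter calls it made.
    static MetricsRecordBuilder snapshot(MetricsSource source) {
        MetricsRecordBuilder rb = mockMetricsRecordBuilder();
        // The mock's parent() returns the mocked collector wired up above.
        MetricsCollector mc = rb.parent();
        source.getMetrics(mc, true);
        return rb;
    }
}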

Example 22 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class MetricsAsserts, method assertQuantileGauges.

/**
   * Asserts that the NumOps and quantiles for a metric have been changed at
   * some point to a non-zero value.
   * 
   * @param prefix of the metric
   * @param rb MetricsRecordBuilder with the metric
   */
public static void assertQuantileGauges(String prefix, MetricsRecordBuilder rb) {
    verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
    for (Quantile q : MutableQuantiles.quantiles) {
        String nameTemplate = prefix + "%dthPercentileLatency";
        int percentile = (int) (100 * q.quantile);
        verify(rb).addGauge(eqName(info(String.format(nameTemplate, percentile), "")), geq(0L));
    }
}
Also used : Quantile(org.apache.hadoop.metrics2.util.Quantile)
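
A hedged usage sketch of the assertion above: it assumes a MutableQuantiles metric was registered under a name that, together with its rollover-interval suffix, yields the prefix "MyLatency60s", and that the metrics record is named "MyMetricsSource". Both names are placeholders, not real Hadoop sources.

import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;

public class QuantileAssertSketch {

    @Test
    public void latencyQuantilesWereUpdated() {
        // "MyMetricsSource" and the "MyLatency60s" prefix are placeholders;
        // the prefix must match the name + interval suffix used when the
        // MutableQuantiles metric was registered.
        MetricsRecordBuilder rb = getMetrics("MyMetricsSource");
        assertQuantileGauges("MyLatency60s", rb);
    }
}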

Example 23 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class TestBlockManager, method testAsyncIBR.

// spam the block manager with IBRs to verify queuing is occurring.
@Test
public void testAsyncIBR() throws Exception {
    Logger.getRootLogger().setLevel(Level.WARN);
    // will create files with many small blocks.
    final int blkSize = 4 * 1024;
    final int fileSize = blkSize * 100;
    final byte[] buf = new byte[2 * blkSize];
    final int numWriters = 4;
    final int repl = 3;
    final CyclicBarrier barrier = new CyclicBarrier(numWriters);
    final CountDownLatch writeLatch = new CountDownLatch(numWriters);
    final AtomicBoolean failure = new AtomicBoolean();
    final Configuration conf = new HdfsConfiguration();
    // lower the NameNode minimum block size so these small blocks are allowed.
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, blkSize);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
    try {
        cluster.waitActive();
        // create multiple writer threads to create a file with many blocks.
        // will test that concurrent writing causes IBR batching in the NN
        Thread[] writers = new Thread[numWriters];
        for (int i = 0; i < writers.length; i++) {
            final Path p = new Path("/writer" + i);
            writers[i] = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        FileSystem fs = cluster.getFileSystem();
                        FSDataOutputStream os = fs.create(p, true, buf.length, (short) repl, blkSize);
                        // align writers for maximum chance of IBR batching.
                        barrier.await();
                        int remaining = fileSize;
                        while (remaining > 0) {
                            os.write(buf);
                            remaining -= buf.length;
                        }
                        os.close();
                    } catch (Exception e) {
                        e.printStackTrace();
                        failure.set(true);
                    }
                    // let main thread know we are done.
                    writeLatch.countDown();
                }
            });
            writers[i].start();
        }
        // when and how many IBRs are queued is indeterminate, so just watch
        // the metrics and verify something was queued at some point during execution.
        boolean sawQueued = false;
        while (!writeLatch.await(10, TimeUnit.MILLISECONDS)) {
            assertFalse(failure.get());
            MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
            long queued = MetricsAsserts.getIntGauge("BlockOpsQueued", rb);
            sawQueued |= (queued > 0);
        }
        assertFalse(failure.get());
        assertTrue(sawQueued);
        // verify that batching of the IBRs occurred.
        MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
        long batched = MetricsAsserts.getLongCounter("BlockOpsBatched", rb);
        assertTrue(batched > 0);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) CountDownLatch(java.util.concurrent.CountDownLatch) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) BrokenBarrierException(java.util.concurrent.BrokenBarrierException) RemoteException(org.apache.hadoop.ipc.RemoteException) CyclicBarrier(java.util.concurrent.CyclicBarrier) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
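
The metric-polling idiom in the test above can be factored into a small helper. The following is only a sketch (the class name, helper name, timeout handling, and sleep interval are mine), but the MetricsAsserts calls and the "NameNodeActivity"/"BlockOpsQueued" names are exactly the ones the test uses.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.MetricsAsserts;

public class IbrMetricPollSketch {

    // Re-snapshot the "NameNodeActivity" record until a queued block op is
    // observed in the gauge, or the deadline passes.
    static boolean waitForQueuedBlockOps(long timeoutMs) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (System.nanoTime() < deadline) {
            // Each getMetrics call takes a fresh snapshot of the named record.
            MetricsRecordBuilder rb = MetricsAsserts.getMetrics("NameNodeActivity");
            if (MetricsAsserts.getIntGauge("BlockOpsQueued", rb) > 0) {
                return true;
            }
            Thread.sleep(10);
        }
        return false;
    }
}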

Example 24 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class TestStartupProgressMetrics, method testRunningState.

@Test
public void testRunningState() {
    setStartupProgressForRunningState(startupProgress);
    MetricsRecordBuilder builder = getMetrics(metrics, true);
    assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);
    assertGauge("PercentComplete", 0.375f, builder);
    assertCounter("LoadingFsImageCount", 100L, builder);
    assertTrue(getLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
    assertCounter("LoadingFsImageTotal", 100L, builder);
    assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
    assertCounter("LoadingEditsCount", 100L, builder);
    assertTrue(getLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
    assertCounter("LoadingEditsTotal", 200L, builder);
    assertGauge("LoadingEditsPercentComplete", 0.5f, builder);
    assertCounter("SavingCheckpointCount", 0L, builder);
    assertCounter("SavingCheckpointElapsedTime", 0L, builder);
    assertCounter("SavingCheckpointTotal", 0L, builder);
    assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
    assertCounter("SafeModeCount", 0L, builder);
    assertCounter("SafeModeElapsedTime", 0L, builder);
    assertCounter("SafeModeTotal", 0L, builder);
    assertGauge("SafeModePercentComplete", 0.0f, builder);
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)

Example 25 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class TestStartupProgressMetrics, method testFinalState.

@Test
public void testFinalState() {
    setStartupProgressForFinalState(startupProgress);
    MetricsRecordBuilder builder = getMetrics(metrics, true);
    assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);
    assertGauge("PercentComplete", 1.0f, builder);
    assertCounter("LoadingFsImageCount", 100L, builder);
    assertTrue(getLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
    assertCounter("LoadingFsImageTotal", 100L, builder);
    assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
    assertCounter("LoadingEditsCount", 200L, builder);
    assertTrue(getLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
    assertCounter("LoadingEditsTotal", 200L, builder);
    assertGauge("LoadingEditsPercentComplete", 1.0f, builder);
    assertCounter("SavingCheckpointCount", 300L, builder);
    assertTrue(getLongCounter("SavingCheckpointElapsedTime", builder) >= 0L);
    assertCounter("SavingCheckpointTotal", 300L, builder);
    assertGauge("SavingCheckpointPercentComplete", 1.0f, builder);
    assertCounter("SafeModeCount", 400L, builder);
    assertTrue(getLongCounter("SafeModeElapsedTime", builder) >= 0L);
    assertCounter("SafeModeTotal", 400L, builder);
    assertGauge("SafeModePercentComplete", 1.0f, builder);
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)
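
Both startup-progress tests above lean on the same three MetricsAsserts helpers. A condensed sketch of that pattern follows; the MetricsSource parameter name is a placeholder for whatever source the test under consideration snapshots, while the metric names and values are taken from the tests above.

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;

public class StartupProgressAssertSketch {

    static void checkSnapshot(MetricsSource source) {
        // "all = true" asks the source to emit every metric, including ones
        // that have not changed since the previous snapshot.
        MetricsRecordBuilder builder = getMetrics(source, true);
        assertCounter("LoadingFsImageCount", 100L, builder);          // exact value
        assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);  // exact value
        assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);     // lower bound only
    }
}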

Aggregations

MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 99 uses
Test (org.junit.Test): 47 uses
Path (org.apache.hadoop.fs.Path): 20 uses
Configuration (org.apache.hadoop.conf.Configuration): 14 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 11 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 8 uses
MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo): 7 uses
IOException (java.io.IOException): 6 uses
MetricsCollector (org.apache.hadoop.metrics2.MetricsCollector): 6 uses
MetricsSource (org.apache.hadoop.metrics2.MetricsSource): 5 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 uses
Quantile (org.apache.hadoop.metrics2.util.Quantile): 4 uses
ServiceException (com.google.protobuf.ServiceException): 3 uses
InterruptedIOException (java.io.InterruptedIOException): 2 uses
GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean): 2 uses
Map (java.util.Map): 2 uses
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 2 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 2 uses