
Example 86 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From class RollingAverages, method snapshot().

@Override
public void snapshot(MetricsRecordBuilder builder, boolean all) {
    if (all || changed()) {
        for (final Entry<String, LinkedBlockingDeque<SumAndCount>> entry
                : averages.entrySet()) {
            final String name = entry.getKey();
            final MetricsInfo avgInfo = info(
                String.format(avgInfoNameTemplate, StringUtils.capitalize(name)),
                String.format(avgInfoDescTemplate, StringUtils.uncapitalize(name)));
            double totalSum = 0;
            long totalCount = 0;
            for (final SumAndCount sumAndCount : entry.getValue()) {
                totalCount += sumAndCount.getCount();
                totalSum += sumAndCount.getSum();
            }
            // Emit the average only when samples exist (avoids division by zero).
            if (totalCount != 0) {
                builder.addGauge(avgInfo, totalSum / totalCount);
            }
        }
        if (changed()) {
            clearChanged();
        }
    }
}
Also used : MetricsInfo(org.apache.hadoop.metrics2.MetricsInfo) LinkedBlockingDeque(java.util.concurrent.LinkedBlockingDeque)
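
The pattern above relies on the dirty-flag helpers that org.apache.hadoop.metrics2.lib.MutableMetric provides: changed(), setChanged(), and clearChanged(). A minimal sketch of the same "emit a derived gauge only when data changed" idea, using a hypothetical MutableRunningAverage class that is not part of Hadoop:

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MutableMetric;

// Hypothetical metric, not part of Hadoop: a running average emitted as a gauge.
public class MutableRunningAverage extends MutableMetric {
    private final MetricsInfo info;
    private double sum;
    private long count;

    public MutableRunningAverage(String name, String description) {
        this.info = Interns.info(name, description);
    }

    public synchronized void add(double value) {
        sum += value;
        count++;
        setChanged(); // mark dirty so snapshot(builder, false) still emits
    }

    @Override
    public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
        if (all || changed()) {
            if (count != 0) { // same division-by-zero guard as RollingAverages
                builder.addGauge(info, sum / count);
            }
            clearChanged();
        }
    }
}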

Example 87 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From class DecayRpcScheduler, method addTopNCallerSummary().

// Key: Caller(xyz).Volume and Caller(xyz).Priority
private void addTopNCallerSummary(MetricsRecordBuilder rb) {
    TopN topNCallers = getTopCallers(topUsersCount);
    Map<Object, Integer> decisions = scheduleCacheRef.get();
    final int actualCallerCount = topNCallers.size();
    for (int i = 0; i < actualCallerCount; i++) {
        NameValuePair entry = topNCallers.poll();
        String topCaller = "Caller(" + entry.getName() + ")";
        String topCallerVolume = topCaller + ".Volume";
        String topCallerPriority = topCaller + ".Priority";
        rb.addCounter(Interns.info(topCallerVolume, topCallerVolume), entry.getValue());
        Integer priority = decisions.get(entry.getName());
        if (priority != null) {
            rb.addCounter(Interns.info(topCallerPriority, topCallerPriority), priority);
        }
    }
}
Also used : NameValuePair(org.apache.hadoop.metrics2.util.Metrics2Util.NameValuePair) TopN(org.apache.hadoop.metrics2.util.Metrics2Util.TopN)
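
Interns.info(name, description) caches MetricsInfo instances, so building metric names dynamically per caller, as above, does not allocate fresh metadata on every snapshot. A short sketch of the same dynamic-naming idea; the PerUserMetrics class, its helper, and the map argument are assumptions for illustration, not Hadoop code:

import java.util.Map;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;

// Hypothetical helper class, not part of Hadoop.
public final class PerUserMetrics {
    // One counter per user, with the metric name built at snapshot time.
    static void addPerUserCounters(MetricsRecordBuilder rb,
                                   Map<String, Long> callsPerUser) {
        for (Map.Entry<String, Long> e : callsPerUser.entrySet()) {
            String name = "Caller(" + e.getKey() + ").Volume";
            rb.addCounter(Interns.info(name, name), e.getValue());
        }
    }
}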

Example 88 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From class DecayRpcScheduler, method getMetrics().

@Override
public void getMetrics(MetricsCollector collector, boolean all) {
    // Metrics2 interface to act as a Metric source
    try {
        MetricsRecordBuilder rb = collector.addRecord(getClass().getName()).setContext(namespace);
        addDecayedCallVolume(rb);
        addUniqueIdentityCount(rb);
        addTopNCallerSummary(rb);
        addAvgResponseTimePerPriority(rb);
        addCallVolumePerPriority(rb);
        addRawCallVolume(rb);
    } catch (Exception e) {
        LOG.warn("Exception thrown while metric collection. Exception : " + e.getMessage());
    }
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)
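
This getMetrics() is the MetricsSource entry point: once the source is registered, the metrics system polls it on each snapshot cycle and hands it a fresh collector. A minimal sketch of registering such a source; the SchedulerMetricsSource name and record contents are assumptions for illustration:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

// Hypothetical source, not part of Hadoop.
public class SchedulerMetricsSource implements MetricsSource {
    private volatile long callVolume;

    @Override
    public void getMetrics(MetricsCollector collector, boolean all) {
        MetricsRecordBuilder rb = collector.addRecord("SchedulerMetrics")
            .setContext("ipc");
        rb.addCounter(Interns.info("CallVolume", "Total calls observed"),
            callVolume);
    }

    public static SchedulerMetricsSource register() {
        SchedulerMetricsSource source = new SchedulerMetricsSource();
        // Register so the metrics system polls getMetrics() on each cycle.
        DefaultMetricsSystem.instance().register(
            "SchedulerMetrics", "RPC scheduler metrics", source);
        return source;
    }
}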

Example 89 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From class MetricsSystemImpl, method getMetrics().

@Override
public synchronized void getMetrics(MetricsCollector builder, boolean all) {
    MetricsRecordBuilder rb = builder.addRecord(MS_NAME)
        .addGauge(MsInfo.NumActiveSources, sources.size())
        .addGauge(MsInfo.NumAllSources, allSources.size())
        .addGauge(MsInfo.NumActiveSinks, sinks.size())
        .addGauge(MsInfo.NumAllSinks, allSinks.size());
    for (MetricsSinkAdapter sa : sinks.values()) {
        sa.snapshot(rb, all);
    }
    registry.snapshot(rb, all);
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)
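
The chained addGauge calls work because every MetricsRecordBuilder mutator returns the builder itself; registry.snapshot(rb, all) then appends whatever mutable metrics the registry holds onto the same record. A short sketch of driving a MetricsRegistry the same way; the class and metric names are illustrative, not Hadoop code:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Hypothetical registry-backed source body, not Hadoop code.
public class RegistryBackedSource {
    private final MetricsRegistry registry = new MetricsRegistry("MySource");
    private final MutableCounterLong requests =
        registry.newCounter("Requests", "Requests handled", 0L);

    public void onRequest() {
        requests.incr();
    }

    public void getMetrics(MetricsCollector collector, boolean all) {
        MetricsRecordBuilder rb = collector.addRecord("MySource");
        // Snapshot every metric registered above into the record.
        registry.snapshot(rb, all);
    }
}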

Example 90 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From class TestCheckpoint, method testCheckpoint().

/**
   * Tests checkpoint in HDFS.
   */
@Test
public void testCheckpoint() throws IOException {
    Path tmpDir = new Path("/tmp_tmp");
    Path file1 = new Path("checkpoint.dat");
    Path file2 = new Path("checkpoint2.dat");
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    SecondaryNameNode secondary = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        //
        // verify that 'format' really blew away all pre-existing files
        //
        assertTrue(!fileSys.exists(file1));
        assertTrue(!fileSys.exists(file2));
        //
        // Create file1
        //
        DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replication, seed);
        checkFile(fileSys, file1, replication);
        for (int i = 0; i < 1000; i++) {
            fileSys.mkdirs(tmpDir);
            fileSys.delete(tmpDir, true);
        }
        //
        // Take a checkpoint
        //
        secondary = startSecondaryNameNode(conf);
        secondary.doCheckpoint();
        MetricsRecordBuilder rb = getMetrics(NN_METRICS);
        assertCounterGt("GetImageNumOps", 0, rb);
        assertCounterGt("GetEditNumOps", 0, rb);
        assertCounterGt("PutImageNumOps", 0, rb);
        assertGaugeGt("GetImageAvgTime", 0.0, rb);
        assertGaugeGt("GetEditAvgTime", 0.0, rb);
        assertGaugeGt("PutImageAvgTime", 0.0, rb);
    } finally {
        fileSys.close();
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
    }
    //
    // Restart the cluster and verify that file1 still exists
    //
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        // check that file1 still exists
        checkFile(fileSys, file1, replication);
        cleanupFile(fileSys, file1);
        // create new file file2
        DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize, replication, seed);
        checkFile(fileSys, file2, replication);
        //
        // Take a checkpoint
        //
        secondary = startSecondaryNameNode(conf);
        secondary.doCheckpoint();
        FSDirectory secondaryFsDir = secondary.getFSNamesystem().dir;
        INode rootInMap = secondaryFsDir.getInode(secondaryFsDir.rootDir.getId());
        assertSame(rootInMap, secondaryFsDir.rootDir);
        fileSys.delete(tmpDir, true);
        fileSys.mkdirs(tmpDir);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
    }
    //
    // Restart cluster and verify that file2 exists and
    // file1 does not exist.
    //
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertTrue(!fileSys.exists(file1));
    assertTrue(fileSys.exists(tmpDir));
    try {
        // verify that file2 exists
        checkFile(fileSys, file2, replication);
    } finally {
        fileSys.close();
        cluster.shutdown();
        cluster = null;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)
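
The assertions in this test come from Hadoop's org.apache.hadoop.test.MetricsAsserts helper, whose static getMetrics(name) snapshots a named record into a mock MetricsRecordBuilder that the assert methods then inspect. A compressed sketch of that idiom; "NameNodeActivity" is assumed here to be the record name behind the test's NN_METRICS constant:

import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;

public class TestCheckpointMetricsSketch {
    @Test
    public void checkpointBumpsImageOps() {
        // "NameNodeActivity" is an assumption standing in for NN_METRICS.
        MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
        assertCounterGt("GetImageNumOps", 0, rb);  // counter strictly > 0
        assertGaugeGt("PutImageAvgTime", 0.0, rb); // gauge strictly > 0.0
    }
}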

Aggregations

MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 99
Test (org.junit.Test): 47
Path (org.apache.hadoop.fs.Path): 20
Configuration (org.apache.hadoop.conf.Configuration): 14
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 11
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11
FileSystem (org.apache.hadoop.fs.FileSystem): 8
MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo): 7
IOException (java.io.IOException): 6
MetricsCollector (org.apache.hadoop.metrics2.MetricsCollector): 6
MetricsSource (org.apache.hadoop.metrics2.MetricsSource): 5
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
Quantile (org.apache.hadoop.metrics2.util.Quantile): 4
ServiceException (com.google.protobuf.ServiceException): 3
InterruptedIOException (java.io.InterruptedIOException): 2
GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean): 2
Map (java.util.Map): 2
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 2
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 2