
Example 51 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.

Class LlapDaemonExecutorMetrics, method getExecutorStats:

private void getExecutorStats(MetricsRecordBuilder rb) {
    updateThreadMetrics(rb);
    final int totalConfiguredSlots = waitQueueSizeConfigured + numExecutorsConfigured;
    final int totalSlots = waitQueueSize.value() + numExecutors.value();
    final int slotsAvailableInQueue = waitQueueSize.value() - executorNumQueuedRequests.value();
    final int slotsAvailableTotal = slotsAvailableInQueue + numExecutorsAvailable.value();
    final float slotsAvailablePercent = totalSlots <= 0 ? 0.0f : (float) slotsAvailableTotal / (float) totalSlots;
    rb.addCounter(ExecutorTotalRequestsHandled, executorTotalRequestHandled.value())
        .addCounter(ExecutorTotalSuccess, executorTotalSuccess.value())
        .addCounter(ExecutorTotalFailed, executorTotalExecutionFailed.value())
        .addCounter(ExecutorTotalKilled, executorTotalIKilled.value())
        .addCounter(ExecutorTotalEvictedFromWaitQueue, totalEvictedFromWaitQueue.value())
        .addCounter(ExecutorTotalRejectedRequests, totalRejectedRequests.value())
        .addGauge(ExecutorNumQueuedRequests, executorNumQueuedRequests.value())
        .addGauge(ExecutorNumPreemptableRequests, executorNumPreemptableRequests.value())
        .addGauge(ExecutorMemoryPerInstance, memoryPerInstance.value())
        .addGauge(ExecutorCacheMemoryPerInstance, cacheMemoryPerInstance.value())
        .addGauge(ExecutorJvmMaxMemory, jvmMaxMemory.value())
        .addGauge(ExecutorMaxFreeSlotsConfigured, totalConfiguredSlots)
        .addGauge(ExecutorMaxFreeSlots, totalSlots)
        .addGauge(ExecutorNumExecutors, numExecutors.value())
        .addGauge(ExecutorNumExecutorsConfigured, numExecutorsConfigured)
        .addGauge(ExecutorWaitQueueSizeConfigured, waitQueueSizeConfigured)
        .addGauge(ExecutorWaitQueueSize, waitQueueSize.value())
        .addGauge(ExecutorNumExecutorsAvailable, numExecutorsAvailable.value())
        .addGauge(ExecutorAvailableFreeSlots, slotsAvailableTotal)
        .addGauge(ExecutorAvailableFreeSlotsPercent, slotsAvailablePercent)
        .addCounter(ExecutorTotalPreemptionTimeToKill, totalPreemptionTimeToKill.value())
        .addCounter(ExecutorTotalPreemptionTimeLost, totalPreemptionTimeLost.value())
        .addGauge(ExecutorMaxPreemptionTimeToKill, maxPreemptionTimeToKill.value())
        .addGauge(ExecutorMaxPreemptionTimeLost, maxPreemptionTimeLost.value())
        .addCounter(ExecutorFallOffSuccessTimeLost, fallOffSuccessTimeLost.value())
        .addGauge(ExecutorFallOffSuccessMaxTimeLost, fallOffMaxSuccessTimeLost.value())
        .addCounter(ExecutorFallOffFailedTimeLost, fallOffFailedTimeLost.value())
        .addGauge(ExecutorFallOffFailedMaxTimeLost, fallOffMaxFailedTimeLost.value())
        .addCounter(ExecutorFallOffKilledTimeLost, fallOffKilledTimeLost.value())
        .addGauge(ExecutorFallOffKilledMaxTimeLost, fallOffMaxKilledTimeLost.value())
        .addCounter(ExecutorFallOffNumCompletedFragments, fallOffNumCompletedFragments.value());
    if (numExecutorsAvailableAverage != null) {
        rb.addGauge(ExecutorNumExecutorsAvailableAverage, numExecutorsAvailableAverage.value());
    }
    if (executorNumQueuedRequestsAverage != null) {
        rb.addGauge(ExecutorNumQueuedRequestsAverage, executorNumQueuedRequestsAverage.value());
    }
    if (queueTime != null) {
        rb.addGauge(AverageQueueTime, queueTime.getSum() / queueTime.getN());
    }
    if (runningTime != null) {
        rb.addGauge(AverageResponseTime, runningTime.getSum() / runningTime.getN());
    }
    for (MutableQuantiles q : percentileTimeToKill) {
        q.snapshot(rb, true);
    }
    for (MutableQuantiles q : percentileTimeLost) {
        q.snapshot(rb, true);
    }
}
Also used: MutableQuantiles (org.apache.hadoop.metrics2.lib.MutableQuantiles)
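
The percentile entries above come from MutableQuantiles. As a hedged illustration of the underlying pattern (the names, descriptions, and 60-second interval below are assumptions, not taken from the Hive source), a quantile metric is created through a MetricsRegistry, fed with samples, and later snapshotted into the record builder:

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

class QuantilesSketch {
    // hypothetical registry, used only for this sketch
    private final MetricsRegistry registry = new MetricsRegistry("QuantilesSketch");
    // name, description, sample name, value name, and rollover interval (seconds) are illustrative
    private final MutableQuantiles timeToKill = registry.newQuantiles(
        "PercentileTimeToKill60s", "Time to kill a preempted task", "ops", "latency", 60);

    void onPreemption(long millisToKill) {
        // record one sample; percentile estimates roll over at each interval boundary
        timeToKill.add(millisToKill);
    }

    void emit(MetricsRecordBuilder rb) {
        // 'true' emits the snapshot even if the metric has not changed since the last pass
        timeToKill.snapshot(rb, true);
    }
}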

Example 52 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.

Class LlapDaemonExecutorMetrics, method getMetrics:

@Override
public void getMetrics(MetricsCollector collector, boolean b) {
    MetricsRecordBuilder rb = collector.addRecord(ExecutorMetrics).setContext("executors").tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId);
    getExecutorStats(rb);
}
Also used: MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)
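
For context, here is a hedged sketch of how a MetricsSource like the one above is typically registered so that getMetrics(collector, all) gets called on every snapshot. The record, tag, and source names are illustrative and not the ones LLAP actually uses:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

public class ExampleSource implements MetricsSource {
    @Override
    public void getMetrics(MetricsCollector collector, boolean all) {
        // one record per snapshot, tagged so sinks can distinguish processes and sessions
        MetricsRecordBuilder rb = collector.addRecord("ExampleMetrics")
            .setContext("example")
            .tag(Interns.info("SessionId", "Session identifier"), "session-0");
        rb.addGauge(Interns.info("ExampleGauge", "An example gauge"), 42);
    }

    public static void main(String[] args) {
        // registering the source makes the metrics system poll it periodically
        MetricsSystem ms = DefaultMetricsSystem.initialize("ExampleDaemon");
        ms.register("ExampleSource", "Example metrics source", new ExampleSource());
    }
}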

Example 53 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.

Class LlapDaemonIOMetrics, method getIoStats:

private void getIoStats(MetricsRecordBuilder rb) {
    rb.addGauge(MaxDecodingTime, maxDecodingTime.value());
    rateOfDecoding.snapshot(rb, true);
    for (MutableQuantiles q : decodingTimes) {
        q.snapshot(rb, true);
    }
}
Also used: MutableQuantiles (org.apache.hadoop.metrics2.lib.MutableQuantiles)
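
rateOfDecoding and maxDecodingTime are snapshotted like any other mutable metric. A minimal sketch of the common pattern of pairing a rate metric with a running-maximum gauge, assuming hypothetical field and metric names rather than the ones in LlapDaemonIOMetrics:

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableRate;

class DecodeTimingSketch {
    private final MetricsRegistry registry = new MetricsRegistry("DecodeTimingSketch");
    // MutableRate tracks the number of operations and their average duration
    private final MutableRate decodeRate = registry.newRate("RateOfDecoding", "Decoding rate");
    // running maximum kept in a plain long gauge
    private final MutableGaugeLong maxDecodeTime =
        registry.newGauge("MaxDecodingTime", "Max decoding time observed", 0L);

    void onDecodeFinished(long elapsedMillis) {
        decodeRate.add(elapsedMillis);
        if (elapsedMillis > maxDecodeTime.value()) {
            maxDecodeTime.set(elapsedMillis);
        }
    }

    void emit(MetricsRecordBuilder rb) {
        // push every metric registered above into the record in one call
        registry.snapshot(rb, true);
    }
}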

Example 54 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.

Class LlapDaemonIOMetrics, method getMetrics:

@Override
public void getMetrics(MetricsCollector collector, boolean b) {
    MetricsRecordBuilder rb = collector.addRecord(IOMetrics).setContext("io").tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId);
    getIoStats(rb);
}
Also used: MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)

Example 55 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.

Class JvmMetrics, method getGcUsage:

private void getGcUsage(MetricsRecordBuilder rb) {
    long count = 0;
    long timeMillis = 0;
    for (GarbageCollectorMXBean gcBean : gcBeans) {
        long c = gcBean.getCollectionCount();
        long t = gcBean.getCollectionTime();
        MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
        rb.addCounter(gcInfo[0], c).addCounter(gcInfo[1], t);
        count += c;
        timeMillis += t;
    }
    rb.addCounter(GcCount, count).addCounter(GcTimeMillis, timeMillis);
    if (pauseMonitor != null) {
        rb.addCounter(GcNumWarnThresholdExceeded, pauseMonitor.getNumGcWarnThreadholdExceeded());
        rb.addCounter(GcNumInfoThresholdExceeded, pauseMonitor.getNumGcInfoThresholdExceeded());
        rb.addCounter(GcTotalExtraSleepTime, pauseMonitor.getTotalGcExtraSleepTime());
    }
}
Also used: MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo), JvmMetricsInfo (org.apache.hadoop.hive.common.JvmMetricsInfo), GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean)
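
The getGcInfo(gcBean.getName()) call above returns a count/time MetricsInfo pair per garbage collector. A hedged sketch of how such a helper is commonly implemented, with a cache so the interned names are reused on every snapshot (naming is illustrative and may differ from the Hive class):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.lib.Interns;

class GcInfoCache {
    private final Map<String, MetricsInfo[]> gcInfoCache = new ConcurrentHashMap<>();

    MetricsInfo[] getGcInfo(String gcName) {
        // index 0 holds the per-collector count, index 1 the per-collector time in msec
        return gcInfoCache.computeIfAbsent(gcName, name -> new MetricsInfo[] {
            Interns.info("GcCount" + name, "GC count for " + name),
            Interns.info("GcTimeMillis" + name, "GC time in msec for " + name)
        });
    }
}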

Aggregations

Classes most frequently used together with MetricsRecordBuilder across these examples:

MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 99 usages
Test (org.junit.Test): 47 usages
Path (org.apache.hadoop.fs.Path): 20 usages
Configuration (org.apache.hadoop.conf.Configuration): 14 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 11 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 8 usages
MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo): 7 usages
IOException (java.io.IOException): 6 usages
MetricsCollector (org.apache.hadoop.metrics2.MetricsCollector): 6 usages
MetricsSource (org.apache.hadoop.metrics2.MetricsSource): 5 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 usages
Quantile (org.apache.hadoop.metrics2.util.Quantile): 4 usages
ServiceException (com.google.protobuf.ServiceException): 3 usages
InterruptedIOException (java.io.InterruptedIOException): 2 usages
GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean): 2 usages
Map (java.util.Map): 2 usages
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 2 usages
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 2 usages