Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.
Class LlapDaemonExecutorMetrics, method getExecutorStats.
private void getExecutorStats(MetricsRecordBuilder rb) {
  updateThreadMetrics(rb);
  final int totalConfiguredSlots = waitQueueSizeConfigured + numExecutorsConfigured;
  final int totalSlots = waitQueueSize.value() + numExecutors.value();
  final int slotsAvailableInQueue = waitQueueSize.value() - executorNumQueuedRequests.value();
  final int slotsAvailableTotal = slotsAvailableInQueue + numExecutorsAvailable.value();
  final float slotsAvailablePercent =
      totalSlots <= 0 ? 0.0f : (float) slotsAvailableTotal / (float) totalSlots;

  rb.addCounter(ExecutorTotalRequestsHandled, executorTotalRequestHandled.value())
      .addCounter(ExecutorTotalSuccess, executorTotalSuccess.value())
      .addCounter(ExecutorTotalFailed, executorTotalExecutionFailed.value())
      .addCounter(ExecutorTotalKilled, executorTotalIKilled.value())
      .addCounter(ExecutorTotalEvictedFromWaitQueue, totalEvictedFromWaitQueue.value())
      .addCounter(ExecutorTotalRejectedRequests, totalRejectedRequests.value())
      .addGauge(ExecutorNumQueuedRequests, executorNumQueuedRequests.value())
      .addGauge(ExecutorNumPreemptableRequests, executorNumPreemptableRequests.value())
      .addGauge(ExecutorMemoryPerInstance, memoryPerInstance.value())
      .addGauge(ExecutorCacheMemoryPerInstance, cacheMemoryPerInstance.value())
      .addGauge(ExecutorJvmMaxMemory, jvmMaxMemory.value())
      .addGauge(ExecutorMaxFreeSlotsConfigured, totalConfiguredSlots)
      .addGauge(ExecutorMaxFreeSlots, totalSlots)
      .addGauge(ExecutorNumExecutors, numExecutors.value())
      .addGauge(ExecutorNumExecutorsConfigured, numExecutorsConfigured)
      .addGauge(ExecutorWaitQueueSizeConfigured, waitQueueSizeConfigured)
      .addGauge(ExecutorWaitQueueSize, waitQueueSize.value())
      .addGauge(ExecutorNumExecutorsAvailable, numExecutorsAvailable.value())
      .addGauge(ExecutorAvailableFreeSlots, slotsAvailableTotal)
      .addGauge(ExecutorAvailableFreeSlotsPercent, slotsAvailablePercent)
      .addCounter(ExecutorTotalPreemptionTimeToKill, totalPreemptionTimeToKill.value())
      .addCounter(ExecutorTotalPreemptionTimeLost, totalPreemptionTimeLost.value())
      .addGauge(ExecutorMaxPreemptionTimeToKill, maxPreemptionTimeToKill.value())
      .addGauge(ExecutorMaxPreemptionTimeLost, maxPreemptionTimeLost.value())
      .addCounter(ExecutorFallOffSuccessTimeLost, fallOffSuccessTimeLost.value())
      .addGauge(ExecutorFallOffSuccessMaxTimeLost, fallOffMaxSuccessTimeLost.value())
      .addCounter(ExecutorFallOffFailedTimeLost, fallOffFailedTimeLost.value())
      .addGauge(ExecutorFallOffFailedMaxTimeLost, fallOffMaxFailedTimeLost.value())
      .addCounter(ExecutorFallOffKilledTimeLost, fallOffKilledTimeLost.value())
      .addGauge(ExecutorFallOffKilledMaxTimeLost, fallOffMaxKilledTimeLost.value())
      .addCounter(ExecutorFallOffNumCompletedFragments, fallOffNumCompletedFragments.value());

  if (numExecutorsAvailableAverage != null) {
    rb.addGauge(ExecutorNumExecutorsAvailableAverage, numExecutorsAvailableAverage.value());
  }
  if (executorNumQueuedRequestsAverage != null) {
    rb.addGauge(ExecutorNumQueuedRequestsAverage, executorNumQueuedRequestsAverage.value());
  }
  if (queueTime != null) {
    rb.addGauge(AverageQueueTime, queueTime.getSum() / queueTime.getN());
  }
  if (runningTime != null) {
    rb.addGauge(AverageResponseTime, runningTime.getSum() / runningTime.getN());
  }
  for (MutableQuantiles q : percentileTimeToKill) {
    q.snapshot(rb, true);
  }
  for (MutableQuantiles q : percentileTimeLost) {
    q.snapshot(rb, true);
  }
}
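The per-percentile values snapshotted at the end of getExecutorStats come from MutableQuantiles instances. The sketch below shows the typical lifecycle of such a quantile metric on a MetricsRegistry; the registry name, metric names, and the 60-second rollover interval are assumptions for illustration, not the actual Hive wiring.

// Sketch only (not the Hive implementation): how a MutableQuantiles metric like
// percentileTimeToKill is typically created, fed, and snapshotted.
// The registry name, metric names, and 60-second interval are assumptions.
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

public class TimeToKillQuantilesSketch {
  private final MetricsRegistry registry = new MetricsRegistry("ExecutorSketch");

  // Rolling estimate of time-to-kill latencies over a 60-second window.
  private final MutableQuantiles timeToKill = registry.newQuantiles(
      "PercentileTimeToKill", "Time taken to kill a preempted task", "ops", "latencyMs", 60);

  // Producers record an observation whenever a task is preempted.
  public void recordTimeToKill(long latencyMs) {
    timeToKill.add(latencyMs);
  }

  // Collectors emit the current estimate into a record, mirroring the
  // q.snapshot(rb, true) calls in getExecutorStats above.
  public void snapshotInto(MetricsRecordBuilder rb) {
    timeToKill.snapshot(rb, true);
  }
}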
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.
Class LlapDaemonExecutorMetrics, method getMetrics.
@Override
public void getMetrics(MetricsCollector collector, boolean b) {
  MetricsRecordBuilder rb = collector.addRecord(ExecutorMetrics)
      .setContext("executors")
      .tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME)
      .tag(SessionId, sessionId);
  getExecutorStats(rb);
}
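Both LLAP metrics classes follow the standard Hadoop metrics2 MetricsSource pattern: getMetrics creates one record per collection, tags it, and delegates to a private helper that fills in counters and gauges. Below is a minimal, self-contained sketch of that pattern, including registration with DefaultMetricsSystem; the class name, record name, context, tag names, and metric names are illustrative assumptions, not taken from Hive.

// Minimal MetricsSource sketch (assumed names; not Hive code). It shows the
// same addRecord()/setContext()/tag()/addCounter()/addGauge() chain used above.
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

public class MyDaemonMetrics implements MetricsSource {
  private volatile long requestsHandled;
  private volatile int queuedRequests;

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // One record per collection; the context and tags group the record the
    // same way the LLAP sources use "executors"/"io" plus a session id tag.
    MetricsRecordBuilder rb = collector.addRecord("MyDaemonMetrics")
        .setContext("mydaemon")
        .tag(Interns.info("ProcessName", "Name of the daemon process"), "MyDaemon");
    rb.addCounter(Interns.info("TotalRequestsHandled", "Requests handled so far"), requestsHandled)
        .addGauge(Interns.info("NumQueuedRequests", "Requests currently queued"), queuedRequests);
  }

  public static MyDaemonMetrics create() {
    // Registering the source lets the metrics system invoke getMetrics()
    // periodically and expose the record (e.g. via JMX or configured sinks).
    return DefaultMetricsSystem.instance()
        .register("MyDaemonMetrics", "Sample daemon metrics source", new MyDaemonMetrics());
  }
}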
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.
Class LlapDaemonIOMetrics, method getIoStats.
private void getIoStats(MetricsRecordBuilder rb) {
  rb.addGauge(MaxDecodingTime, maxDecodingTime.value());
  rateOfDecoding.snapshot(rb, true);
  for (MutableQuantiles q : decodingTimes) {
    q.snapshot(rb, true);
  }
}
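The rateOfDecoding metric snapshotted here is a rate/stat-style mutable metric. As a point of comparison, the sketch below shows how a MutableRate is usually created on a MetricsRegistry, updated, and snapshotted; the names are assumptions and this is not the actual LlapDaemonIOMetrics wiring.

// Sketch under assumed names (not Hive code): lifecycle of a MutableRate,
// which tracks the number of operations and the average time per operation.
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;

public class DecodingRateSketch {
  private final MetricsRegistry registry = new MetricsRegistry("IoSketch");
  private final MutableRate decodingRate = registry.newRate("RateOfDecoding", "Column decoding rate");

  // Record the elapsed time of one decoding operation.
  public void recordDecode(long elapsedMillis) {
    decodingRate.add(elapsedMillis);
  }

  // Emit the accumulated stats into a record, like rateOfDecoding.snapshot(rb, true) above.
  public void snapshotInto(MetricsRecordBuilder rb) {
    decodingRate.snapshot(rb, true);
  }
}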
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.
Class LlapDaemonIOMetrics, method getMetrics.
@Override
public void getMetrics(MetricsCollector collector, boolean b) {
  MetricsRecordBuilder rb = collector.addRecord(IOMetrics)
      .setContext("io")
      .tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME)
      .tag(SessionId, sessionId);
  getIoStats(rb);
}
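One common way to exercise a source like this outside a running daemon is Hadoop's MetricsAsserts test helper, which drives a collection cycle against a MetricsSource and returns a record builder whose captured values can be asserted on. A hedged sketch, assuming the hadoop-common test artifact is on the classpath, that ioMetrics is an already-constructed LlapDaemonIOMetrics instance, and that "MaxDecodingTime" is the emitted metric name:

// Test-style sketch (assumptions: hadoop-common test jar available, "ioMetrics"
// is a constructed LlapDaemonIOMetrics, "MaxDecodingTime" is the emitted name).
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;

public class IoMetricsCheckSketch {
  public static void check(MetricsSource ioMetrics) {
    // getMetrics(...) collects one record from the source for inspection.
    MetricsRecordBuilder rb = getMetrics(ioMetrics);
    // e.g. MetricsAsserts.assertGauge("MaxDecodingTime", expectedValue, rb);
  }
}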
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hive by apache.
Class JvmMetrics, method getGcUsage.
private void getGcUsage(MetricsRecordBuilder rb) {
  long count = 0;
  long timeMillis = 0;
  for (GarbageCollectorMXBean gcBean : gcBeans) {
    long c = gcBean.getCollectionCount();
    long t = gcBean.getCollectionTime();
    MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
    rb.addCounter(gcInfo[0], c).addCounter(gcInfo[1], t);
    count += c;
    timeMillis += t;
  }
  rb.addCounter(GcCount, count).addCounter(GcTimeMillis, timeMillis);
  if (pauseMonitor != null) {
    rb.addCounter(GcNumWarnThresholdExceeded, pauseMonitor.getNumGcWarnThreadholdExceeded());
    rb.addCounter(GcNumInfoThresholdExceeded, pauseMonitor.getNumGcInfoThresholdExceeded());
    rb.addCounter(GcTotalExtraSleepTime, pauseMonitor.getTotalGcExtraSleepTime());
  }
}
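getGcUsage aggregates per-collector counts and times from the JVM's GarbageCollectorMXBeans before emitting them as counters. A standalone sketch of the same JDK calls, independent of the Hive metrics wrappers (the printed labels are illustrative):

// Standalone sketch using only JDK APIs: aggregate GC count and time across
// collectors, mirroring the loop in getGcUsage, with an extra guard for
// collectors that report -1 when they expose no data.
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class GcUsageSketch {
  public static void main(String[] args) {
    long totalCount = 0;
    long totalTimeMillis = 0;
    for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
      long c = gcBean.getCollectionCount();   // -1 if the collector does not report counts
      long t = gcBean.getCollectionTime();    // approximate accumulated time, in milliseconds
      System.out.println(gcBean.getName() + ": count=" + c + ", timeMillis=" + t);
      if (c > 0) {
        totalCount += c;
      }
      if (t > 0) {
        totalTimeMillis += t;
      }
    }
    System.out.println("GcCount=" + totalCount + ", GcTimeMillis=" + totalTimeMillis);
  }
}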