Use of org.apache.hadoop.metrics2.lib.MutableQuantiles in project hadoop by apache.
From class TestMutableMetrics, method testMutableQuantilesRollover.
/**
 * Test that {@link MutableQuantiles} rolls the window over at the specified
 * interval: values pushed in one interval must appear in the snapshot taken
 * after that interval elapses, and the window must reset between intervals.
 */
@Test(timeout = 30000)
public void testMutableQuantilesRollover() throws Exception {
  MetricsRecordBuilder mb = mockMetricsRecordBuilder();
  MetricsRegistry registry = new MetricsRegistry("test");
  // Use a 5s rollover period
  MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops", "Latency", 5);
  Quantile[] quants = MutableQuantiles.quantiles;
  String name = "Foo%dthPercentileLatency";
  String desc = "%d percentile latency with 5 second interval for stat";
  // Push values for three intervals
  long start = System.nanoTime() / 1000000;
  for (int i = 1; i <= 3; i++) {
    // Insert the values: 1000 identical samples of value i, so every
    // percentile of the window is exactly i.
    for (long j = 1; j <= 1000; j++) {
      quantiles.add(i);
    }
    // Sleep until 1s after the next 5s interval, to let the metrics
    // roll over
    long sleep = (start + (5000 * i) + 1000) - (System.nanoTime() / 1000000);
    // Guard against a negative duration: if the inserts and verification ran
    // past the target wall-clock point, Thread.sleep(negative) would throw
    // IllegalArgumentException and fail the test spuriously.
    if (sleep > 0) {
      Thread.sleep(sleep);
    }
    // Verify that the window reset, check it has the values we pushed in
    registry.snapshot(mb, false);
    for (Quantile q : quants) {
      int percentile = (int) (100 * q.quantile);
      String n = String.format(name, percentile);
      String d = String.format(desc, percentile);
      verify(mb).addGauge(info(n, d), (long) i);
    }
  }
  // Verify the metrics were added the right number of times
  verify(mb, times(3)).addGauge(info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 1000);
  for (Quantile q : quants) {
    int percentile = (int) (100 * q.quantile);
    String n = String.format(name, percentile);
    String d = String.format(desc, percentile);
    verify(mb, times(3)).addGauge(eq(info(n, d)), anyLong());
  }
}
Use of org.apache.hadoop.metrics2.lib.MutableQuantiles in project hadoop by apache.
From class DataNodeVolumeMetrics, method addDataFileIoLatency.
/**
 * Records one data-file I/O operation: increments the op counter and feeds
 * the observed latency into the rate stat and every quantile estimator.
 *
 * @param latency the observed latency of the data-file I/O
 */
public void addDataFileIoLatency(final long latency) {
  totalDataFileIos.incr();
  dataFileIoRate.add(latency);
  for (final MutableQuantiles quantile : dataFileIoLatencyQuantiles) {
    quantile.add(latency);
  }
}
Use of org.apache.hadoop.metrics2.lib.MutableQuantiles in project hadoop by apache.
From class TestReadWriteDiskValidator, method testReadWriteDiskValidator.
/**
 * Checks the disk validator {@code count} times, then verifies that the
 * quantile estimators recorded exactly that many samples and that the
 * expected latency metrics are present in a snapshot.
 */
@Test
public void testReadWriteDiskValidator() throws DiskErrorException, InterruptedException {
  int count = 100;
  File testDir = new File(System.getProperty("test.build.data"));
  ReadWriteDiskValidator readWriteDiskValidator = (ReadWriteDiskValidator) DiskValidatorFactory.getInstance(ReadWriteDiskValidator.NAME);
  for (int i = 0; i < count; i++) {
    readWriteDiskValidator.checkStatus(testDir);
  }
  ReadWriteDiskValidatorMetrics metric = ReadWriteDiskValidatorMetrics.getMetric(testDir.toString());
  // assertEquals takes (message, expected, actual): the expected sample
  // count comes first, the observed estimator count second.
  Assert.assertEquals("The count number of estimator in MutableQuantiles "
      + "metrics of file read is not right",
      count, metric.getFileReadQuantiles()[0].getEstimator().getCount());
  Assert.assertEquals("The count number of estimator in MutableQuantiles "
      + "metrics of file write is not right",
      count, metric.getFileWriteQuantiles()[0].getEstimator().getCount());
  MetricsSource source = ms.getSource(ReadWriteDiskValidatorMetrics.sourceName(testDir.toString()));
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  source.getMetrics(collector, true);
  MetricsRecords.assertMetric(collector.getRecords().get(0), "FailureCount", 0);
  MetricsRecords.assertMetric(collector.getRecords().get(0), "LastFailureTime", (long) 0);
  // All MutableQuantiles haven't rolled over yet because the minimum
  // interval is 1 hour, so we just test if these metrics exist.
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency3600sNumOps");
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency3600s50thPercentileLatencyMicros");
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency86400sNumOps");
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency864000sNumOps");
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency3600sNumOps");
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency3600s50thPercentileLatencyMicros");
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency86400sNumOps");
  MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency864000sNumOps");
}
Use of org.apache.hadoop.metrics2.lib.MutableQuantiles in project hive by apache.
From class LlapDaemonExecutorMetrics, method getExecutorStats.
// Populates the record builder with a snapshot of every executor-level
// counter, gauge, and latency percentile for this daemon.
private void getExecutorStats(MetricsRecordBuilder rb) {
// Presumably refreshes per-thread gauges before the snapshot below —
// defined elsewhere in this class; confirm against its implementation.
updateThreadMetrics(rb);
// Slot accounting: total capacity = wait-queue capacity + executor threads.
final int totalSlots = waitQueueSize.value() + numExecutors;
// Free wait-queue slots = queue capacity minus currently queued requests.
final int slotsAvailableInQueue = waitQueueSize.value() - executorNumQueuedRequests.value();
// Overall free slots = free queue slots + idle executors.
final int slotsAvailableTotal = slotsAvailableInQueue + numExecutorsAvailable.value();
// Guard against division by zero when there is no capacity at all.
final float slotsAvailablePercent = totalSlots <= 0 ? 0.0f : (float) slotsAvailableTotal / (float) totalSlots;
// One chained call emitting every scalar metric; kept as-is because the
// builder chain is order-sensitive in its mapping of names to values.
rb.addCounter(ExecutorTotalRequestsHandled, executorTotalRequestHandled.value()).addCounter(ExecutorTotalSuccess, executorTotalSuccess.value()).addCounter(ExecutorTotalFailed, executorTotalExecutionFailed.value()).addCounter(ExecutorTotalKilled, executorTotalIKilled.value()).addCounter(ExecutorTotalEvictedFromWaitQueue, totalEvictedFromWaitQueue.value()).addCounter(ExecutorTotalRejectedRequests, totalRejectedRequests.value()).addGauge(ExecutorNumQueuedRequests, executorNumQueuedRequests.value()).addGauge(ExecutorNumPreemptableRequests, executorNumPreemptableRequests.value()).addGauge(ExecutorMemoryPerInstance, memoryPerInstance.value()).addGauge(ExecutorCacheMemoryPerInstance, cacheMemoryPerInstance.value()).addGauge(ExecutorJvmMaxMemory, jvmMaxMemory.value()).addGauge(ExecutorMaxFreeSlots, totalSlots).addGauge(ExecutorNumExecutorsPerInstance, numExecutors).addGauge(ExecutorWaitQueueSize, waitQueueSize.value()).addGauge(ExecutorNumExecutorsAvailable, numExecutorsAvailable.value()).addGauge(ExecutorAvailableFreeSlots, slotsAvailableTotal).addGauge(ExecutorAvailableFreeSlotsPercent, slotsAvailablePercent).addCounter(ExecutorTotalPreemptionTimeToKill, totalPreemptionTimeToKill.value()).addCounter(ExecutorTotalPreemptionTimeLost, totalPreemptionTimeLost.value()).addGauge(ExecutorMaxPreemptionTimeToKill, maxPreemptionTimeToKill.value()).addGauge(ExecutorMaxPreemptionTimeLost, maxPreemptionTimeLost.value()).addCounter(ExecutorFallOffSuccessTimeLost, fallOffSuccessTimeLost.value()).addGauge(ExecutorFallOffSuccessMaxTimeLost, fallOffMaxSuccessTimeLost.value()).addCounter(ExecutorFallOffFailedTimeLost, fallOffFailedTimeLost.value()).addGauge(ExecutorFallOffFailedMaxTimeLost, fallOffMaxFailedTimeLost.value()).addCounter(ExecutorFallOffKilledTimeLost, fallOffKilledTimeLost.value()).addGauge(ExecutorFallOffKilledMaxTimeLost, fallOffMaxKilledTimeLost.value()).addCounter(ExecutorFallOffNumCompletedFragments, fallOffNumCompletedFragments.value());
// Emit the latency distributions; snapshot(rb, true) forces inclusion of
// the quantile values in this record.
for (MutableQuantiles q : percentileTimeToKill) {
q.snapshot(rb, true);
}
for (MutableQuantiles q : percentileTimeLost) {
q.snapshot(rb, true);
}
}
Use of org.apache.hadoop.metrics2.lib.MutableQuantiles in project hive by apache.
From class LlapDaemonExecutorMetrics, method addMetricsPreemptionTimeLost.
/**
 * Accounts for time lost to a preemption: adds the sample to the running
 * total, raises the observed maximum if this sample exceeds it, and feeds
 * the sample to every percentile estimator.
 *
 * @param value time lost to this preemption
 */
public void addMetricsPreemptionTimeLost(long value) {
  totalPreemptionTimeLost.incr(value);
  // Raise the high-water mark when exceeded. NOTE(review): this
  // check-then-set is not atomic — assumes callers are effectively
  // serialized; confirm before relying on it under concurrency.
  if (value > maxTimeLost) {
    maxTimeLost = value;
    maxPreemptionTimeLost.set(value);
  }
  for (final MutableQuantiles percentile : percentileTimeLost) {
    percentile.add(value);
  }
}
Aggregations