Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache: class TestGraphiteMetrics, method testPutMetrics3().
/**
 * Assert that timestamps are converted correctly (HADOOP-11182).
 */
@Test
public void testPutMetrics3() {
  // setup GraphiteSink
  GraphiteSink sink = new GraphiteSink();
  final GraphiteSink.Graphite mockGraphite = makeGraphite();
  Whitebox.setInternalState(sink, "graphite", mockGraphite);

  // given two metrics records with timestamps 1000 milliseconds apart
  List<MetricsTag> tags = Collections.emptyList();
  Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
  metrics.add(makeMetric("foo1", 1));
  MetricsRecord record1 =
      new MetricsRecordImpl(MsInfo.Context, 1000000000000L, tags, metrics);
  MetricsRecord record2 =
      new MetricsRecordImpl(MsInfo.Context, 1000000001000L, tags, metrics);

  sink.putMetrics(record1);
  sink.putMetrics(record2);
  sink.flush();
  try {
    sink.close();
  } catch (IOException e) {
    e.printStackTrace();
  }

  // then the timestamps in the graphite stream should differ by one second
  try {
    verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000000\n"));
    verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000001\n"));
  } catch (IOException e) {
    e.printStackTrace();
  }
}
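The makeMetric() and makeGraphite() helpers are not part of this snippet. A minimal sketch of what they might look like with Mockito, assuming AbstractMetric exposes name()/value() and GraphiteSink.Graphite exposes isConnected() (the actual helpers in TestGraphiteMetrics may differ):

// Hedged sketch: possible implementations of the helpers used above,
// built on Mockito mocks of AbstractMetric and GraphiteSink.Graphite.
private AbstractMetric makeMetric(String name, Number value) {
  AbstractMetric metric = mock(AbstractMetric.class);
  when(metric.name()).thenReturn(name);    // metric name written to Graphite
  when(metric.value()).thenReturn(value);  // metric value written to Graphite
  return metric;
}

private GraphiteSink.Graphite makeGraphite() {
  GraphiteSink.Graphite mockGraphite = mock(GraphiteSink.Graphite.class);
  when(mockGraphite.isConnected()).thenReturn(true); // pretend the socket is open
  return mockGraphite;
}

With isConnected() stubbed to true, the sink writes straight to the mock, so the test can verify the exact lines sent to Graphite, including the seconds-resolution timestamps.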
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache: class TestMetricsSourceAdapter, method testMetricCacheUpdateRace().
/**
 * Test a race condition when updating the JMX cache (HADOOP-12482):
 * 1. Thread A reads the JMX metrics every 2 JMX cache TTLs. It marks the JMX
 *    cache for update by setting lastRecs to null, then adds a new key to the
 *    metrics. The next read should pick up this new key.
 * 2. Thread B triggers a JMX metric update every 1 JMX cache TTL and assigns
 *    lastRecs to a new object (no longer null).
 * 3. Thread A tries to read the JMX metrics again, sees that lastRecs is not
 *    null, and does not update the JMX cache. As a result the read does not
 *    pick up the new metric.
 * @throws Exception
 */
@Test
public void testMetricCacheUpdateRace() throws Exception {
  // Create a test source with a single metric counter of value 1.
  TestMetricsSource source = new TestMetricsSource();
  MetricsSourceBuilder sourceBuilder = MetricsAnnotations.newSourceBuilder(source);

  final long JMX_CACHE_TTL = 250; // ms
  List<MetricsTag> injectedTags = new ArrayList<>();
  MetricsSourceAdapter sourceAdapter =
      new MetricsSourceAdapter("test", "test", "test JMX cache update race condition",
          sourceBuilder.build(), injectedTags, null, null, JMX_CACHE_TTL, false);

  ScheduledExecutorService updaterExecutor =
      Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().build());
  ScheduledExecutorService readerExecutor =
      Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().build());
  final AtomicBoolean hasError = new AtomicBoolean(false);

  // Wake up every 1 JMX cache TTL to set lastRecs before updateJmxCache() is called.
  SourceUpdater srcUpdater = new SourceUpdater(sourceAdapter, hasError);
  ScheduledFuture<?> updaterFuture =
      updaterExecutor.scheduleAtFixedRate(srcUpdater, sourceAdapter.getJmxCacheTTL(),
          sourceAdapter.getJmxCacheTTL(), TimeUnit.MILLISECONDS);
  srcUpdater.setFuture(updaterFuture);

  // Wake up every 2 JMX cache TTLs so updateJmxCache() will try to update the
  // JMX cache; an initial delay of 0 populates the JMX info cache at the start.
  SourceReader srcReader = new SourceReader(source, sourceAdapter, hasError);
  ScheduledFuture<?> readerFuture =
      readerExecutor.scheduleAtFixedRate(srcReader, 0,
          2 * sourceAdapter.getJmxCacheTTL(), TimeUnit.MILLISECONDS);
  srcReader.setFuture(readerFuture);

  // Let the threads do their work.
  Thread.sleep(RACE_TEST_RUNTIME);
  assertFalse("Hit error", hasError.get());

  // cleanup
  updaterExecutor.shutdownNow();
  readerExecutor.shutdownNow();
  updaterExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
  readerExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
}
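The SourceUpdater and SourceReader runnables, and the RACE_TEST_RUNTIME constant, are class-level helpers not shown in this snippet. A hedged sketch of the updater's shape, assuming the package-private MetricsSourceAdapter.getMetrics(MetricsCollectorImpl, boolean) is visible to a test in the same package; the reader would follow the same pattern but re-read the adapter's getMBeanInfo() and check that newly added metrics appear:

// Illustrative only: the real SourceUpdater in TestMetricsSourceAdapter may differ.
private static class SourceUpdater implements Runnable {
  private final MetricsSourceAdapter sa;
  private final AtomicBoolean hasError;
  private ScheduledFuture<?> future;

  SourceUpdater(MetricsSourceAdapter sourceAdapter, AtomicBoolean err) {
    this.sa = sourceAdapter;
    this.hasError = err;
  }

  void setFuture(ScheduledFuture<?> future) {
    this.future = future;
  }

  @Override
  public void run() {
    MetricsCollectorImpl builder = new MetricsCollectorImpl();
    try {
      // Snapshot the source; inside the adapter this assigns lastRecs,
      // which is what races with the reader's cache update.
      sa.getMetrics(builder, true);
    } catch (Exception e) {
      // Record the failure for the main thread's assertFalse() and stop rescheduling.
      hasError.set(true);
      future.cancel(false);
    }
  }
}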
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache: class TestMetricsSystemImpl, method testInitFirstVerifyCallBacks().
@Test
public void testInitFirstVerifyCallBacks() throws Exception {
  DefaultMetricsSystem.shutdown();
  new ConfigBuilder().add("*.period", 8)
      .add("test.sink.test.class", TestSink.class.getName())
      .add("test.*.source.filter.exclude", "s0")
      .add("test.source.s1.metric.filter.exclude", "X*")
      .add("test.sink.sink1.metric.filter.exclude", "Y*")
      .add("test.sink.sink2.metric.filter.exclude", "Y*")
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  ms.register("s0", "s0 desc", new TestSource("s0rec"));
  TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
  s1.c1.incr();
  s1.xxx.incr();
  s1.g1.set(2);
  s1.yyy.incr(2);
  s1.s1.add(0);
  MetricsSink sink1 = mock(MetricsSink.class);
  MetricsSink sink2 = mock(MetricsSink.class);
  ms.registerSink("sink1", "sink1 desc", sink1);
  ms.registerSink("sink2", "sink2 desc", sink2);
  // publish the metrics
  ms.publishMetricsNow();
  try {
    verify(sink1, timeout(200).times(2)).putMetrics(r1.capture());
    verify(sink2, timeout(200).times(2)).putMetrics(r2.capture());
  } finally {
    ms.stop();
    ms.shutdown();
  }
  // When we call stop, at most two sources will be consumed by each sink thread.
  List<MetricsRecord> mr1 = r1.getAllValues();
  List<MetricsRecord> mr2 = r2.getAllValues();
  checkMetricsRecords(mr1);
  assertEquals("output", mr1, mr2);
}
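The captors r1 and r2 referenced above are fields of the test class rather than locals. A plausible declaration, assuming the class runs with the Mockito JUnit runner (the actual setup in TestMetricsSystemImpl may differ):

// Hedged sketch of the captor fields the snippet above assumes.
@RunWith(MockitoJUnitRunner.class)
public class TestMetricsSystemImpl {
  @Captor private ArgumentCaptor<MetricsRecord> r1; // records delivered to sink1
  @Captor private ArgumentCaptor<MetricsRecord> r2; // records delivered to sink2
  // test methods and helpers omitted
}

Each putMetrics() call on the mocked sinks is captured, so r1.getAllValues() and r2.getAllValues() return everything the two sinks saw, which the test then asserts are equal.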
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache: class TestMetricsSystemImpl, method testRegisterSourceWithoutName().
@Test
public void testRegisterSourceWithoutName() {
  MetricsSystem ms = new MetricsSystemImpl();
  TestSource ts = new TestSource("ts");
  TestSource2 ts2 = new TestSource2("ts2");
  ms.register(ts);
  ms.register(ts2);
  ms.init("TestMetricsSystem");
  // If a metrics source is registered without a name,
  // its class name is used as the name.
  MetricsSourceAdapter sa = ((MetricsSystemImpl) ms).getSourceAdapter("TestSource");
  assertNotNull(sa);
  MetricsSourceAdapter sa2 = ((MetricsSystemImpl) ms).getSourceAdapter("TestSource2");
  assertNotNull(sa2);
  ms.shutdown();
}
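These tests rely on a TestSource helper class built with the @Metrics annotation this page is about (TestSource2 is a second, similarly annotated class used to exercise class-name-based naming). A hedged sketch of what such a source could look like, consistent with the fields used in testInitFirstVerifyCallBacks (c1, xxx, g1, yyy, s1); the actual classes in TestMetricsSystemImpl may differ:

// Hedged sketch of a @Metrics-annotated source consistent with the tests above.
@Metrics(context = "test")
private static class TestSource {
  @Metric("C1 desc") MutableCounterLong c1;
  @Metric("XXX desc") MutableCounterLong xxx;
  @Metric("G1 desc") MutableGaugeLong g1;
  @Metric("YYY desc") MutableGaugeLong yyy;
  @Metric MutableRate s1;
  final MetricsRegistry registry;

  TestSource(String recName) {
    registry = new MetricsRegistry(recName);
  }
}

Because ms.register(ts) passes no name and the annotation above sets none, the simple class name ("TestSource") becomes the source name, which is exactly what getSourceAdapter("TestSource") looks up.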
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache: class TestMetricsSystemImpl, method testRegisterDups().
@Test
public void testRegisterDups() {
  MetricsSystem ms = new MetricsSystemImpl();
  TestSource ts1 = new TestSource("ts1");
  TestSource ts2 = new TestSource("ts2");
  ms.register("ts1", "", ts1);
  MetricsSource s1 = ms.getSource("ts1");
  assertNotNull(s1);
  // Re-registering under the same name should work when the metrics system
  // is not started.
  ms.register("ts1", "", ts2);
  MetricsSource s2 = ms.getSource("ts1");
  assertNotNull(s2);
  assertNotSame(s1, s2);
  ms.shutdown();
}