Use of com.codahale.metrics.Meter in the dropwizard/metrics project — class SingletonMetricsJerseyTest, method exceptionMeteredMethodsAreExceptionMetered.
@Test
public void exceptionMeteredMethodsAreExceptionMetered() {
    // Meter registered by @ExceptionMetered for the "exception-metered" resource method.
    final Meter exceptionsMeter = registry.meter(name(InstrumentedResource.class, "exceptionMetered", "exceptions"));

    // A successful request must not tick the exception meter.
    final String response = resource().path("exception-metered").get(String.class);
    assertThat(response).isEqualTo("fuh");
    assertThat(exceptionsMeter.getCount()).isZero();

    // Force the resource to throw; the wrapped cause should be the IOException
    // raised by the resource, and the meter should count exactly one exception.
    try {
        resource().path("exception-metered").queryParam("splode", "true").get(String.class);
        failBecauseExceptionWasNotThrown(MappableContainerException.class);
    } catch (MappableContainerException e) {
        assertThat(e.getCause()).isInstanceOf(IOException.class);
    }
    assertThat(exceptionsMeter.getCount()).isEqualTo(1);
}
Use of com.codahale.metrics.Meter in the apache/hadoop project — class KMSWebApp, method contextInitialized.
@Override
public void contextInitialized(ServletContextEvent sce) {
    // Bootstraps the KMS web application: configuration, logging, security,
    // metrics, auditing, and the (optionally cached and access-controlled)
    // KeyProvider chain. Any failure aborts the JVM, since the servlet
    // container would otherwise keep running a half-initialized KMS.
    try {
        // The KMS configuration directory is mandatory and passed via a system property.
        String confDir = System.getProperty(KMSConfiguration.KMS_CONFIG_DIR);
        if (confDir == null) {
            throw new RuntimeException("System property '" + KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
        }
        kmsConf = KMSConfiguration.getKMSConf();
        initLogging(confDir);
        UserGroupInformation.setConfiguration(kmsConf);
        LOG.info("-------------------------------------------------------------");
        LOG.info(" Java runtime version : {}", System.getProperty("java.runtime.version"));
        LOG.info(" User: {}", System.getProperty("user.name"));
        // Parameterized form for consistency with the log statements above
        // (the original concatenated the version into the message string).
        LOG.info(" KMS Hadoop Version: {}", VersionInfo.getVersion());
        LOG.info("-------------------------------------------------------------");

        // ACLs are reloaded in the background so admin edits take effect without restart.
        kmsAcls = new KMSACLs();
        kmsAcls.startReloader();

        // Metrics: one meter per KMS operation category, exposed over JMX.
        metricRegistry = new MetricRegistry();
        jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
        jmxReporter.start();
        generateEEKCallsMeter = metricRegistry.register(GENERATE_EEK_METER, new Meter());
        decryptEEKCallsMeter = metricRegistry.register(DECRYPT_EEK_METER, new Meter());
        adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
        keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
        invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER, new Meter());
        unauthorizedCallsMeter = metricRegistry.register(UNAUTHORIZED_CALLS_METER, new Meter());
        unauthenticatedCallsMeter = metricRegistry.register(UNAUTHENTICATED_CALLS_METER, new Meter());

        kmsAudit = new KMSAudit(kmsConf);

        // initializing the KeyProvider
        String providerString = kmsConf.get(KMSConfiguration.KEY_PROVIDER_URI);
        if (providerString == null) {
            throw new IllegalStateException("No KeyProvider has been defined");
        }
        KeyProvider keyProvider = KeyProviderFactory.get(new URI(providerString), kmsConf);
        // Optionally wrap the provider with a key cache to cut backend round trips.
        if (kmsConf.getBoolean(KMSConfiguration.KEY_CACHE_ENABLE, KMSConfiguration.KEY_CACHE_ENABLE_DEFAULT)) {
            long keyTimeOutMillis = kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY, KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
            long currKeyTimeOutMillis = kmsConf.getLong(KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_KEY, KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_DEFAULT);
            keyProvider = new CachingKeyProvider(keyProvider, keyTimeOutMillis, currKeyTimeOutMillis);
        }
        LOG.info("Initialized KeyProvider {}", keyProvider);

        // Layer the crypto extension (EEK generation/decryption) with eager key
        // generation, and optionally per-key authorization on top.
        keyProviderCryptoExtension = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(keyProvider);
        keyProviderCryptoExtension = new EagerKeyGeneratorKeyProviderCryptoExtension(kmsConf, keyProviderCryptoExtension);
        if (kmsConf.getBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE, KMSConfiguration.KEY_AUTHORIZATION_ENABLE_DEFAULT)) {
            keyProviderCryptoExtension = new KeyAuthorizationKeyProvider(keyProviderCryptoExtension, kmsAcls);
        }
        LOG.info("Initialized KeyProviderCryptoExtension {}", keyProviderCryptoExtension);

        final int defaultBitlength = kmsConf.getInt(KeyProvider.DEFAULT_BITLENGTH_NAME, KeyProvider.DEFAULT_BITLENGTH);
        LOG.info("Default key bitlength is {}", defaultBitlength);
        LOG.info("KMS Started");
    } catch (Throwable ex) {
        // Report to stdout (logging may not be initialized yet) and abort the JVM.
        System.out.println();
        System.out.println("ERROR: Hadoop KMS could not be started");
        System.out.println();
        System.out.println("REASON: " + ex.toString());
        System.out.println();
        System.out.println("Stacktrace:");
        System.out.println("---------------------------------------------------");
        ex.printStackTrace(System.out);
        System.out.println("---------------------------------------------------");
        System.out.println();
        System.exit(1);
    }
}
Use of com.codahale.metrics.Meter in the apache/lucene-solr project — class TestRecovery, method testLogReplay.
// Verifies transaction-log replay on core restart: uncommitted updates survive
// in the tlog, replay can be held back and then released via test hooks,
// versions stay accessible via real-time get throughout, and the TLOG.* metrics
// (state, remaining logs/bytes, replay op meter) track replay progress.
@Test
public void testLogReplay() throws Exception {
try {
// Disable commit-on-close so updates exist only in the tlog across restarts.
DirectUpdateHandler2.commitOnClose = false;
// Gate that blocks replay until the test releases permits.
final Semaphore logReplay = new Semaphore(0);
// Released by the finish hook each time a replay run completes.
final Semaphore logReplayFinish = new Semaphore(0);
UpdateLog.testing_logReplayHook = () -> {
try {
assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
};
UpdateLog.testing_logReplayFinishHook = () -> logReplayFinish.release();
clearIndex();
assertU(commit());
// Record every update's version (newest first) to compare with /get getVersions.
Deque<Long> versions = new ArrayDeque<>();
versions.addFirst(addAndGetVersion(sdoc("id", "A1"), null));
versions.addFirst(addAndGetVersion(sdoc("id", "A11"), null));
versions.addFirst(addAndGetVersion(sdoc("id", "A12"), null));
versions.addFirst(deleteByQueryAndGetVersion("id:A11", null));
versions.addFirst(addAndGetVersion(sdoc("id", "A13"), null));
// atomic update
versions.addFirst(addAndGetVersion(sdoc("id", "A12", "val_i_dvo", map("set", 1)), null));
// in-place update
versions.addFirst(addAndGetVersion(sdoc("id", "A12", "val_i_dvo", map("set", 2)), null));
// Nothing is committed yet: normal search sees zero docs, but real-time get
// still serves the full version list from the tlog.
assertJQ(req("q", "*:*"), "/response/numFound==0");
assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
// Restart the core; this should trigger log replay.
h.close();
createCore();
// live map view
Map<String, Metric> metrics = getMetrics();
// Solr should kick this off now
// h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
// verify that previous close didn't do a commit
// recovery should be blocked by our hook
assertJQ(req("q", "*:*"), "/response/numFound==0");
// make sure we can still access versions after a restart
assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
assertEquals(UpdateLog.State.REPLAYING, h.getCore().getUpdateHandler().getUpdateLog().getState());
// check metrics
Gauge<Integer> state = (Gauge<Integer>) metrics.get("TLOG.state");
assertEquals(UpdateLog.State.REPLAYING.ordinal(), state.getValue().intValue());
Gauge<Integer> replayingLogs = (Gauge<Integer>) metrics.get("TLOG.replay.remaining.logs");
assertTrue(replayingLogs.getValue().intValue() > 0);
Gauge<Long> replayingDocs = (Gauge<Long>) metrics.get("TLOG.replay.remaining.bytes");
assertTrue(replayingDocs.getValue().longValue() > 0);
Meter replayDocs = (Meter) metrics.get("TLOG.replay.ops");
long initialOps = replayDocs.getCount();
// unblock recovery
logReplay.release(1000);
// make sure we can still access versions during recovery
assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
// wait until recovery has finished
assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
// assert that in-place update is retained
assertJQ(req("q", "val_i_dvo:2"), "/response/numFound==1");
assertJQ(req("q", "*:*"), "/response/numFound==3");
// Exactly the 7 updates logged above should have been replayed.
assertEquals(7L, replayDocs.getCount() - initialOps);
assertEquals(UpdateLog.State.ACTIVE.ordinal(), state.getValue().intValue());
// make sure we can still access versions after recovery
assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
// A second round of uncommitted updates, then another restart/replay cycle.
assertU(adoc("id", "A2"));
assertU(adoc("id", "A3"));
assertU(delI("A2"));
assertU(adoc("id", "A4"));
assertJQ(req("q", "*:*"), "/response/numFound==3");
// assert that in-place update is retained
assertJQ(req("q", "val_i_dvo:2"), "/response/numFound==1");
h.close();
createCore();
// Solr should kick this off now
// h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
// wait until recovery has finished
assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
assertJQ(req("q", "*:*"), "/response/numFound==5");
assertJQ(req("q", "id:A2"), "/response/numFound==0");
// no updates, so insure that recovery does not run
h.close();
int permits = logReplay.availablePermits();
createCore();
// Solr should kick this off now
// h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
assertJQ(req("q", "*:*"), "/response/numFound==5");
// assert that in-place update is retained
assertJQ(req("q", "val_i_dvo:2"), "/response/numFound==1");
Thread.sleep(100);
// no updates, so insure that recovery didn't run
assertEquals(permits, logReplay.availablePermits());
assertEquals(UpdateLog.State.ACTIVE, h.getCore().getUpdateHandler().getUpdateLog().getState());
} finally {
// Restore global hooks/flags so other test methods are unaffected.
DirectUpdateHandler2.commitOnClose = true;
UpdateLog.testing_logReplayHook = null;
UpdateLog.testing_logReplayFinishHook = null;
}
}
Use of com.codahale.metrics.Meter in the apache/lucene-solr project — class TestRecovery, method testBuffering.
// Verifies UpdateLog buffering: while in BUFFERING state, updates from a
// simulated leader are written to the tlog but not applied (search and
// real-time get see nothing); applyBufferedUpdates() replays them, dropping
// reordered updates with older versions; the TLOG.state, TLOG.buffered.ops and
// TLOG.applyingBuffered.ops metrics track the process.
@Test
public void testBuffering() throws Exception {
// Disable commit-on-close so the tlog is the only record of the updates.
DirectUpdateHandler2.commitOnClose = false;
// Gate that blocks buffered-update replay until the test releases permits.
final Semaphore logReplay = new Semaphore(0);
// Released by the finish hook when a replay run completes.
final Semaphore logReplayFinish = new Semaphore(0);
UpdateLog.testing_logReplayHook = () -> {
try {
assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
} catch (Exception e) {
throw new RuntimeException(e);
}
};
UpdateLog.testing_logReplayFinishHook = logReplayFinish::release;
SolrQueryRequest req = req();
UpdateHandler uhandler = req.getCore().getUpdateHandler();
UpdateLog ulog = uhandler.getUpdateLog();
try {
clearIndex();
assertU(commit());
Map<String, Metric> metrics = getMetrics();
assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
ulog.bufferUpdates();
assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
// Applying with nothing buffered is a no-op and returns no future.
Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();
assertTrue(rinfoFuture == null);
assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
ulog.bufferUpdates();
assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
// Capture metric baselines before buffering any updates.
Gauge<Integer> state = (Gauge<Integer>) metrics.get("TLOG.state");
assertEquals(UpdateLog.State.BUFFERING.ordinal(), state.getValue().intValue());
Gauge<Integer> bufferedOps = (Gauge<Integer>) metrics.get("TLOG.buffered.ops");
int initialOps = bufferedOps.getValue();
Meter applyingBuffered = (Meter) metrics.get("TLOG.applyingBuffered.ops");
long initialApplyingOps = applyingBuffered.getCount();
// Monotonically increasing versions; "-" prefix marks delete versions.
String v3 = getNextVersion();
String v940_del = "-" + getNextVersion();
String v950_del = "-" + getNextVersion();
String v1010 = getNextVersion();
String v1015 = getNextVersion();
String v1017_del = "-" + getNextVersion();
String v1020 = getNextVersion();
String v1030 = getNextVersion();
String v1040 = getNextVersion();
String v1050 = getNextVersion();
String v1060 = getNextVersion();
String v1070 = getNextVersion();
String v1080 = getNextVersion();
String v2010_del = "-" + getNextVersion();
String v2060_del = "-" + getNextVersion();
String v3000_del = "-" + getNextVersion();
String versionListFirstCheck = String.join(",", v2010_del, v1030, v1020, v1017_del, v1015, v1010);
String versionListSecondCheck = String.join(",", v3000_del, v1080, v1050, v1060, v940_del, v1040, v3, v2010_del, v1030, v1020, v1017_del, v1015, v1010);
// simulate updates from a leader
updateJ(jsonAdd(sdoc("id", "B1", "_version_", v1010)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
updateJ(jsonAdd(sdoc("id", "B11", "_version_", v1015)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
updateJ(jsonDelQ("id:B1 id:B11 id:B2 id:B3"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v1017_del));
updateJ(jsonAdd(sdoc("id", "B2", "_version_", v1020)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
updateJ(jsonAdd(sdoc("id", "B3", "_version_", v1030)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
deleteAndGetVersion("B1", params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v2010_del));
// Versions are visible even while buffered, and survive a commit.
assertJQ(req("qt", "/get", "getVersions", "6"), "=={'versions':[" + versionListFirstCheck + "]}");
assertU(commit());
assertJQ(req("qt", "/get", "getVersions", "6"), "=={'versions':[" + versionListFirstCheck + "]}");
// updates should be buffered, so we should not see any results yet.
assertJQ(req("q", "*:*"), "/response/numFound==0");
// real-time get should also not show anything (this could change in the future,
// but it's currently used for validating version numbers too, so it would
// be bad for updates to be visible if we're just buffering.
assertJQ(req("qt", "/get", "id", "B3"), "=={'doc':null}");
// All six leader updates above should have been counted as buffered ops.
assertEquals(6, bufferedOps.getValue().intValue() - initialOps);
rinfoFuture = ulog.applyBufferedUpdates();
assertTrue(rinfoFuture != null);
assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());
logReplay.release(1000);
UpdateLog.RecoveryInfo rinfo = rinfoFuture.get();
assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
// The same six buffered ops should have been applied.
assertEquals(6L, applyingBuffered.getCount() - initialApplyingOps);
assertJQ(req("qt", "/get", "getVersions", "6"), "=={'versions':[" + versionListFirstCheck + "]}");
assertJQ(req("q", "*:*"), "/response/numFound==2");
// move back to recovering
ulog.bufferUpdates();
assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
Long ver = getVer(req("qt", "/get", "id", "B3"));
assertEquals(Long.valueOf(v1030), ver);
// add a reordered doc that shouldn't overwrite one in the index
updateJ(jsonAdd(sdoc("id", "B3", "_version_", v3)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
// reorder two buffered updates
updateJ(jsonAdd(sdoc("id", "B4", "_version_", v1040)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
// this update should not take affect
deleteAndGetVersion("B4", params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v940_del));
updateJ(jsonAdd(sdoc("id", "B6", "_version_", v1060)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
updateJ(jsonAdd(sdoc("id", "B5", "_version_", v1050)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
updateJ(jsonAdd(sdoc("id", "B8", "_version_", v1080)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
// test that delete by query is at least buffered along with everything else so it will delete the
// currently buffered id:8 (even if it doesn't currently support versioning)
updateJ("{\"delete\": { \"query\":\"id:B2 OR id:B8\" }}", params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v3000_del));
assertJQ(req("qt", "/get", "getVersions", "13"), // the "3" appears because versions aren't checked while buffering
"=={'versions':[" + versionListSecondCheck + "]}");
logReplay.drainPermits();
rinfoFuture = ulog.applyBufferedUpdates();
assertTrue(rinfoFuture != null);
assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());
// apply a single update
logReplay.release(1);
// now add another update
updateJ(jsonAdd(sdoc("id", "B7", "_version_", v1070)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
// a reordered update that should be dropped
deleteAndGetVersion("B5", params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v950_del));
deleteAndGetVersion("B6", params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v2060_del));
logReplay.release(1000);
UpdateLog.RecoveryInfo recInfo = rinfoFuture.get();
// B3 keeps v1030 (reordered v3 dropped); B6 deleted by v2060_del; B5's older
// v950_del delete was dropped; B2/B8 removed by the buffered delete-by-query.
assertJQ(req("q", "*:*", "sort", "id asc", "fl", "id,_version_"), "/response/docs==[" + "{'id':'B3','_version_':" + v1030 + "}" + ",{'id':'B4','_version_':" + v1040 + "}" + ",{'id':'B5','_version_':" + v1050 + "}" + ",{'id':'B7','_version_':" + v1070 + "}" + "]");
assertEquals(1, recInfo.deleteByQuery);
// leave each test method in a good state
assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
assertEquals(0, bufferedOps.getValue().intValue());
} finally {
// Restore global hooks/flags and release the request so other tests are unaffected.
DirectUpdateHandler2.commitOnClose = true;
UpdateLog.testing_logReplayHook = null;
UpdateLog.testing_logReplayFinishHook = null;
req().close();
}
}
Use of com.codahale.metrics.Meter in the apache/lucene-solr project — class SolrIndexMetricsTest, method testIndexMetricsWithDetails.
@Test
public void testIndexMetricsWithDetails() throws Exception {
    // Exercise the metric config overrides: merge timing disabled, but
    // mergeDetails enabled — detail meters must still be registered and updated.
    // test mergeDetails override too
    System.setProperty("solr.tests.metrics.merge", "false");
    System.setProperty("solr.tests.metrics.mergeDetails", "true");
    try {
        initCore("solrconfig-indexmetrics.xml", "schema.xml");
        addDocs();
        MetricRegistry registry = h.getCoreContainer().getMetricManager().registry(h.getCore().getCoreMetricManager().getRegistryName());
        assertNotNull(registry);
        Map<String, Metric> metrics = registry.getMetrics();
        assertTrue(metrics.entrySet().stream().filter(e -> e.getKey().startsWith("INDEX")).count() >= 12);
        // check basic index meters
        Timer timer = (Timer) metrics.get("INDEX.merge.minor");
        assertTrue("minorMerge: " + timer.getCount(), timer.getCount() >= 3);
        timer = (Timer) metrics.get("INDEX.merge.major");
        assertEquals("majorMerge: " + timer.getCount(), 0, timer.getCount());
        // check detailed meters
        Meter meter = (Meter) metrics.get("INDEX.merge.major.docs");
        assertEquals("majorMergeDocs: " + meter.getCount(), 0, meter.getCount());
        meter = (Meter) metrics.get("INDEX.flush");
        assertTrue("flush: " + meter.getCount(), meter.getCount() > 10);
    } finally {
        // Fix: the original never cleared these system properties, leaking the
        // overrides into any test method that runs afterwards in the same JVM.
        System.clearProperty("solr.tests.metrics.merge");
        System.clearProperty("solr.tests.metrics.mergeDetails");
    }
}
Aggregations