Use of com.codahale.metrics.Counter in project lucene-solr by apache.
The class SolrCoreMetricManagerTest, method assertRegistered.
private void assertRegistered(String scope, Map<String, Counter> newMetrics, SolrCoreMetricManager coreMetricManager) {
  if (scope == null || newMetrics == null) {
    return;
  }
  String filter = "." + scope + ".";
  MetricRegistry registry = metricManager.registry(coreMetricManager.getRegistryName());
  assertEquals(newMetrics.size(), registry.getMetrics().keySet().stream().filter(s -> s.contains(filter)).count());
  Map<String, Metric> registeredMetrics = registry.getMetrics().entrySet().stream()
      .filter(e -> e.getKey() != null && e.getKey().contains(filter))
      .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
  for (Map.Entry<String, Metric> entry : registeredMetrics.entrySet()) {
    String name = entry.getKey();
    Metric expectedMetric = entry.getValue();
    // the metric registered under this name should be the very Counter that was passed in;
    // strip the category/scope prefix to look it up in newMetrics
    Metric actualMetric = newMetrics.get(name.substring(name.lastIndexOf(filter) + filter.length()));
    assertNotNull(actualMetric);
    assertEquals(expectedMetric, actualMetric);
  }
}
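For reference, Dropwizard metric names are plain dot-separated strings, which is why the test above can count a scope's metrics simply by filtering registered names on the "." + scope + "." substring. Below is a minimal standalone sketch of that idea using only the Dropwizard API; the registry and metric names are illustrative and not taken from the test.

import java.util.Map;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;

public class ScopeFilterSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    // names are dot-separated, e.g. "<category>.<scope>.<metric>"
    registry.counter(MetricRegistry.name("QUERY", "/select", "requests")).inc();
    registry.counter(MetricRegistry.name("UPDATE", "/update", "requests")).inc(3);

    String filter = "." + "/select" + ".";
    long matching = registry.getMetrics().keySet().stream()
        .filter(s -> s.contains(filter))
        .count();
    System.out.println("metrics in scope /select: " + matching); // 1

    // the same names can be used to read an individual metric back
    Map<String, Metric> all = registry.getMetrics();
    Counter requests = (Counter) all.get("QUERY./select.requests");
    System.out.println("requests: " + requests.getCount()); // 1
  }
}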
Use of com.codahale.metrics.Counter in project lucene-solr by apache.
The class SolrMetricManagerTest, method testRegisterAll.
@Test
public void testRegisterAll() throws Exception {
  Random r = random();
  SolrMetricManager metricManager = new SolrMetricManager();
  Map<String, Counter> metrics = SolrMetricTestUtils.getRandomMetrics(r, true);
  MetricRegistry mr = new MetricRegistry();
  for (Map.Entry<String, Counter> entry : metrics.entrySet()) {
    mr.register(entry.getKey(), entry.getValue());
  }
  String registryName = TestUtil.randomSimpleString(r, 1, 10);
  assertEquals(0, metricManager.registry(registryName).getMetrics().size());
  metricManager.registerAll(registryName, mr, false);
  // this should simply skip existing names
  metricManager.registerAll(registryName, mr, true);
  // this should produce an error
  try {
    metricManager.registerAll(registryName, mr, false);
    fail("registerAll with duplicate metric names should fail");
  } catch (IllegalArgumentException e) {
    // expected
  }
}
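The duplicate-name failure this test expects is standard Dropwizard behavior: MetricRegistry.register throws IllegalArgumentException when a metric is registered under an existing name, which is presumably what SolrMetricManager.registerAll surfaces when skipping is disabled. A small sketch of that underlying behavior, with no Solr classes involved:

import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

public class DuplicateRegisterSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    registry.register("requests", new Counter());
    try {
      registry.register("requests", new Counter()); // same name, different instance
    } catch (IllegalArgumentException expected) {
      // e.g. "A metric named requests already exists"
      System.out.println(expected.getMessage());
    }
  }
}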
Use of com.codahale.metrics.Counter in project lucene-solr by apache.
The class PeerSyncReplicationTest, method test.
@Test
public void test() throws Exception {
  handle.clear();
  handle.put("timestamp", SKIPVAL);
  waitForThingsToLevelOut(30);
  del("*:*");
  // index enough docs and commit to establish frame of reference for PeerSync
  for (int i = 0; i < 100; i++) {
    indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
  }
  commit();
  waitForThingsToLevelOut(30);
  try {
    checkShardConsistency(false, true);
    long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    assertEquals(docId, cloudClientDocs);
    CloudJettyRunner initialLeaderJetty = shardToLeaderJetty.get("shard1");
    List<CloudJettyRunner> otherJetties = getOtherAvailableJetties(initialLeaderJetty);
    CloudJettyRunner neverLeader = otherJetties.get(otherJetties.size() - 1);
    otherJetties.remove(neverLeader);
    // first shutdown a node that will never be a leader
    forceNodeFailures(singletonList(neverLeader));
    // node failure and recovery via PeerSync
    log.info("Forcing PeerSync");
    CloudJettyRunner nodePeerSynced = forceNodeFailureAndDoPeerSync(false);
    // add a few more docs
    indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
    indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
    commit();
    cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    assertEquals(docId, cloudClientDocs);
    // now shutdown all other nodes except for the node that was peer-synced
    otherJetties.remove(nodePeerSynced);
    forceNodeFailures(otherJetties);
    waitForThingsToLevelOut(30);
    checkShardConsistency(false, true);
    // now shutdown the original leader
    log.info("Now shutting down initial leader");
    forceNodeFailures(singletonList(initialLeaderJetty));
    log.info("Updating mappings from zk");
    waitForNewLeader(cloudClient, "shard1", (Replica) initialLeaderJetty.client.info, new TimeOut(15, SECONDS));
    updateMappingsFromZk(jettys, clients, true);
    assertEquals("PeerSynced node did not become leader", nodePeerSynced, shardToLeaderJetty.get("shard1"));
    // bring up the node that was down all along, and let it PeerSync from the node that was forced to PeerSync
    bringUpDeadNodeAndEnsureNoReplication(neverLeader, false);
    waitTillNodesActive();
    checkShardConsistency(false, true);
    // bring back all the nodes including the initial leader
    // (commented out as it reports "Maximum concurrent create/delete watches above limit" violations and thread leaks)
    /*for (int i = 0; i < nodesDown.size(); i++) {
      bringUpDeadNodeAndEnsureNoReplication(shardToLeaderJetty.get("shard1"), neverLeader, false);
    }
    checkShardConsistency(false, true);*/
    // make sure the leader has not changed after bringing the initial leader back
    assertEquals(nodePeerSynced, shardToLeaderJetty.get("shard1"));
    // assert metrics
    MetricRegistry registry = nodePeerSynced.jetty.getCoreContainer().getMetricManager().registry("solr.core.collection1");
    Map<String, Metric> metrics = registry.getMetrics();
    assertTrue("REPLICATION.time present", metrics.containsKey("REPLICATION.time"));
    assertTrue("REPLICATION.errors present", metrics.containsKey("REPLICATION.errors"));
    Timer timer = (Timer) metrics.get("REPLICATION.time");
    assertEquals(1L, timer.getCount());
    Counter counter = (Counter) metrics.get("REPLICATION.errors");
    assertEquals(0L, counter.getCount());
    success = true;
  } finally {
    System.clearProperty("solr.disableFingerprint");
  }
}
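The metric assertions at the end read two Dropwizard types back from the core registry: a Timer whose count is the number of completed replications, and a Counter of replication errors. A standalone sketch of how those two types behave, using only the Dropwizard API and the same illustrative metric names:

import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class ReplicationMetricsSketch {
  public static void main(String[] args) throws Exception {
    MetricRegistry registry = new MetricRegistry();
    Timer time = registry.timer("REPLICATION.time");
    Counter errors = registry.counter("REPLICATION.errors");

    // time one simulated replication; Timer.Context records the elapsed time on close
    try (Timer.Context ignored = time.time()) {
      TimeUnit.MILLISECONDS.sleep(10);
    }

    System.out.println(time.getCount());   // 1 -> matches assertEquals(1L, timer.getCount())
    System.out.println(errors.getCount()); // 0 -> matches assertEquals(0L, counter.getCount())
  }
}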
Use of com.codahale.metrics.Counter in project lucene-solr by apache.
The class HdfsCollectionsAPIDistributedZkTest, method moveReplicaTest.
@Test
public void moveReplicaTest() throws Exception {
  cluster.waitForAllNodes(5000);
  String coll = "movereplicatest_coll";
  CloudSolrClient cloudClient = cluster.getSolrClient();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
  create.setMaxShardsPerNode(2);
  cloudClient.request(create);
  for (int i = 0; i < 10; i++) {
    cloudClient.add(coll, sdoc("id", String.valueOf(i)));
    cloudClient.commit(coll);
  }
  List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
  Collections.shuffle(slices, random());
  Slice slice = null;
  Replica replica = null;
  for (Slice s : slices) {
    slice = s;
    for (Replica r : s.getReplicas()) {
      if (s.getLeader() != r) {
        replica = r;
      }
    }
  }
  String dataDir = getDataDir(replica);
  Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
  ArrayList<String> l = new ArrayList<>(liveNodes);
  Collections.shuffle(l, random());
  String targetNode = null;
  for (String node : liveNodes) {
    if (!replica.getNodeName().equals(node)) {
      targetNode = node;
      break;
    }
  }
  assertNotNull(targetNode);
  CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
  moveReplica.process(cloudClient);
  checkNumOfCores(cloudClient, replica.getNodeName(), 0);
  checkNumOfCores(cloudClient, targetNode, 2);
  waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
  slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
  boolean found = false;
  for (Replica newReplica : slice.getReplicas()) {
    if (getDataDir(newReplica).equals(dataDir)) {
      found = true;
    }
  }
  assertTrue(found);
  // data dir is reused so replication will be skipped
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
      if (counter != null) {
        assertEquals(0, counter.getCount());
      }
    }
  }
}
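The final loop treats a missing "REPLICATION./replication.requests" counter as "no replication happened" and otherwise requires a count of zero. An equivalent null-safe lookup can be sketched against a plain MetricRegistry using a MetricFilter; the registry contents here are illustrative, not pulled from a running Solr node.

import java.util.SortedMap;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

public class ReplicationRequestsSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    registry.counter("REPLICATION./replication.requests"); // present but never incremented

    // collect only the counters whose names start with "REPLICATION."
    SortedMap<String, Counter> replicationCounters =
        registry.getCounters((name, metric) -> name.startsWith("REPLICATION."));

    Counter requests = replicationCounters.get("REPLICATION./replication.requests");
    if (requests != null) {
      System.out.println(requests.getCount()); // 0, i.e. no replication requests were served
    }
  }
}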
Use of com.codahale.metrics.Counter in project sling by apache.
The class MetricWrapperTest, method counter.
@Test
public void counter() throws Exception {
  Counter counter = registry.counter("test");
  CounterImpl counterStats = new CounterImpl(counter);
  counterStats.increment();
  assertEquals(1, counterStats.getCount());
  assertEquals(1, counter.getCount());
  assertEquals(1, counterStats.getCount());
  counterStats.increment();
  counterStats.increment();
  assertEquals(3, counterStats.getCount());
  counterStats.decrement();
  assertEquals(2, counterStats.getCount());
  assertEquals(2, counter.getCount());
  counterStats.increment(7);
  assertEquals(9, counterStats.getCount());
  assertEquals(9, counter.getCount());
  counterStats.decrement(5);
  assertEquals(4, counterStats.getCount());
  assertEquals(4, counter.getCount());
  assertSame(counter, counterStats.adaptTo(Counter.class));
}
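CounterImpl here is Sling's wrapper around a Dropwizard Counter. As a rough illustration only, and not the actual Sling implementation, a delegating wrapper that would satisfy the calls made in this test could look like the sketch below; the increment/decrement/getCount/adaptTo names are assumed from the test itself.

import com.codahale.metrics.Counter;

public class CounterWrapperSketch {

  // hypothetical stand-in for CounterImpl: every operation delegates to the wrapped Dropwizard Counter
  static final class DelegatingCounter {
    private final Counter counter;

    DelegatingCounter(Counter counter) {
      this.counter = counter;
    }

    void increment() { counter.inc(); }
    void decrement() { counter.dec(); }
    void increment(long delta) { counter.inc(delta); }
    void decrement(long delta) { counter.dec(delta); }
    long getCount() { return counter.getCount(); }

    // adaptTo exposes the underlying Dropwizard counter, as assertSame(...) expects above
    @SuppressWarnings("unchecked")
    <T> T adaptTo(Class<T> type) {
      return type == Counter.class ? (T) counter : null;
    }
  }

  public static void main(String[] args) {
    Counter counter = new Counter();
    DelegatingCounter stats = new DelegatingCounter(counter);
    stats.increment();
    stats.increment(7);
    stats.decrement(5);
    System.out.println(stats.getCount() + " == " + counter.getCount()); // 3 == 3
    System.out.println(stats.adaptTo(Counter.class) == counter);        // true
  }
}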