
Example 21 with Counter

use of com.codahale.metrics.Counter in project lucene-solr by apache.

the class SolrCoreMetricManagerTest method assertRegistered.

private void assertRegistered(String scope, Map<String, Counter> newMetrics, SolrCoreMetricManager coreMetricManager) {
    if (scope == null || newMetrics == null) {
        return;
    }
    String filter = "." + scope + ".";
    MetricRegistry registry = metricManager.registry(coreMetricManager.getRegistryName());
    assertEquals(newMetrics.size(), registry.getMetrics().keySet().stream().filter(s -> s.contains(filter)).count());
    Map<String, Metric> registeredMetrics = registry.getMetrics().entrySet().stream()
            .filter(e -> e.getKey() != null && e.getKey().contains(filter))
            .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    for (Map.Entry<String, Metric> entry : registeredMetrics.entrySet()) {
        String name = entry.getKey();
        // the registered key is scope-qualified, so strip everything up to and including
        // "." + scope + "." to look up the expected metric in the newMetrics map
        Metric expectedMetric = newMetrics.get(name.substring(name.lastIndexOf(filter) + filter.length()));
        Metric actualMetric = entry.getValue();
        assertNotNull(actualMetric);
        assertEquals(expectedMetric, actualMetric);
    }
}
Also used : CoreAdminParams(org.apache.solr.common.params.CoreAdminParams) MetricRegistry(com.codahale.metrics.MetricRegistry) FieldType(org.apache.solr.schema.FieldType) TestUtil(org.apache.lucene.util.TestUtil) Metric(com.codahale.metrics.Metric) MockMetricReporter(org.apache.solr.metrics.reporters.MockMetricReporter) IOException(java.io.IOException) HashMap(java.util.HashMap) Random(java.util.Random) Test(org.junit.Test) Collectors(java.util.stream.Collectors) SolrTestCaseJ4(org.apache.solr.SolrTestCaseJ4) SolrInfoBean(org.apache.solr.core.SolrInfoBean) Map(java.util.Map) After(org.junit.After) Counter(com.codahale.metrics.Counter) PluginInfo(org.apache.solr.core.PluginInfo) Before(org.junit.Before)
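
The assertion above hinges on metrics being registered under dotted, scope-qualified names. Below is a minimal, self-contained sketch of that naming convention using only the Dropwizard API; the registry contents, the scope value "query", and the "QUERY"/"UPDATE" prefixes are illustrative and not taken from the Solr test.

import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

public class ScopedCounterNamingSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        // illustrative scope value
        String scope = "query";
        // register counters under "<category>.<scope>.<name>", mirroring the dotted keys
        // that assertRegistered() filters on with "." + scope + "."
        registry.register(MetricRegistry.name("QUERY", scope, "requests"), new Counter());
        registry.register(MetricRegistry.name("QUERY", scope, "errors"), new Counter());
        registry.counter(MetricRegistry.name("UPDATE", "update", "requests")).inc();
        String filter = "." + scope + ".";
        long scoped = registry.getMetrics().keySet().stream()
                .filter(s -> s.contains(filter))
                .count();
        System.out.println("metrics in scope '" + scope + "': " + scoped); // prints 2
    }
}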

Example 22 with Counter

use of com.codahale.metrics.Counter in project lucene-solr by apache.

the class SolrMetricManagerTest method testRegisterAll.

@Test
public void testRegisterAll() throws Exception {
    Random r = random();
    SolrMetricManager metricManager = new SolrMetricManager();
    Map<String, Counter> metrics = SolrMetricTestUtils.getRandomMetrics(r, true);
    MetricRegistry mr = new MetricRegistry();
    for (Map.Entry<String, Counter> entry : metrics.entrySet()) {
        mr.register(entry.getKey(), entry.getValue());
    }
    String registryName = TestUtil.randomSimpleString(r, 1, 10);
    assertEquals(0, metricManager.registry(registryName).getMetrics().size());
    metricManager.registerAll(registryName, mr, false);
    // this should simply skip existing names
    metricManager.registerAll(registryName, mr, true);
    // this should produce error
    try {
        metricManager.registerAll(registryName, mr, false);
        fail("registerAll with duplicate metric names should fail");
    } catch (IllegalArgumentException e) {
    // expected
    }
}
Also used : Counter(com.codahale.metrics.Counter) Random(java.util.Random) MetricRegistry(com.codahale.metrics.MetricRegistry) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
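
The registerAll call that is expected to fail surfaces an IllegalArgumentException, which is also what a plain Dropwizard MetricRegistry throws when the same metric name is registered twice. A small sketch of that underlying behavior, with an illustrative metric name:

import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

public class DuplicateRegistrationSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        // first registration of the name succeeds
        registry.register("requests", new Counter());
        try {
            // registering the same name again is rejected
            registry.register("requests", new Counter());
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}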

Example 23 with Counter

use of com.codahale.metrics.Counter in project lucene-solr by apache.

the class PeerSyncReplicationTest method test.

@Test
public void test() throws Exception {
    handle.clear();
    handle.put("timestamp", SKIPVAL);
    waitForThingsToLevelOut(30);
    del("*:*");
    // index enough docs and commit to establish frame of reference for PeerSync
    for (int i = 0; i < 100; i++) {
        indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
    }
    commit();
    waitForThingsToLevelOut(30);
    try {
        checkShardConsistency(false, true);
        long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
        assertEquals(docId, cloudClientDocs);
        CloudJettyRunner initialLeaderJetty = shardToLeaderJetty.get("shard1");
        List<CloudJettyRunner> otherJetties = getOtherAvailableJetties(initialLeaderJetty);
        CloudJettyRunner neverLeader = otherJetties.get(otherJetties.size() - 1);
        otherJetties.remove(neverLeader);
        // first shutdown a node that will never be a leader
        forceNodeFailures(singletonList(neverLeader));
        // node failure and recovery via PeerSync
        log.info("Forcing PeerSync");
        CloudJettyRunner nodePeerSynced = forceNodeFailureAndDoPeerSync(false);
        // add a few more docs
        indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
        indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
        commit();
        cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
        assertEquals(docId, cloudClientDocs);
        // now shutdown all other nodes except for the node that was PeerSynced (nodePeerSynced)
        otherJetties.remove(nodePeerSynced);
        forceNodeFailures(otherJetties);
        waitForThingsToLevelOut(30);
        checkShardConsistency(false, true);
        // now shutdown the original leader
        log.info("Now shutting down initial leader");
        forceNodeFailures(singletonList(initialLeaderJetty));
        log.info("Updating mappings from zk");
        waitForNewLeader(cloudClient, "shard1", (Replica) initialLeaderJetty.client.info, new TimeOut(15, SECONDS));
        updateMappingsFromZk(jettys, clients, true);
        assertEquals("PeerSynced node did not become leader", nodePeerSynced, shardToLeaderJetty.get("shard1"));
        // bring up the node that was down all along, and let it PeerSync from the node that was forced to PeerSync
        bringUpDeadNodeAndEnsureNoReplication(neverLeader, false);
        waitTillNodesActive();
        checkShardConsistency(false, true);
        // bring back all the nodes including initial leader 
        // (commented out because it triggers 'Maximum concurrent create/delete watches above limit' violations and thread-leak reports)
        /*for(int i = 0 ; i < nodesDown.size(); i++) {
        bringUpDeadNodeAndEnsureNoReplication(shardToLeaderJetty.get("shard1"), neverLeader, false);
      }
      checkShardConsistency(false, true);*/
        // make sure leader has not changed after bringing initial leader back
        assertEquals(nodePeerSynced, shardToLeaderJetty.get("shard1"));
        // assert metrics
        MetricRegistry registry = nodePeerSynced.jetty.getCoreContainer().getMetricManager().registry("solr.core.collection1");
        Map<String, Metric> metrics = registry.getMetrics();
        assertTrue("REPLICATION.time present", metrics.containsKey("REPLICATION.time"));
        assertTrue("REPLICATION.errors present", metrics.containsKey("REPLICATION.errors"));
        Timer timer = (Timer) metrics.get("REPLICATION.time");
        assertEquals(1L, timer.getCount());
        Counter counter = (Counter) metrics.get("REPLICATION.errors");
        assertEquals(0L, counter.getCount());
        success = true;
    } finally {
        System.clearProperty("solr.disableFingerprint");
    }
}
Also used : Counter(com.codahale.metrics.Counter) Timer(com.codahale.metrics.Timer) TimeOut(org.apache.solr.util.TimeOut) MetricRegistry(com.codahale.metrics.MetricRegistry) Metric(com.codahale.metrics.Metric) SolrQuery(org.apache.solr.client.solrj.SolrQuery) Test(org.junit.Test)

Example 24 with Counter

use of com.codahale.metrics.Counter in project lucene-solr by apache.

the class HdfsCollectionsAPIDistributedZkTest method moveReplicaTest.

@Test
public void moveReplicaTest() throws Exception {
    cluster.waitForAllNodes(5000);
    String coll = "movereplicatest_coll";
    CloudSolrClient cloudClient = cluster.getSolrClient();
    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
    create.setMaxShardsPerNode(2);
    cloudClient.request(create);
    for (int i = 0; i < 10; i++) {
        cloudClient.add(coll, sdoc("id", String.valueOf(i)));
        cloudClient.commit(coll);
    }
    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
    Collections.shuffle(slices, random());
    Slice slice = null;
    Replica replica = null;
    for (Slice s : slices) {
        slice = s;
        for (Replica r : s.getReplicas()) {
            if (s.getLeader() != r) {
                replica = r;
            }
        }
    }
    String dataDir = getDataDir(replica);
    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
    ArrayList<String> l = new ArrayList<>(liveNodes);
    Collections.shuffle(l, random());
    String targetNode = null;
    for (String node : liveNodes) {
        if (!replica.getNodeName().equals(node)) {
            targetNode = node;
            break;
        }
    }
    assertNotNull(targetNode);
    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
    moveReplica.process(cloudClient);
    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
    checkNumOfCores(cloudClient, targetNode, 2);
    waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
    boolean found = false;
    for (Replica newReplica : slice.getReplicas()) {
        if (getDataDir(newReplica).equals(dataDir)) {
            found = true;
        }
    }
    assertTrue(found);
    // data dir is reused so replication will be skipped
    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
        SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
        List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
        for (String registry : registryNames) {
            Map<String, Metric> metrics = manager.registry(registry).getMetrics();
            Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
            if (counter != null) {
                assertEquals(0, counter.getCount());
            }
        }
    }
}
Also used : Nightly(com.carrotsearch.randomizedtesting.annotations.Nightly) BadHdfsThreadsFilter(org.apache.solr.util.BadHdfsThreadsFilter) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) BeforeClass(org.junit.BeforeClass) Slow(org.apache.lucene.util.LuceneTestCase.Slow) CoreAdminResponse(org.apache.solr.client.solrj.response.CoreAdminResponse) ArrayList(java.util.ArrayList) SolrServerException(org.apache.solr.client.solrj.SolrServerException) Map(java.util.Map) ThreadLeakFilters(com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters) Counter(com.codahale.metrics.Counter) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) CoreStatus(org.apache.solr.client.solrj.request.CoreStatus) AfterClass(org.junit.AfterClass) Slice(org.apache.solr.common.cloud.Slice) Set(java.util.Set) Metric(com.codahale.metrics.Metric) IOException(java.io.IOException) Test(org.junit.Test) Collectors(java.util.stream.Collectors) Replica(org.apache.solr.common.cloud.Replica) List(java.util.List) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) SolrMetricManager(org.apache.solr.metrics.SolrMetricManager) ZkConfigManager(org.apache.solr.common.cloud.ZkConfigManager) Collections(java.util.Collections) CoreAdminRequest(org.apache.solr.client.solrj.request.CoreAdminRequest) CollectionAdminRequest(org.apache.solr.client.solrj.request.CollectionAdminRequest) CollectionsAPIDistributedZkTest(org.apache.solr.cloud.CollectionsAPIDistributedZkTest)

Example 25 with Counter

use of com.codahale.metrics.Counter in project sling by apache.

the class MetricWrapperTest method counter.

@Test
public void counter() throws Exception {
    Counter counter = registry.counter("test");
    CounterImpl counterStats = new CounterImpl(counter);
    counterStats.increment();
    assertEquals(1, counterStats.getCount());
    assertEquals(1, counter.getCount());
    assertEquals(1, counterStats.getCount());
    counterStats.increment();
    counterStats.increment();
    assertEquals(3, counterStats.getCount());
    counterStats.decrement();
    assertEquals(2, counterStats.getCount());
    assertEquals(2, counter.getCount());
    counterStats.increment(7);
    assertEquals(9, counterStats.getCount());
    assertEquals(9, counter.getCount());
    counterStats.decrement(5);
    assertEquals(4, counterStats.getCount());
    assertEquals(4, counter.getCount());
    assertSame(counter, counterStats.adaptTo(Counter.class));
}
Also used : Counter(com.codahale.metrics.Counter) Test(org.junit.Test)
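
CounterImpl in this test wraps a Dropwizard Counter behind Sling's counter API, and the assertions check that the wrapper and the underlying Counter stay in step. The following is a minimal hypothetical delegating wrapper that captures that pattern; the class name and the adaptTo signature are illustrative and not Sling's actual implementation.

import com.codahale.metrics.Counter;

// Hypothetical delegating wrapper, sketched only to illustrate the pattern under test;
// it is not Sling's CounterImpl.
public class DelegatingCounterSketch {
    private final Counter counter;

    public DelegatingCounterSketch(Counter counter) {
        this.counter = counter;
    }

    public void increment() { counter.inc(); }
    public void decrement() { counter.dec(); }
    public void increment(long delta) { counter.inc(delta); }
    public void decrement(long delta) { counter.dec(delta); }
    public long getCount() { return counter.getCount(); }

    // mirrors the adaptTo(Counter.class) call above: expose the wrapped instance
    @SuppressWarnings("unchecked")
    public <T> T adaptTo(Class<T> type) {
        return type == Counter.class ? (T) counter : null;
    }
}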

Aggregations

Counter (com.codahale.metrics.Counter) 55
Test (org.junit.Test) 24
Map (java.util.Map) 15
Timer (com.codahale.metrics.Timer) 14
MetricRegistry (com.codahale.metrics.MetricRegistry) 13
HashMap (java.util.HashMap) 11
Gauge (com.codahale.metrics.Gauge) 10
Histogram (com.codahale.metrics.Histogram) 10
Metric (com.codahale.metrics.Metric) 9
Meter (com.codahale.metrics.Meter) 8
Random (java.util.Random) 8
SolrInfoBean (org.apache.solr.core.SolrInfoBean) 6
SortedMap (java.util.SortedMap) 5
IOException (java.io.IOException) 4
TreeMap (java.util.TreeMap) 4
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner) 4
SolrMetricManager (org.apache.solr.metrics.SolrMetricManager) 4
Description (com.google.gerrit.metrics.Description) 3
ArrayList (java.util.ArrayList) 3
Collectors (java.util.stream.Collectors) 3