Example 41 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project caffeine by ben-manes.

The class MetricsStatsCounterTest, method metrics().

@Test
public void metrics() {
    // Use a registry that is exported using a Reporter (via console, JMX, Graphite, etc)
    MetricRegistry registry = new MetricRegistry();
    // Create the cache with a dedicated, uniquely named stats counter
    LoadingCache<Integer, Integer> cache = Caffeine.newBuilder()
        .recordStats(() -> new MetricsStatsCounter(registry, "example"))
        .build(key -> key);
    // Perform application work
    for (int i = 0; i < 4; i++) {
        cache.get(1);
    }
    // Statistics can be queried and reported on
    assertThat(cache.stats().hitCount(), is(3L));
    assertThat(registry.meter("example.hits").getCount(), is(3L));
}
Also used : MetricRegistry(com.codahale.metrics.MetricRegistry) Test(org.testng.annotations.Test)
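
The first comment in the test mentions exporting the registry through a Reporter. A minimal sketch of that wiring, assuming Dropwizard's ConsoleReporter (the JMX and Graphite reporters follow the same builder pattern); the metric name reuses the "example" prefix from the test but is otherwise illustrative:

import java.util.concurrent.TimeUnit;

import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricRegistry;

public final class ConsoleReporterSketch {
    public static void main(String[] args) throws InterruptedException {
        MetricRegistry registry = new MetricRegistry();
        ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS)
                .build();
        // Print a snapshot of all registered metrics every second.
        reporter.start(1, TimeUnit.SECONDS);
        registry.meter("example.hits").mark(3);
        Thread.sleep(1500); // let at least one report fire
        reporter.stop();
    }
}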

Example 42 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ranger by apache.

The class KMSWebApp, method contextInitialized().

@Override
public void contextInitialized(ServletContextEvent sce) {
    try {
        String confDir = System.getProperty(KMSConfiguration.KMS_CONFIG_DIR);
        if (confDir == null) {
            throw new RuntimeException("System property '" + KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
        }
        kmsConf = KMSConfiguration.getKMSConf();
        initLogging(confDir);
        LOG.info("-------------------------------------------------------------");
        LOG.info("  Java runtime version : {}", System.getProperty("java.runtime.version"));
        LOG.info("  KMS Hadoop Version: " + VersionInfo.getVersion());
        LOG.info("-------------------------------------------------------------");
        kmsAcls = getAcls(kmsConf.get(KMSConfiguration.KMS_SECURITY_AUTHORIZER));
        kmsAcls.startReloader();
        metricRegistry = new MetricRegistry();
        jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
        jmxReporter.start();
        generateEEKCallsMeter = metricRegistry.register(GENERATE_EEK_METER, new Meter());
        decryptEEKCallsMeter = metricRegistry.register(DECRYPT_EEK_METER, new Meter());
        adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
        keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
        invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER, new Meter());
        unauthorizedCallsMeter = metricRegistry.register(UNAUTHORIZED_CALLS_METER, new Meter());
        unauthenticatedCallsMeter = metricRegistry.register(UNAUTHENTICATED_CALLS_METER, new Meter());
        kmsAudit = new KMSAudit(kmsConf.getLong(KMSConfiguration.KMS_AUDIT_AGGREGATION_WINDOW, KMSConfiguration.KMS_AUDIT_AGGREGATION_WINDOW_DEFAULT));
        // Expose the KMS configuration to the servlet context and grant admin
        // access via the wildcard ('*') ACL.
        if (sce != null) {
            sce.getServletContext().setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, kmsConf);
            sce.getServletContext().setAttribute(HttpServer2.ADMINS_ACL, new AccessControlList(AccessControlList.WILDCARD_ACL_VALUE));
        }
        // initializing the KeyProvider
        String providerString = kmsConf.get(KMSConfiguration.KEY_PROVIDER_URI);
        if (providerString == null) {
            throw new IllegalStateException("No KeyProvider has been defined");
        }
        Log.info("------------------ Ranger KMSWebApp---------------------");
        Log.info("provider string = " + providerString);
        Log.info("URI = " + new URI(providerString).toString() + " scheme = " + new URI(providerString).getScheme());
        Log.info("kmsconf size= " + kmsConf.size() + " kms classname=" + kmsConf.getClass().getName());
        Log.info("----------------Instantiating key provider ---------------");
        KeyProvider keyProvider = KeyProviderFactory.get(new URI(providerString), kmsConf);
        Log.info("keyProvider = " + keyProvider.toString());
        if (kmsConf.getBoolean(KMSConfiguration.KEY_CACHE_ENABLE, KMSConfiguration.KEY_CACHE_ENABLE_DEFAULT)) {
            long keyTimeOutMillis = kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY, KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
            long currKeyTimeOutMillis = kmsConf.getLong(KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_KEY, KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_DEFAULT);
            keyProvider = new CachingKeyProvider(keyProvider, keyTimeOutMillis, currKeyTimeOutMillis);
        }
        LOG.info("Initialized KeyProvider " + keyProvider);
        keyProviderCryptoExtension = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(keyProvider);
        keyProviderCryptoExtension = new EagerKeyGeneratorKeyProviderCryptoExtension(kmsConf, keyProviderCryptoExtension);
        if (kmsConf.getBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE, KMSConfiguration.KEY_AUTHORIZATION_ENABLE_DEFAULT)) {
            keyProviderCryptoExtension = new KeyAuthorizationKeyProvider(keyProviderCryptoExtension, kmsAcls);
        }
        LOG.info("Initialized KeyProviderCryptoExtension " + keyProviderCryptoExtension);
        final int defaultBitlength = kmsConf.getInt(KeyProvider.DEFAULT_BITLENGTH_NAME, KeyProvider.DEFAULT_BITLENGTH);
        LOG.info("Default key bitlength is {}", defaultBitlength);
        LOG.info("Ranger KMS Started");
    } catch (Throwable ex) {
        System.out.println();
        System.out.println("ERROR: Hadoop KMS could not be started");
        System.out.println();
        System.out.println("REASON: " + ex.toString());
        System.out.println();
        System.out.println("Stacktrace:");
        System.out.println("---------------------------------------------------");
        ex.printStackTrace(System.out);
        System.out.println("---------------------------------------------------");
        System.out.println();
        System.exit(1);
    }
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) CachingKeyProvider(org.apache.hadoop.crypto.key.CachingKeyProvider) KeyProvider(org.apache.hadoop.crypto.key.KeyProvider) Meter(com.codahale.metrics.Meter) MetricRegistry(com.codahale.metrics.MetricRegistry) URI(java.net.URI)
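
The meters registered above are only wired up in contextInitialized; the request-handling code marks them per call. A minimal sketch of that consumption side, with illustrative names (not Ranger's actual fields or metric keys):

import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;

public class KmsMeterSketch {
    private final Meter adminCallsMeter;

    public KmsMeterSketch(MetricRegistry registry) {
        // register(...) throws IllegalArgumentException if the name is taken;
        // registry.meter(name) would get-or-create instead.
        this.adminCallsMeter = registry.register("kms.admin.calls", new Meter());
    }

    public void onAdminCall() {
        // One mark per call; the JmxReporter started in contextInitialized
        // exposes the resulting count and rates as MBean attributes.
        adminCallsMeter.mark();
    }
}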

Example 43 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by linkedin.

The class ReplicationTest, method replicationPauseTest().

/**
 * Tests pausing replication for all and individual partitions.
 * @throws Exception
 */
@Test
public void replicationPauseTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Host localHost = new Host(clusterMap.getDataNodeIds().get(0), clusterMap);
    Host remoteHost = new Host(clusterMap.getDataNodeIds().get(1), clusterMap);
    List<PartitionId> partitionIds = clusterMap.getAllPartitionIds();
    for (PartitionId partitionId : partitionIds) {
        // add 10 messages to the remote host only
        addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 10);
    }
    Properties properties = new Properties();
    properties.put("replication.wait.time.between.replicas.ms", "0");
    ReplicationConfig config = new ReplicationConfig(new VerifiableProperties(properties));
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
    replicationMetrics.populatePerColoMetrics(Collections.singleton(remoteHost.dataNodeId.getDatacenterName()));
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = new HashMap<>();
    replicasToReplicate.put(remoteHost.dataNodeId, localHost.getRemoteReplicaInfos(remoteHost, null));
    Map<DataNodeId, Host> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    int batchSize = 4;
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, batchSize);
    ReplicaThread replicaThread =
        new ReplicaThread("threadtest", replicasToReplicate, new MockFindTokenFactory(), clusterMap,
            new AtomicInteger(0), localHost.dataNodeId, connectionPool, config, replicationMetrics, null,
            storeKeyFactory, true, clusterMap.getMetricRegistry(), false,
            localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap));
    Map<PartitionId, Integer> progressTracker = new HashMap<>();
    PartitionId idToLeaveOut = clusterMap.getAllPartitionIds().get(0);
    boolean allStopped = false;
    boolean onlyOneResumed = false;
    boolean allReenabled = false;
    Set<PartitionId> expectedPaused = new HashSet<>();
    assertEquals("There should be no disabled partitions", expectedPaused, replicaThread.getReplicationDisabledPartitions());
    while (true) {
        replicaThread.replicate(new ArrayList<>(replicasToReplicate.values()));
        boolean replicationDone = true;
        for (RemoteReplicaInfo replicaInfo : replicasToReplicate.get(remoteHost.dataNodeId)) {
            PartitionId id = replicaInfo.getReplicaId().getPartitionId();
            MockFindToken token = (MockFindToken) replicaInfo.getToken();
            int lastProgress = progressTracker.computeIfAbsent(id, id1 -> 0);
            int currentProgress = token.getIndex();
            boolean partDone = currentProgress + 1 == remoteHost.infosByPartition.get(id).size();
            if (allStopped || (onlyOneResumed && !id.equals(idToLeaveOut))) {
                assertEquals("There should have been no progress", lastProgress, currentProgress);
            } else if (!partDone) {
                assertTrue("There has been no progress", currentProgress > lastProgress);
                progressTracker.put(id, currentProgress);
            }
            replicationDone = replicationDone && partDone;
        }
        if (!allStopped && !onlyOneResumed && !allReenabled) {
            replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(), false);
            expectedPaused.addAll(clusterMap.getAllPartitionIds());
            assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
            allStopped = true;
        } else if (!onlyOneResumed && !allReenabled) {
            // resume replication for first partition
            replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
            expectedPaused.remove(partitionIds.get(0));
            assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
            allStopped = false;
            onlyOneResumed = true;
        } else if (!allReenabled) {
            // not removing the first partition
            replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(), true);
            onlyOneResumed = false;
            allReenabled = true;
            expectedPaused.clear();
            assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
        }
        if (replicationDone) {
            break;
        }
    }
    Map<PartitionId, List<MessageInfo>> missingInfos = remoteHost.getMissingInfos(localHost.infosByPartition);
    for (Map.Entry<PartitionId, List<MessageInfo>> entry : missingInfos.entrySet()) {
        assertEquals("No infos should be missing", 0, entry.getValue().size());
    }
    Map<PartitionId, List<ByteBuffer>> missingBuffers = remoteHost.getMissingBuffers(localHost.buffersByPartition);
    for (Map.Entry<PartitionId, List<ByteBuffer>> entry : missingBuffers.entrySet()) {
        assertEquals("No buffers should be missing", 0, entry.getValue().size());
    }
}
Also used : ResponseHandler(com.github.ambry.commons.ResponseHandler) HashMap(java.util.HashMap) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) List(java.util.List) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ReplicationConfig(com.github.ambry.config.ReplicationConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) PartitionId(com.github.ambry.clustermap.PartitionId) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DataNodeId(com.github.ambry.clustermap.DataNodeId) Map(java.util.Map) ClusterMap(com.github.ambry.clustermap.ClusterMap) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
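
ReplicationMetrics above is constructed from a fresh MetricRegistry; classes like it typically pre-register their counters against the registry in the constructor. A minimal sketch of that holder pattern, with hypothetical metric names (Ambry's real fields and names differ):

import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

public class ReplicationMetricsSketch {
    public final Counter replicationErrors;
    public final Counter bytesReplicated;

    public ReplicationMetricsSketch(MetricRegistry registry) {
        // MetricRegistry.name(...) builds dotted metric names scoped to a class.
        replicationErrors = registry.counter(
            MetricRegistry.name(ReplicationMetricsSketch.class, "replicationErrors"));
        bytesReplicated = registry.counter(
            MetricRegistry.name(ReplicationMetricsSketch.class, "bytesReplicated"));
    }
}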

Example 44 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by linkedin.

The class BadRestRequest, method edgeCaseWorkerCountsTest().

/**
 * Tests the behavior of {@link AsyncRequestResponseHandler} when request worker count is not set or is zero.
 * @throws Exception
 */
@Test
public void edgeCaseWorkerCountsTest() throws Exception {
    RequestResponseHandlerMetrics metrics = new RequestResponseHandlerMetrics(new MetricRegistry());
    AsyncRequestResponseHandler requestResponseHandler = new AsyncRequestResponseHandler(metrics);
    noRequestHandlersTest(requestResponseHandler);
    requestResponseHandler = getAsyncRequestResponseHandler(0);
    noRequestHandlersTest(requestResponseHandler);
}
Also used : MetricRegistry(com.codahale.metrics.MetricRegistry) Test(org.junit.Test)
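
Passing new MetricRegistry() into the metrics holder, as above, gives each test an isolated registry. A small sketch of why that matters, and of the registry's get-or-create lookup semantics (the metric name is hypothetical):

import static org.junit.Assert.assertEquals;

import com.codahale.metrics.MetricRegistry;
import org.junit.Test;

public class RegistryIsolationSketch {
    @Test
    public void freshRegistryStartsEmpty() {
        // A fresh registry holds no metrics, so counts cannot leak between tests.
        MetricRegistry registry = new MetricRegistry();
        assertEquals(0, registry.getMetrics().size());
        // counter(name) is get-or-create: repeated lookups return the same instance.
        registry.counter("handler.errors").inc();
        assertEquals(1, registry.counter("handler.errors").getCount());
    }
}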

Example 45 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by linkedin.

The class NettyServerFactoryTest, method getNettyServerFactoryWithBadInputTest().

/**
 * Tests instantiation of {@link NettyServerFactory} with bad input.
 */
@Test
public void getNettyServerFactoryWithBadInputTest() {
    Properties properties = new Properties();
    properties.setProperty("netty.server.enable.ssl", "true");
    VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
    MetricRegistry metricRegistry = new MetricRegistry();
    doConstructionFailureTest(null, metricRegistry, REST_REQUEST_HANDLER, PUBLIC_ACCESS_LOGGER, REST_SERVER_STATE, SSL_FACTORY);
    doConstructionFailureTest(verifiableProperties, null, REST_REQUEST_HANDLER, PUBLIC_ACCESS_LOGGER, REST_SERVER_STATE, SSL_FACTORY);
    doConstructionFailureTest(verifiableProperties, metricRegistry, null, PUBLIC_ACCESS_LOGGER, REST_SERVER_STATE, SSL_FACTORY);
    doConstructionFailureTest(verifiableProperties, metricRegistry, REST_REQUEST_HANDLER, null, REST_SERVER_STATE, SSL_FACTORY);
    doConstructionFailureTest(verifiableProperties, metricRegistry, REST_REQUEST_HANDLER, PUBLIC_ACCESS_LOGGER, null, SSL_FACTORY);
    doConstructionFailureTest(verifiableProperties, metricRegistry, REST_REQUEST_HANDLER, PUBLIC_ACCESS_LOGGER, REST_SERVER_STATE, null);
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) Test(org.junit.Test)
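
doConstructionFailureTest is a private helper defined elsewhere in NettyServerFactoryTest. A plausible sketch of its shape, assuming the factory constructor takes these six arguments in order and rejects nulls with IllegalArgumentException (the real helper and exception type may differ):

private void doConstructionFailureTest(VerifiableProperties verifiableProperties,
        MetricRegistry metricRegistry, RestRequestHandler restRequestHandler,
        PublicAccessLogger publicAccessLogger, RestServerState restServerState,
        SSLFactory sslFactory) {
    try {
        new NettyServerFactory(verifiableProperties, metricRegistry, restRequestHandler,
                publicAccessLogger, restServerState, sslFactory);
        // fail() is org.junit.Assert.fail
        fail("NettyServerFactory construction should have failed for a null argument");
    } catch (IllegalArgumentException e) {
        // expected
    }
}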

Aggregations

MetricRegistry (com.codahale.metrics.MetricRegistry) 505
Test (org.junit.Test) 177
Before (org.junit.Before) 61
Test (org.junit.jupiter.api.Test) 45
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 42
ArrayList (java.util.ArrayList) 33
Counter (com.codahale.metrics.Counter) 30
File (java.io.File) 29
Properties (java.util.Properties) 28
List (java.util.List) 23
Metric (com.codahale.metrics.Metric) 22
Map (java.util.Map) 22
IOException (java.io.IOException) 21
HashMap (java.util.HashMap) 20
Size (com.github.joschi.jadconfig.util.Size) 17
CountDownLatch (java.util.concurrent.CountDownLatch) 17
TimeUnit (java.util.concurrent.TimeUnit) 17
Timer (com.codahale.metrics.Timer) 15
DefaultTaggedMetricRegistry (com.palantir.tritium.metrics.registry.DefaultTaggedMetricRegistry) 15
ResourceConfig (org.glassfish.jersey.server.ResourceConfig) 15