
Example 1 with MetricsCollector

Use of io.cdap.cdap.api.metrics.MetricsCollector in project cdap by caskdata.

From the class TableTest, method testMetrics:

private void testMetrics(boolean readless) throws Exception {
    final String tableName = "survive";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    try (Table table = getTable(CONTEXT1, tableName, props)) {
        final Map<String, Long> metrics = Maps.newHashMap();
        ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {

            @Override
            public void increment(String metricName, long value) {
                Long old = metrics.get(metricName);
                metrics.put(metricName, old == null ? value : old + value);
            }

            @Override
            public void gauge(String metricName, long value) {
                metrics.put(metricName, value);
            }
        });
        // Note that we don't need to finish tx for metrics to be reported
        Transaction tx0 = txClient.startShort();
        ((TransactionAware) table).startTx(tx0);
        int writes = 0;
        int reads = 0;
        table.put(new Put(R1, C1, V1));
        verifyDatasetMetrics(metrics, ++writes, reads);
        table.compareAndSwap(R1, C1, V1, V2);
        verifyDatasetMetrics(metrics, ++writes, ++reads);
        // note: will not write anything, as the expected value will not match
        table.compareAndSwap(R1, C1, V1, V2);
        verifyDatasetMetrics(metrics, writes, ++reads);
        table.increment(new Increment(R2, C2, 1L));
        if (readless) {
            verifyDatasetMetrics(metrics, ++writes, reads);
        } else {
            verifyDatasetMetrics(metrics, ++writes, ++reads);
        }
        table.incrementAndGet(new Increment(R2, C2, 1L));
        verifyDatasetMetrics(metrics, ++writes, ++reads);
        table.get(new Get(R1, C1, V1));
        verifyDatasetMetrics(metrics, writes, ++reads);
        Scanner scanner = table.scan(new Scan(null, null));
        while (scanner.next() != null) {
            verifyDatasetMetrics(metrics, writes, ++reads);
        }
        table.delete(new Delete(R1, C1, V1));
        verifyDatasetMetrics(metrics, ++writes, reads);
    } finally {
        // drop table
        admin.drop();
    }
}
Also used: MetricsCollector (io.cdap.cdap.api.metrics.MetricsCollector), Delete (io.cdap.cdap.api.dataset.table.Delete), Scanner (io.cdap.cdap.api.dataset.table.Scanner), Table (io.cdap.cdap.api.dataset.table.Table), HBaseTable (io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable), DatasetProperties (io.cdap.cdap.api.dataset.DatasetProperties), DatasetAdmin (io.cdap.cdap.api.dataset.DatasetAdmin), Put (io.cdap.cdap.api.dataset.table.Put), Transaction (org.apache.tephra.Transaction), TransactionAware (org.apache.tephra.TransactionAware), Increment (io.cdap.cdap.api.dataset.table.Increment), Get (io.cdap.cdap.api.dataset.table.Get), MeteredDataset (io.cdap.cdap.api.dataset.metrics.MeteredDataset), Scan (io.cdap.cdap.api.dataset.table.Scan)
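The verifyDatasetMetrics helper that the test calls after each operation is not shown in this example. A plausible minimal sketch is below; the metric names "dataset.store.writes" and "dataset.store.reads" are assumptions for illustration, not the constants the real CDAP test asserts against, and org.junit.Assert is assumed to be on the classpath.

private void verifyDatasetMetrics(Map<String, Long> metrics, long expectedWrites, long expectedReads) {
    // Assumed metric names; the MeteredDataset implementation decides the real keys.
    Assert.assertEquals(expectedWrites, valueOrZero(metrics, "dataset.store.writes"));
    Assert.assertEquals(expectedReads, valueOrZero(metrics, "dataset.store.reads"));
}

private long valueOrZero(Map<String, Long> metrics, String metricName) {
    // Metrics that were never emitted are simply absent from the map.
    Long value = metrics.get(metricName);
    return value == null ? 0L : value;
}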

Example 2 with MetricsCollector

Use of io.cdap.cdap.api.metrics.MetricsCollector in project cdap by caskdata.

From the class ConcurrentMessageWriterTest, method testMultiMaxSequence:

@Test
public void testMultiMaxSequence() throws IOException, InterruptedException {
    TopicId topicId = new NamespaceId("ns1").topic("t1");
    final TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);
    // This tests the case where multiple StoreRequests combined exceed the 65536 sequence id limit.
    // See testMaxSequence() for more details on when this matters.
    // Generate 3 StoreRequests, each with 43690 messages.
    int msgCount = StoreRequestWriter.SEQUENCE_ID_LIMIT / 3 * 2;
    int requestCount = 3;
    List<StoreRequest> requests = new ArrayList<>();
    for (int i = 0; i < requestCount; i++) {
        List<String> payloads = new ArrayList<>(msgCount);
        for (int j = 0; j < msgCount; j++) {
            payloads.add(Integer.toString(j));
        }
        requests.add(new TestStoreRequest(topicId, payloads));
    }
    TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
    // We use a custom metrics collector here to make all the persist calls reach the same latch,
    // since we know that the ConcurrentMessageWriter emits a "persist.requested" metric after a
    // request is enqueued but before it is flushed.
    // This makes all the requests get batched together.
    final CountDownLatch latch = new CountDownLatch(requestCount);
    final ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter, new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            if ("persist.requested".equals(metricName)) {
                latch.countDown();
                Uninterruptibles.awaitUninterruptibly(latch);
            }
        }

        @Override
        public void gauge(String metricName, long value) {
            LOG.info("MetricsContext.gauge: {} = {}", metricName, value);
        }
    });
    ExecutorService executor = Executors.newFixedThreadPool(3);
    for (final StoreRequest request : requests) {
        executor.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    writer.persist(request, metadata);
                } catch (IOException e) {
                    LOG.error("Failed to persist", e);
                }
            }
        });
    }
    executor.shutdown();
    Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
    // Validate that all messages were written
    List<RawMessage> messages = testWriter.getMessages().get(topicId);
    Assert.assertEquals(requestCount * msgCount, messages.size());
    // We expect the payloads to repeat the sequence [0..msgCount-1]
    int expectedPayload = 0;
    // The sequenceId should be (i % SEQUENCE_ID_LIMIT)
    for (int i = 0; i < messages.size(); i++) {
        RawMessage message = messages.get(i);
        MessageId messageId = new MessageId(message.getId());
        Assert.assertEquals(i / StoreRequestWriter.SEQUENCE_ID_LIMIT, messageId.getPublishTimestamp());
        Assert.assertEquals((short) (i % StoreRequestWriter.SEQUENCE_ID_LIMIT), messageId.getSequenceId());
        Assert.assertEquals(expectedPayload, Integer.parseInt(Bytes.toString(message.getPayload())));
        expectedPayload = (expectedPayload + 1) % msgCount;
    }
}
Also used: ArrayList (java.util.ArrayList), TopicId (io.cdap.cdap.proto.id.TopicId), RawMessage (io.cdap.cdap.messaging.data.RawMessage), MetricsCollector (io.cdap.cdap.api.metrics.MetricsCollector), TimeProvider (io.cdap.cdap.common.utils.TimeProvider), StoreRequest (io.cdap.cdap.messaging.StoreRequest), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), TopicMetadata (io.cdap.cdap.messaging.TopicMetadata), ExecutorService (java.util.concurrent.ExecutorService), NamespaceId (io.cdap.cdap.proto.id.NamespaceId), MessageId (io.cdap.cdap.messaging.data.MessageId), Test (org.junit.Test)
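The custom collector in this test doubles as a rendezvous barrier: each persisting thread counts the latch down inside increment("persist.requested", ...) and then blocks until all three threads have arrived, which forces the ConcurrentMessageWriter to batch the requests. The same pattern can be seen in isolation in this minimal self-contained sketch (plain java.util.concurrent, no CDAP types; class and variable names are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public final class RendezvousDemo {

    public static void main(String[] args) throws InterruptedException {
        int parties = 3;
        CountDownLatch latch = new CountDownLatch(parties);
        // The pool must have at least 'parties' threads, or the tasks deadlock
        // waiting for arrivals that can never be scheduled.
        ExecutorService executor = Executors.newFixedThreadPool(parties);
        for (int i = 0; i < parties; i++) {
            final int id = i;
            executor.submit(() -> {
                // Mirror of the increment() hook above: announce arrival,
                // then wait until every other thread has arrived.
                latch.countDown();
                try {
                    latch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                System.out.println("thread " + id + " released together");
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}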

Example 3 with MetricsCollector

Use of io.cdap.cdap.api.metrics.MetricsCollector in project cdap by caskdata.

From the class AuditMetadataStorage, method emitMetrics:

private void emitMetrics(String metricSuffix) {
    MetricsCollector metricsCollector = metricsCollectionService.getContext(Constants.Metrics.STORAGE_METRICS_TAGS);
    metricsCollector.increment(Constants.Metrics.MetadataStorage.METRICS_PREFIX + metricSuffix, 1L);
}
Also used: MetricsCollector (io.cdap.cdap.api.metrics.MetricsCollector)
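Across all three examples, implementations override only increment(String, long) and gauge(String, long). Assuming those are the interface's only abstract methods (as the anonymous classes above suggest), a reusable in-memory collector for tests might look like this sketch; the class name and aggregation strategy are illustrative, not part of the CDAP API:

import io.cdap.cdap.api.metrics.MetricsCollector;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

public final class InMemoryMetricsCollector implements MetricsCollector {

    private final Map<String, LongAdder> counters = new ConcurrentHashMap<>();
    private final Map<String, Long> gauges = new ConcurrentHashMap<>();

    @Override
    public void increment(String metricName, long value) {
        // Accumulate deltas; LongAdder keeps this cheap under concurrent writers.
        counters.computeIfAbsent(metricName, name -> new LongAdder()).add(value);
    }

    @Override
    public void gauge(String metricName, long value) {
        // Gauges record the latest observed value rather than a running sum.
        gauges.put(metricName, value);
    }

    public long counter(String metricName) {
        LongAdder adder = counters.get(metricName);
        return adder == null ? 0L : adder.sum();
    }
}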

Aggregations

MetricsCollector (io.cdap.cdap.api.metrics.MetricsCollector): 3
DatasetAdmin (io.cdap.cdap.api.dataset.DatasetAdmin): 1
DatasetProperties (io.cdap.cdap.api.dataset.DatasetProperties): 1
MeteredDataset (io.cdap.cdap.api.dataset.metrics.MeteredDataset): 1
Delete (io.cdap.cdap.api.dataset.table.Delete): 1
Get (io.cdap.cdap.api.dataset.table.Get): 1
Increment (io.cdap.cdap.api.dataset.table.Increment): 1
Put (io.cdap.cdap.api.dataset.table.Put): 1
Scan (io.cdap.cdap.api.dataset.table.Scan): 1
Scanner (io.cdap.cdap.api.dataset.table.Scanner): 1
Table (io.cdap.cdap.api.dataset.table.Table): 1
TimeProvider (io.cdap.cdap.common.utils.TimeProvider): 1
HBaseTable (io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable): 1
StoreRequest (io.cdap.cdap.messaging.StoreRequest): 1
TopicMetadata (io.cdap.cdap.messaging.TopicMetadata): 1
MessageId (io.cdap.cdap.messaging.data.MessageId): 1
RawMessage (io.cdap.cdap.messaging.data.RawMessage): 1
NamespaceId (io.cdap.cdap.proto.id.NamespaceId): 1
TopicId (io.cdap.cdap.proto.id.TopicId): 1
IOException (java.io.IOException): 1