Example 1 with MetricsCollector

Use of co.cask.cdap.api.metrics.MetricsCollector in project cdap by caskdata.

In class ConcurrentMessageWriterTest, method testMultiMaxSequence:

@Test
public void testMultiMaxSequence() throws IOException, InterruptedException {
    TopicId topicId = new NamespaceId("ns1").topic("t1");
    final TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);
    // This tests the case where multiple StoreRequests, combined, exceed the 65536 sequence id limit.
    // See testMaxSequence() for more details on when this matters.
    // Generate 3 StoreRequests, each with 43690 messages
    int msgCount = StoreRequestWriter.SEQUENCE_ID_LIMIT / 3 * 2;
    int requestCount = 3;
    List<StoreRequest> requests = new ArrayList<>();
    for (int i = 0; i < requestCount; i++) {
        List<String> payloads = new ArrayList<>(msgCount);
        for (int j = 0; j < msgCount; j++) {
            payloads.add(Integer.toString(j));
        }
        requests.add(new TestStoreRequest(topicId, payloads));
    }
    TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
    // We use a custom metrics collector here to make all the persist calls reach the same latch,
    // since we know that the ConcurrentMessageWriter emits a "persist.requested" metric after a
    // request is enqueued but before it is flushed.
    // This makes all the requests get batched together.
    final CountDownLatch latch = new CountDownLatch(requestCount);
    final ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter, new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            if ("persist.requested".equals(metricName)) {
                latch.countDown();
                Uninterruptibles.awaitUninterruptibly(latch);
            }
        }

        @Override
        public void gauge(String metricName, long value) {
            LOG.info("MetricsContext.gauge: {} = {}", metricName, value);
        }
    });
    ExecutorService executor = Executors.newFixedThreadPool(3);
    for (final StoreRequest request : requests) {
        executor.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    writer.persist(request, metadata);
                } catch (IOException e) {
                    LOG.error("Failed to persist", e);
                }
            }
        });
    }
    executor.shutdown();
    Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
    // Validate that all messages were written
    List<RawMessage> messages = testWriter.getMessages().get(topicId);
    Assert.assertEquals(requestCount * msgCount, messages.size());
    // We expect the payloads to be a repeating sequence of [0 .. msgCount - 1]
    int expectedPayload = 0;
    // The sequenceId should be (i % SEQUENCE_ID_LIMIT)
    for (int i = 0; i < messages.size(); i++) {
        RawMessage message = messages.get(i);
        MessageId messageId = new MessageId(message.getId());
        Assert.assertEquals(i / StoreRequestWriter.SEQUENCE_ID_LIMIT, messageId.getPublishTimestamp());
        Assert.assertEquals((short) (i % StoreRequestWriter.SEQUENCE_ID_LIMIT), messageId.getSequenceId());
        Assert.assertEquals(expectedPayload, Integer.parseInt(Bytes.toString(message.getPayload())));
        expectedPayload = (expectedPayload + 1) % msgCount;
    }
}
Also used : ArrayList(java.util.ArrayList) TopicId(co.cask.cdap.proto.id.TopicId) RawMessage(co.cask.cdap.messaging.data.RawMessage) MetricsCollector(co.cask.cdap.api.metrics.MetricsCollector) TimeProvider(co.cask.cdap.common.utils.TimeProvider) StoreRequest(co.cask.cdap.messaging.StoreRequest) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) TopicMetadata(co.cask.cdap.messaging.TopicMetadata) ExecutorService(java.util.concurrent.ExecutorService) NamespaceId(co.cask.cdap.proto.id.NamespaceId) MessageId(co.cask.cdap.messaging.data.MessageId) Test(org.junit.Test)
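
Worth noting how the latch above works: every persisting thread counts down and then waits on the same CountDownLatch inside the metrics callback, so no thread can proceed to the flush until all requestCount requests have been enqueued, which is what forces the writer to batch them together. Below is a minimal, self-contained sketch of that rendezvous pattern, independent of the CDAP classes above (it uses a plain await() with interrupt handling instead of Guava's awaitUninterruptibly()):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class LatchRendezvousExample {
    public static void main(String[] args) throws InterruptedException {
        final int threads = 3;
        // No task may pass the latch until all of them have counted down.
        final CountDownLatch latch = new CountDownLatch(threads);
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        for (int i = 0; i < threads; i++) {
            final int id = i;
            executor.submit(new Runnable() {

                @Override
                public void run() {
                    // Signal arrival, then block until every task has arrived.
                    latch.countDown();
                    try {
                        latch.await();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                    // All tasks are now known to be in flight simultaneously,
                    // which is how the test forces the requests to be batched.
                    System.out.println("task " + id + " released");
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}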

Example 2 with MetricsCollector

Use of co.cask.cdap.api.metrics.MetricsCollector in project cdap by caskdata.

In class TableTest, method testMetrics:

private void testMetrics(boolean readless) throws Exception {
    final String tableName = "survive";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    Table table = getTable(CONTEXT1, tableName, props);
    final Map<String, Long> metrics = Maps.newHashMap();
    ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            Long old = metrics.get(metricName);
            metrics.put(metricName, old == null ? value : old + value);
        }

        @Override
        public void gauge(String metricName, long value) {
            metrics.put(metricName, value);
        }
    });
    // Note that we don't need to finish tx for metrics to be reported
    Transaction tx0 = txClient.startShort();
    ((TransactionAware) table).startTx(tx0);
    int writes = 0;
    int reads = 0;
    table.put(new Put(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    // note: will not write anything as the expected value will not match
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, writes, ++reads);
    table.increment(new Increment(R2, C2, 1L));
    if (readless) {
        verifyDatasetMetrics(metrics, ++writes, reads);
    } else {
        verifyDatasetMetrics(metrics, ++writes, ++reads);
    }
    table.incrementAndGet(new Increment(R2, C2, 1L));
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    table.get(new Get(R1, C1, V1));
    verifyDatasetMetrics(metrics, writes, ++reads);
    Scanner scanner = table.scan(new Scan(null, null));
    while (scanner.next() != null) {
        verifyDatasetMetrics(metrics, writes, ++reads);
    }
    table.delete(new Delete(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    // drop table
    admin.drop();
}
Also used : MetricsCollector(co.cask.cdap.api.metrics.MetricsCollector) Delete(co.cask.cdap.api.dataset.table.Delete) Scanner(co.cask.cdap.api.dataset.table.Scanner) Table(co.cask.cdap.api.dataset.table.Table) HBaseTable(co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Put(co.cask.cdap.api.dataset.table.Put) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) Increment(co.cask.cdap.api.dataset.table.Increment) Get(co.cask.cdap.api.dataset.table.Get) MeteredDataset(co.cask.cdap.api.dataset.metrics.MeteredDataset) Scan(co.cask.cdap.api.dataset.table.Scan)
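
The verifyDatasetMetrics helper called throughout testMetrics is not shown in this excerpt. A plausible sketch, meant to sit in the same test class, assuming the anonymous MetricsCollector above keeps one running total per metric name; the names "reads" and "writes" are hypothetical placeholders, not the actual CDAP metric constants:

// Sketch only: "reads" and "writes" are hypothetical metric names.
private static final String READS = "reads";
private static final String WRITES = "writes";

private void verifyDatasetMetrics(Map<String, Long> metrics, long expectedWrites, long expectedReads) {
    // The anonymous collector in testMetrics() accumulates one running total
    // per metric name, so after each table operation the map holds cumulative counts.
    long writes = metrics.containsKey(WRITES) ? metrics.get(WRITES) : 0L;
    long reads = metrics.containsKey(READS) ? metrics.get(READS) : 0L;
    Assert.assertEquals(expectedWrites, writes);
    Assert.assertEquals(expectedReads, reads);
}

Read against this sketch, the readless branch in the test is natural: a readless increment is write-only, so only the write counter advances, while a regular increment must read the current value first and therefore bumps both counters.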

Aggregations

MetricsCollector (co.cask.cdap.api.metrics.MetricsCollector)2 DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin)1 DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties)1 MeteredDataset (co.cask.cdap.api.dataset.metrics.MeteredDataset)1 Delete (co.cask.cdap.api.dataset.table.Delete)1 Get (co.cask.cdap.api.dataset.table.Get)1 Increment (co.cask.cdap.api.dataset.table.Increment)1 Put (co.cask.cdap.api.dataset.table.Put)1 Scan (co.cask.cdap.api.dataset.table.Scan)1 Scanner (co.cask.cdap.api.dataset.table.Scanner)1 Table (co.cask.cdap.api.dataset.table.Table)1 TimeProvider (co.cask.cdap.common.utils.TimeProvider)1 HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable)1 StoreRequest (co.cask.cdap.messaging.StoreRequest)1 TopicMetadata (co.cask.cdap.messaging.TopicMetadata)1 MessageId (co.cask.cdap.messaging.data.MessageId)1 RawMessage (co.cask.cdap.messaging.data.RawMessage)1 NamespaceId (co.cask.cdap.proto.id.NamespaceId)1 TopicId (co.cask.cdap.proto.id.TopicId)1 IOException (java.io.IOException)1