Use of co.cask.cdap.api.metrics.MetricsCollector in project cdap by caskdata.
From the class ConcurrentMessageWriterTest, method testMultiMaxSequence:
@Test
public void testMultiMaxSequence() throws IOException, InterruptedException {
  TopicId topicId = new NamespaceId("ns1").topic("t1");
  final TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);
  // This tests the case when multiple StoreRequests combined exceed the 65536 sequence id limit.
  // See testMaxSequence() for more details on when that matters.
  // Generate 3 StoreRequests, each with 43690 messages:
  // SEQUENCE_ID_LIMIT / 3 * 2 = 65536 / 3 * 2 = 43690 (integer division),
  // so the combined total is 3 * 43690 = 131070 > 65536.
  int msgCount = StoreRequestWriter.SEQUENCE_ID_LIMIT / 3 * 2;
  int requestCount = 3;
  List<StoreRequest> requests = new ArrayList<>();
  for (int i = 0; i < requestCount; i++) {
    List<String> payloads = new ArrayList<>(msgCount);
    for (int j = 0; j < msgCount; j++) {
      payloads.add(Integer.toString(j));
    }
    requests.add(new TestStoreRequest(topicId, payloads));
  }
  TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
  // We use a custom metrics collector here to make all the persist calls reach the same latch.
  // We know that the ConcurrentMessageWriter emits a "persist.requested" metric after a request
  // is enqueued but before it is flushed, so blocking there makes all requests get batched together.
  final CountDownLatch latch = new CountDownLatch(requestCount);
  final ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter, new MetricsCollector() {
    @Override
    public void increment(String metricName, long value) {
      if ("persist.requested".equals(metricName)) {
        latch.countDown();
        Uninterruptibles.awaitUninterruptibly(latch);
      }
    }

    @Override
    public void gauge(String metricName, long value) {
      LOG.info("MetricsContext.gauge: {} = {}", metricName, value);
    }
  });
  ExecutorService executor = Executors.newFixedThreadPool(3);
  for (final StoreRequest request : requests) {
    executor.submit(new Runnable() {
      @Override
      public void run() {
        try {
          writer.persist(request, metadata);
        } catch (IOException e) {
          LOG.error("Failed to persist", e);
        }
      }
    });
  }
  executor.shutdown();
  Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
  // Validate that all messages were written.
  List<RawMessage> messages = testWriter.getMessages().get(topicId);
  Assert.assertEquals(requestCount * msgCount, messages.size());
  // We expect the payloads to repeat the sequence [0 .. msgCount - 1],
  // and the sequenceId of message i to be (i % SEQUENCE_ID_LIMIT).
  int expectedPayload = 0;
  for (int i = 0; i < messages.size(); i++) {
    RawMessage message = messages.get(i);
    MessageId messageId = new MessageId(message.getId());
    Assert.assertEquals(i / StoreRequestWriter.SEQUENCE_ID_LIMIT, messageId.getPublishTimestamp());
    Assert.assertEquals((short) (i % StoreRequestWriter.SEQUENCE_ID_LIMIT), messageId.getSequenceId());
    Assert.assertEquals(expectedPayload, Integer.parseInt(Bytes.toString(message.getPayload())));
    expectedPayload = (expectedPayload + 1) % msgCount;
  }
}
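
The synchronization trick above deserves a closer look: each persisting thread counts the latch down inside increment() and then blocks on that same latch, so no thread gets past the "persist.requested" point until all three requests are enqueued, which is what forces the writer to batch them. Below is a minimal, self-contained sketch of that rendezvous pattern using only java.util.concurrent; the class name LatchRendezvous is ours and no CDAP types are involved.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public final class LatchRendezvous {
  public static void main(String[] args) throws InterruptedException {
    final int parties = 3;
    final CountDownLatch latch = new CountDownLatch(parties);
    ExecutorService executor = Executors.newFixedThreadPool(parties);
    for (int i = 0; i < parties; i++) {
      final int id = i;
      executor.submit(new Runnable() {
        @Override
        public void run() {
          // Announce arrival, then wait until every party has arrived.
          latch.countDown();
          try {
            latch.await();
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
          }
          // No thread reaches this line until all parties have counted down.
          System.out.println("thread " + id + " released");
        }
      });
    }
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.MINUTES);
  }
}

The test uses Guava's Uninterruptibles.awaitUninterruptibly(latch) in place of the try/catch around latch.await(), but the blocking behavior is the same.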
Use of co.cask.cdap.api.metrics.MetricsCollector in project cdap by caskdata.
From the class TableTest, method testMetrics:
private void testMetrics(boolean readless) throws Exception {
  final String tableName = "survive";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  Table table = getTable(CONTEXT1, tableName, props);
  final Map<String, Long> metrics = Maps.newHashMap();
  ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {
    @Override
    public void increment(String metricName, long value) {
      Long old = metrics.get(metricName);
      metrics.put(metricName, old == null ? value : old + value);
    }

    @Override
    public void gauge(String metricName, long value) {
      metrics.put(metricName, value);
    }
  });
  // Note that we don't need to finish the tx for metrics to be reported.
  Transaction tx0 = txClient.startShort();
  ((TransactionAware) table).startTx(tx0);
  int writes = 0;
  int reads = 0;
  table.put(new Put(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  // Note: will not write anything, as the expected value no longer matches.
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, writes, ++reads);
  // A readless increment counts only as a write; otherwise it also counts as a read.
  table.increment(new Increment(R2, C2, 1L));
  if (readless) {
    verifyDatasetMetrics(metrics, ++writes, reads);
  } else {
    verifyDatasetMetrics(metrics, ++writes, ++reads);
  }
  table.incrementAndGet(new Increment(R2, C2, 1L));
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  table.get(new Get(R1, C1, V1));
  verifyDatasetMetrics(metrics, writes, ++reads);
  Scanner scanner = table.scan(new Scan(null, null));
  while (scanner.next() != null) {
    verifyDatasetMetrics(metrics, writes, ++reads);
  }
  table.delete(new Delete(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  // Drop the table.
  admin.drop();
}
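
The verifyDatasetMetrics helper belongs to the test class and is not shown in this snippet. A plausible sketch of it, assuming the collector above accumulates read and write counts under keys named "store.reads" and "store.writes" (both key names are our assumption, not taken from the CDAP source):

// Hypothetical helper; the metric key names are assumed for illustration.
private void verifyDatasetMetrics(Map<String, Long> metrics, long expectedWrites, long expectedReads) {
  Assert.assertEquals(expectedWrites, getOrZero(metrics, "store.writes"));
  Assert.assertEquals(expectedReads, getOrZero(metrics, "store.reads"));
}

private long getOrZero(Map<String, Long> metrics, String key) {
  Long value = metrics.get(key);
  return value == null ? 0L : value;
}

Note the design point the readless branch is exercising: a readless increment is a blind write (no read is counted), while incrementAndGet must read the current value first, so it always bumps both counters.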