Use of org.wikidata.query.rdf.updater.RDFDataChunk in project wikidata-query-rdf by wikimedia.
The class KafkaStreamConsumerMetricsListenerUnitTest, method test_metrics_are_reported:
@Test
public void test_metrics_are_reported() {
    Instant now = Instant.now();
    Clock fixedClock = Clock.fixed(now, ZoneOffset.UTC);
    Duration lagEvt1 = Duration.ofHours(2);
    Duration lagEvt2 = Duration.ofHours(1);
    Instant evTime1 = now.minus(lagEvt1);
    Instant evTime2 = now.minus(lagEvt2);
    // Two single-chunk import events whose event times lag "now" by 2h and 1h.
    MutationEventData msg1 = new DiffEventData(new EventsMeta(Instant.now(), "unused", "domain", "stream", "req"),
            "Q0", 1, evTime1, 0, 1, MutationEventData.IMPORT_OPERATION,
            new RDFDataChunk("\n<uri:a> <uri:a> <uri:a> .\n", RDFFormat.TURTLE.getDefaultMIMEType()),
            null, null, null);
    MutationEventData msg2 = new DiffEventData(new EventsMeta(Instant.now(), "unused", "domain", "stream", "req"),
            "Q0", 2, evTime2, 0, 1, MutationEventData.IMPORT_OPERATION,
            new RDFDataChunk("\n<uri:b> <uri:b> <uri:b> .\n", RDFFormat.TURTLE.getDefaultMIMEType()),
            null, null, null);
    TopicPartition topicPartition = new TopicPartition("topic", 0);
    // Successive polls deliver msg1 (offset 0) then msg2 (offset 1).
    when(consumer.poll(any())).thenReturn(
            new ConsumerRecords<>(singletonMap(topicPartition, singletonList(
                    new ConsumerRecord<>(topicPartition.topic(), topicPartition.partition(), 0, null, msg1)))),
            new ConsumerRecords<>(singletonMap(topicPartition, singletonList(
                    new ConsumerRecord<>(topicPartition.topic(), topicPartition.partition(), 1, null, msg2)))));
    MetricRegistry registry = new MetricRegistry();
    KafkaStreamConsumer streamConsumer = new KafkaStreamConsumer(consumer, topicPartition, chunkDeser, 1,
            new KafkaStreamConsumerMetricsListener(registry, fixedClock), m -> true);
    streamConsumer.poll(Duration.ofMillis(0));
    Gauge<Long> lag = registry.getGauges().get("kafka-stream-consumer-lag");
    Counter offered = registry.getCounters().get("kafka-stream-consumer-triples-offered");
    Counter accumulated = registry.getCounters().get("kafka-stream-consumer-triples-accumulated");
    // Lag stays at zero until a message is acknowledged.
    assertThat(lag.getValue()).isZero();
    assertThat(offered.getCount()).isEqualTo(1);
    assertThat(accumulated.getCount()).isEqualTo(1);
    streamConsumer.acknowledge();
    assertThat(lag.getValue()).isEqualTo(lagEvt1.toMillis());
    streamConsumer.poll(Duration.ofMillis(0));
    assertThat(offered.getCount()).isEqualTo(2);
    assertThat(accumulated.getCount()).isEqualTo(2);
    // Still msg1's lag: msg2 has been polled but not yet acknowledged.
    assertThat(lag.getValue()).isEqualTo(lagEvt1.toMillis());
    streamConsumer.acknowledge();
    assertThat(lag.getValue()).isEqualTo(lagEvt2.toMillis());
}
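The lag values asserted above are plain clock arithmetic: the gauge reports the difference between the fixed clock and the event time of the most recently acknowledged message, and zero before anything is acknowledged. A minimal self-contained sketch of such a gauge against a Dropwizard MetricRegistry (the lastAckedEventTime holder and the wiring are illustrative assumptions, not the listener's actual internals):

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneOffset;
import java.util.concurrent.atomic.AtomicReference;

class LagGaugeSketch {
    public static void main(String[] args) {
        Clock clock = Clock.fixed(Instant.now(), ZoneOffset.UTC);
        // Event time of the last acknowledged message; null until the first acknowledge() (assumption).
        AtomicReference<Instant> lastAckedEventTime = new AtomicReference<>();
        MetricRegistry registry = new MetricRegistry();
        registry.register("kafka-stream-consumer-lag", (Gauge<Long>) () -> {
            Instant evt = lastAckedEventTime.get();
            // Zero before any acknowledgement, matching the first assertion in the test above.
            return evt == null ? 0L : Duration.between(evt, clock.instant()).toMillis();
        });
        // Acknowledging a message whose event time is two hours old yields a 2h lag in millis.
        lastAckedEventTime.set(clock.instant().minus(Duration.ofHours(2)));
        System.out.println(registry.getGauges().get("kafka-stream-consumer-lag").getValue()); // 7200000
    }
}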
Use of org.wikidata.query.rdf.updater.RDFDataChunk in project wikidata-query-rdf by wikimedia.
The class KafkaStreamConsumerUnitTest, method test_prefer_reassembled_message:
@Test
public void test_prefer_reassembled_message() {
    int bufferedMessages = 250;
    TopicPartition topicPartition = new TopicPartition("test", 0);
    // One logical diff event split into 250 sequenced chunks, one triple per chunk.
    List<ConsumerRecord<String, MutationEventData>> allRecords = IntStream.range(0, bufferedMessages).mapToObj(i -> {
        EventsMeta meta = new EventsMeta(Instant.EPOCH, UUID.randomUUID().toString(), TEST_DOMAIN, TESTED_STREAM, "unused");
        MutationEventData diff = new DiffEventData(meta, "Q1", 1, Instant.EPOCH, i, bufferedMessages,
                MutationEventData.DIFF_OPERATION,
                new RDFDataChunk("<uri:a> <uri:a> <uri:" + i + "> .\n", RDFFormat.TURTLE.getDefaultMIMEType()),
                null, null, null);
        return new ConsumerRecord<String, MutationEventData>(topicPartition.topic(), topicPartition.partition(), i, null, diff);
    }).collect(toList());
    // Deliver the chunks across two polls, then an empty poll.
    when(consumer.poll(any())).thenReturn(
            new ConsumerRecords<>(singletonMap(topicPartition, allRecords.subList(0, bufferedMessages / 2))),
            new ConsumerRecords<>(singletonMap(topicPartition, allRecords.subList(bufferedMessages / 2, allRecords.size()))),
            new ConsumerRecords<>(emptyMap()));
    KafkaStreamConsumer streamConsumer = new KafkaStreamConsumer(consumer, topicPartition, chunkDeser, 10,
            KafkaStreamConsumerMetricsListener.forRegistry(new MetricRegistry()), m -> true);
    StreamConsumer.Batch b = streamConsumer.poll(Duration.ofMillis(100));
    assertThat(b).isNotNull();
    ConsumerPatch patch = b.getPatch();
    // All 250 chunks are reassembled into a single patch before being handed out.
    assertThat(patch.getAdded().size()).isEqualTo(bufferedMessages);
    streamConsumer.acknowledge();
    b = streamConsumer.poll(Duration.ofMillis(100));
    assertThat(b).isNull();
}
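RDFDataChunk itself is just a carrier for a serialized RDF payload plus its MIME type, and the reassembly this test exercises amounts to concatenating the chunk payloads in sequence order before the patch is built. A short sketch of that step, assuming a getData() accessor for the chunk's payload (the accessor name is an assumption, not confirmed API):

import java.util.List;
import java.util.stream.Collectors;
import org.wikidata.query.rdf.updater.RDFDataChunk;

class ChunkReassemblySketch {
    // Rebuild one Turtle document from chunks already ordered by their sequence index,
    // mirroring how the 250 one-triple chunks above become a single 250-triple patch.
    static String reassemble(List<RDFDataChunk> orderedChunks) {
        return orderedChunks.stream()
                .map(RDFDataChunk::getData) // assumed accessor for the constructor's data argument
                .collect(Collectors.joining());
    }
}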