Use of io.smallrye.reactive.messaging.kafka.KafkaRecord in project smallrye-reactive-messaging by smallrye.
Class KafkaCommitHandlerTest, method testSourceWithThrottledLatestProcessedCommitEnabled.
@Test
public void testSourceWithThrottledLatestProcessedCommitEnabled() {
    MapBasedConfig config = newCommonConfigForSource()
            .with("client.id", UUID.randomUUID().toString())
            .with("group.id", "test-source-with-throttled-latest-processed-commit")
            .with("auto.offset.reset", "earliest")
            .with("value.deserializer", IntegerDeserializer.class.getName())
            .with("commit-strategy", "throttled")
            .with("throttled.unprocessed-record-max-age.ms", 100);
    KafkaConnectorIncomingConfiguration ic = new KafkaConnectorIncomingConfiguration(config);
    source = new KafkaSource<>(vertx, "test-source-with-throttled-latest-processed-commit", ic,
            UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents, UnsatisfiedInstance.instance(), -1);

    List<Message<?>> messages = Collections.synchronizedList(new ArrayList<>());
    source.getStream().subscribe().with(messages::add);

    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, i), 10);

    await().atMost(2, TimeUnit.MINUTES).until(() -> messages.size() >= 10);
    assertThat(messages.stream()
            .map(m -> ((KafkaRecord<String, Integer>) m).getPayload())
            .collect(Collectors.toList()))
            .containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);

    await().atMost(2, TimeUnit.MINUTES).ignoreExceptions().untilAsserted(() -> {
        // we must keep acking to eventually induce a commit
        messages.forEach(Message::ack);
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        OffsetAndMetadata offset = companion.consumerGroups()
                .offsets("test-source-with-throttled-latest-processed-commit", topicPartition);
        assertNotNull(offset);
        assertEquals(10L, offset.offset());
    });

    await().atMost(2, TimeUnit.MINUTES).untilAsserted(() -> {
        HealthReport.HealthReportBuilder healthReportBuilder = HealthReport.builder();
        source.isAlive(healthReportBuilder);
        assertTrue(healthReportBuilder.build().isOk());
    });
}
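The test drives commits by acking messages by hand; in an application, the same throttled strategy (the commit-strategy and throttled.unprocessed-record-max-age.ms attributes configured above) works behind a plain @Incoming method, where acknowledgement happens automatically after the method returns. A minimal sketch, assuming a channel named "prices"; the bean and channel name are illustrative and not part of the test:

import jakarta.enterprise.context.ApplicationScoped; // javax.* on older releases

import org.eclipse.microprofile.reactive.messaging.Incoming;

// Hypothetical consumer: each successful return acknowledges the message, which is
// what lets the throttled strategy advance and commit the latest processed offset,
// mirroring the repeated Message::ack calls in the test above.
@ApplicationScoped
public class PriceConsumer {

    @Incoming("prices") // channel name is an assumption for illustration
    public void consume(int price) {
        // process the payload; on return the message is acked automatically
        System.out.println("Received price: " + price);
    }
}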
Use of io.smallrye.reactive.messaging.kafka.KafkaRecord in project smallrye-reactive-messaging by smallrye.
Class HighLatencyTest, method testHighLatency.
@Test
public void testHighLatency() throws InterruptedException, IOException {
    MapBasedConfig config = newCommonConfigForSource()
            .with("value.deserializer", IntegerDeserializer.class.getName())
            .with(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 6000)
            .with(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100)
            .with("retry", true)
            .with("retry-attempts", 100)
            .with("retry-max-wait", 30);
    KafkaConnectorIncomingConfiguration ic = new KafkaConnectorIncomingConfiguration(config);
    source = new KafkaSource<>(vertx, UUID.randomUUID().toString(), ic,
            UnsatisfiedInstance.instance(), CountKafkaCdiEvents.noCdiEvents, UnsatisfiedInstance.instance(), -1);

    List<KafkaRecord<?, ?>> messages1 = new ArrayList<>();
    source.getStream().subscribe().with(messages1::add);

    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, i), 10);
    await().atMost(2, TimeUnit.MINUTES).until(() -> messages1.size() >= 10);

    // inject latency longer than the session timeout, wait it out, then remove it
    toxics().latency("latency", ToxicDirection.UPSTREAM, 6000 + 1000);
    // session timeout + a bit more, just in case
    Thread.sleep(6000 + 2000);
    toxics().get("latency").remove();

    companion.produceIntegers().usingGenerator(i -> new ProducerRecord<>(topic, 10 + i), 10);
    await().atMost(2, TimeUnit.MINUTES).until(() -> messages1.size() >= 20);
    assertThat(messages1.size()).isGreaterThanOrEqualTo(20);
}
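The toxics() call comes from the test's Toxiproxy setup, which is not part of this excerpt. A minimal sketch of what such wiring could look like with the toxiproxy-java client; the host, ports, and proxy name below are assumptions for illustration, not the test's actual setup:

import java.io.IOException;

import eu.rekawek.toxiproxy.Proxy;
import eu.rekawek.toxiproxy.ToxiproxyClient;
import eu.rekawek.toxiproxy.model.ToxicDirection;

// Hypothetical Toxiproxy wiring: route Kafka traffic through a proxy so a test can
// inject latency longer than the consumer session timeout, then remove the toxic.
public class KafkaProxySketch {
    public static void main(String[] args) throws IOException {
        ToxiproxyClient client = new ToxiproxyClient("localhost", 8474);
        // listen address and upstream broker address are illustrative
        Proxy kafkaProxy = client.createProxy("kafka", "localhost:19092", "localhost:9092");

        // add ~7s of upstream latency (the session timeout is 6s in the test above)
        kafkaProxy.toxics().latency("latency", ToxicDirection.UPSTREAM, 6000 + 1000);

        // ... let the consumer miss its heartbeats, then restore connectivity
        kafkaProxy.toxics().get("latency").remove();
    }
}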
Use of io.smallrye.reactive.messaging.kafka.KafkaRecord in project smallrye-reactive-messaging by smallrye.
Class KafkaSourceWithCloudEventsTest, method testWithBeanReceivingBinaryAndStructuredCloudEvents.
@SuppressWarnings("unchecked")
@Test
public void testWithBeanReceivingBinaryAndStructuredCloudEvents() {
    ConsumptionBean bean = run(getConfig(topic));
    List<KafkaRecord<String, String>> list = bean.getKafkaRecords();
    assertThat(list).isEmpty();

    // Send a binary cloud event
    List<Header> headers = new ArrayList<>();
    headers.add(new RecordHeader("ce_specversion", CloudEventMetadata.CE_VERSION_1_0.getBytes()));
    headers.add(new RecordHeader("ce_type", "type".getBytes()));
    headers.add(new RecordHeader("ce_source", "test://test".getBytes()));
    headers.add(new RecordHeader("ce_id", "id".getBytes()));
    headers.add(new RecordHeader("content-type", "text/plain".getBytes()));
    headers.add(new RecordHeader("ce_subject", "foo".getBytes()));
    companion.produceStrings()
            .fromRecords(new ProducerRecord<>(topic, null, null, "binary", "Hello Binary 1", headers));

    await().atMost(10, TimeUnit.SECONDS).until(() -> list.size() >= 1);
    KafkaRecord<String, String> record = list.get(0);
    assertThat(record.getTopic()).isEqualTo(topic);
    IncomingKafkaCloudEventMetadata<String, String> metadata = record
            .getMetadata(IncomingKafkaCloudEventMetadata.class).orElse(null);
    assertThat(metadata).isNotNull();
    assertThat(metadata.getTopic()).isEqualTo(topic);
    assertThat(metadata.getKey()).isEqualTo("binary");
    assertThat(metadata.getId()).isEqualTo("id");
    assertThat(metadata.getSubject()).hasValue("foo");
    assertThat(metadata.getData()).isEqualTo("Hello Binary 1");
    assertThat(record.getPayload()).isEqualTo("Hello Binary 1");

    // Send a structured cloud event
    companion.produceStrings().fromRecords(new ProducerRecord<>(topic, null, null, "structured",
            new JsonObject()
                    .put("specversion", CloudEventMetadata.CE_VERSION_1_0)
                    .put("type", "type")
                    .put("id", "id")
                    .put("source", "test://test")
                    .put("subject", "bar")
                    .put("datacontenttype", "application/json")
                    .put("dataschema", "http://schema.io")
                    .put("time", "2020-07-23T09:12:34Z")
                    .put("data", "Hello Structured 1")
                    .encode(),
            Collections.singletonList(
                    new RecordHeader("content-type", "application/cloudevents+json; charset=utf-8".getBytes()))));

    await().atMost(10, TimeUnit.SECONDS).until(() -> list.size() >= 2);
    record = list.get(1);
    assertThat(record.getTopic()).isEqualTo(topic);
    metadata = record.getMetadata(IncomingKafkaCloudEventMetadata.class).orElse(null);
    assertThat(metadata).isNotNull();
    assertThat(metadata.getTopic()).isEqualTo(topic);
    assertThat(metadata.getKey()).isEqualTo("structured");
    assertThat(metadata.getId()).isEqualTo("id");
    assertThat(metadata.getSubject()).hasValue("bar");
    assertThat(metadata.getData()).isEqualTo("Hello Structured 1");
    assertThat(record.getPayload()).contains("Hello Structured 1");

    // Send a last binary cloud event
    List<Header> headers2 = new ArrayList<>();
    headers2.add(new RecordHeader("ce_specversion", CloudEventMetadata.CE_VERSION_1_0.getBytes()));
    headers2.add(new RecordHeader("ce_type", "type".getBytes()));
    headers2.add(new RecordHeader("ce_source", "test://test".getBytes()));
    headers2.add(new RecordHeader("ce_id", "id".getBytes()));
    headers2.add(new RecordHeader("content-type", "text/plain".getBytes()));
    headers2.add(new RecordHeader("ce_subject", "foo".getBytes()));
    companion.produceStrings()
            .fromRecords(new ProducerRecord<>(topic, null, null, "binary", "Hello Binary 2", headers2));

    await().atMost(10, TimeUnit.SECONDS).until(() -> list.size() >= 3);
    record = list.get(2);
    assertThat(record.getTopic()).isEqualTo(topic);
    metadata = record.getMetadata(IncomingKafkaCloudEventMetadata.class).orElse(null);
    assertThat(metadata).isNotNull();
    assertThat(metadata.getTopic()).isEqualTo(topic);
    assertThat(metadata.getKey()).isEqualTo("binary");
    assertThat(metadata.getId()).isEqualTo("id");
    assertThat(metadata.getSubject()).hasValue("foo");
    assertThat(metadata.getData()).isEqualTo("Hello Binary 2");
    assertThat(record.getPayload()).isEqualTo("Hello Binary 2");
}
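The ConsumptionBean referenced by run(getConfig(topic)) is not shown in this excerpt. A minimal sketch of what such a collecting bean could look like, assuming an @Incoming channel; the channel name "data" and the class name are illustrative, only the getKafkaRecords() accessor is taken from the test above:

import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CopyOnWriteArrayList;

import jakarta.enterprise.context.ApplicationScoped; // javax.* on older releases

import org.eclipse.microprofile.reactive.messaging.Incoming;

import io.smallrye.reactive.messaging.kafka.KafkaRecord;

// Hypothetical stand-in for the ConsumptionBean used above: it collects every
// incoming KafkaRecord so a test can assert on keys, payloads, and CloudEvent
// metadata, and acknowledges each record after storing it.
@ApplicationScoped
public class ConsumptionBeanSketch {

    private final List<KafkaRecord<String, String>> records = new CopyOnWriteArrayList<>();

    @Incoming("data") // channel name is an assumption for illustration
    public CompletionStage<Void> consume(KafkaRecord<String, String> record) {
        records.add(record);
        return record.ack();
    }

    public List<KafkaRecord<String, String>> getKafkaRecords() {
        return records;
    }
}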