Use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.
The class DeprecatedCommitStrategiesTest, method testThrottledStrategyWithTooManyUnackedMessages.
@Test
void testThrottledStrategyWithTooManyUnackedMessages() throws Exception {
    MapBasedConfig config = commonConfiguration()
            .with("client.id", UUID.randomUUID().toString())
            .with("commit-strategy", "throttled")
            .with("auto.offset.reset", "earliest")
            .with("health-enabled", true)
            .with("throttled.unprocessed-record-max-age.ms", 1000)
            .with("auto.commit.interval.ms", 100);
    String group = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config),
            getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1);
    injectMockConsumer(source, consumer);

    List<Message<?>> list = new ArrayList<>();
    source.getStream().subscribe().with(list::add);

    TopicPartition p0 = new TopicPartition(TOPIC, 0);
    TopicPartition p1 = new TopicPartition(TOPIC, 1);
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(p0, 0L);
    offsets.put(p1, 5L);
    consumer.updateBeginningOffsets(offsets);

    consumer.schedulePollTask(() -> {
        consumer.rebalance(offsets.keySet());
        source.getCommitHandler().partitionsAssigned(offsets.keySet());
        for (int i = 0; i < 500; i++) {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, i, "k", "v0-" + i));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, i, "r", "v1-" + i));
        }
    });

    // Expected number of messages: 500 per partition, minus the 5 records on p1
    // whose offsets ([0..5)) are below its beginning offset.
    int expected = 500 * 2 - 5;
    await().until(() -> list.size() == expected);
    assertThat(list).hasSize(expected);

    // Only ack the messages from partition 0 and the first 5 items from partition 1.
    int count = 0;
    for (Message<?> message : list) {
        IncomingKafkaRecordMetadata<?, ?> metadata = message.getMetadata(IncomingKafkaRecordMetadata.class)
                .orElseThrow(() -> new Exception("metadata expected"));
        if (metadata.getPartition() == 0) {
            message.ack().toCompletableFuture().join();
        } else if (count < 5) {
            message.ack().toCompletableFuture().join();
            count++;
        }
        LegacyMetadataTestUtils.tempCompareLegacyAndApiMetadata(metadata, message);
    }

    // Partition 1 still has records left unacked for longer than the max age,
    // so the liveness check must eventually fail, pointing at my-topic-1 and offset 9.
    AtomicReference<HealthReport> report = new AtomicReference<>();
    await().until(() -> {
        HealthReport.HealthReportBuilder builder = HealthReport.builder();
        source.isAlive(builder);
        HealthReport r = builder.build();
        report.set(r);
        return !r.isOk();
    });
    HealthReport r = report.get();
    String message = r.getChannels().get(0).getMessage();
    assertThat(message).contains("my-topic-1", "9");
}
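The failure above comes from leaving records unacknowledged past throttled.unprocessed-record-max-age.ms. For contrast, here is a minimal sketch of the application-side pattern that keeps the throttled strategy healthy; the bean, its channel name "data", and the matching mp.messaging.incoming.data.* configuration are illustrative assumptions, not part of the test.

import java.util.concurrent.CompletionStage;
import javax.enterprise.context.ApplicationScoped;
import org.eclipse.microprofile.reactive.messaging.Incoming;
import org.eclipse.microprofile.reactive.messaging.Message;

// Hypothetical consumer; assumes a channel configured with
// mp.messaging.incoming.data.commit-strategy=throttled
@ApplicationScoped
public class DataConsumer {

    @Incoming("data")
    public CompletionStage<Void> consume(Message<String> message) {
        // ... process the payload ...
        // Acking lets the throttled strategy advance its commit position; records
        // left unacked past the max age flip the liveness check to "not ok".
        return message.ack();
    }
}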
Use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.
The class CommitStrategiesTest, method testThrottledStrategyWithTooManyUnackedMessages. The body of this test is identical, line for line, to the DeprecatedCommitStrategiesTest listing above, so it is not repeated here.
Use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.
The class KafkaCloudEventHelper, method createStructuredRecord.
@SuppressWarnings("rawtypes")
public static ProducerRecord<?, ?> createStructuredRecord(Message<?> message, String topic,
        OutgoingKafkaRecordMetadata<?> outgoingMetadata, IncomingKafkaRecordMetadata<?, ?> incomingMetadata,
        OutgoingCloudEventMetadata<?> ceMetadata, RuntimeKafkaSinkConfiguration configuration) {
    if (ceMetadata == null) {
        ceMetadata = OutgoingCloudEventMetadata.builder().build();
    }
    Integer partition = getPartition(outgoingMetadata, configuration);
    Object key = getKey(message, outgoingMetadata, ceMetadata, configuration);
    Long timestamp = getTimestamp(outgoingMetadata);
    List<Header> headers = getHeaders(outgoingMetadata, incomingMetadata, configuration);
    String source = getSource(ceMetadata, configuration);
    String type = getType(ceMetadata, configuration);
    Optional<String> subject = getSubject(ceMetadata, configuration);
    Optional<String> dataContentType = getDataContentType(ceMetadata, configuration);
    Optional<URI> schema = getDataSchema(ceMetadata, configuration);

    // If the headers do not already contain a "content-type" header, add one.
    Optional<Header> contentType = headers.stream()
            .filter(h -> h.key().equalsIgnoreCase(KAFKA_HEADER_CONTENT_TYPE))
            .findFirst();
    if (!contentType.isPresent()) {
        headers.add(new RecordHeader(KAFKA_HEADER_CONTENT_TYPE, STRUCTURED_CONTENT_TYPE.getBytes()));
    }

    // Build the JSON object representing the Cloud Event.
    JsonObject json = new JsonObject();
    json.put(CE_ATTRIBUTE_SPEC_VERSION, ceMetadata.getSpecVersion())
            .put(CE_ATTRIBUTE_TYPE, type)
            .put(CE_ATTRIBUTE_SOURCE, source)
            .put(CE_ATTRIBUTE_ID, ceMetadata.getId());
    ZonedDateTime time = ceMetadata.getTimeStamp().orElse(null);
    if (time != null) {
        json.put(CE_ATTRIBUTE_TIME, time.toInstant());
    } else if (configuration.getCloudEventsInsertTimestamp()) {
        json.put(CE_ATTRIBUTE_TIME, Instant.now());
    }
    schema.ifPresent(s -> json.put(CE_ATTRIBUTE_DATA_SCHEMA, s));
    dataContentType.ifPresent(s -> json.put(CE_ATTRIBUTE_DATA_CONTENT_TYPE, s));
    subject.ifPresent(s -> json.put(CE_ATTRIBUTE_SUBJECT, s));

    // Extensions
    ceMetadata.getExtensions().forEach(json::put);

    // Encode the payload to JSON: unwrap Record values, write Strings as-is,
    // and map any other type to a JsonObject.
    Object payload = message.getPayload();
    if (payload instanceof Record) {
        payload = ((Record) payload).value();
    }
    if (payload instanceof String) {
        json.put("data", payload);
    } else {
        json.put("data", JsonObject.mapFrom(payload));
    }
    return new ProducerRecord<>(topic, partition, timestamp, key, json.encode(), headers);
}
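This helper runs on the outgoing path when the connector is asked to emit structured (rather than binary) Cloud Events. Below is a minimal sketch of the producer side that feeds it; the id, type, source URI, and payload values are illustrative assumptions.

import java.net.URI;
import org.eclipse.microprofile.reactive.messaging.Message;
import io.smallrye.reactive.messaging.ce.OutgoingCloudEventMetadata;

// Hypothetical producer-side snippet; the attribute values are examples.
OutgoingCloudEventMetadata<Object> ceMetadata = OutgoingCloudEventMetadata.builder()
        .withId("price-42")
        .withType("com.example.price")
        .withSource(URI.create("example://prices"))
        .withSubject("price")
        .build();

// The payload ends up in the "data" field of the structured JSON record:
// a String is written as-is, any other type goes through JsonObject.mapFrom.
Message<String> message = Message.of("10.99").addMetadata(ceMetadata);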
Use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.
The class MultiTopicsTest, method testWithPattern.
@Test
public void testWithPattern() {
    String topic1 = "greetings-" + UUID.randomUUID();
    String topic2 = "greetings-" + UUID.randomUUID();
    String topic3 = "greetings-" + UUID.randomUUID();
    companion.topics().createAndWait(topic1, 1);
    companion.topics().createAndWait(topic2, 1);
    companion.topics().createAndWait(topic3, 1);

    KafkaConsumer bean = runApplication(kafkaConfig("mp.messaging.incoming.kafka")
            .with("value.deserializer", StringDeserializer.class.getName())
            .with("topic", "greetings-.+")
            .with("pattern", true)
            .with("auto.offset.reset", "earliest"), KafkaConsumer.class);
    await().until(this::isReady);
    await().until(this::isAlive);
    assertThat(bean.getMessages()).isEmpty();

    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>(topic1, "hello"), 3);
    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>(topic2, "hallo"), 3);
    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>(topic3, "bonjour"), 3);
    // These records must not match the "greetings-.+" pattern and must not be consumed.
    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>("do-not-match", "Bahh!"), 3);
    await().until(() -> bean.getMessages().size() >= 9);

    AtomicInteger top1 = new AtomicInteger();
    AtomicInteger top2 = new AtomicInteger();
    AtomicInteger top3 = new AtomicInteger();
    bean.getMessages().forEach(message -> {
        IncomingKafkaRecordMetadata<?, ?> record = message.getMetadata(IncomingKafkaRecordMetadata.class)
                .orElse(null);
        assertThat(record).isNotNull();
        String topic = record.getTopic();
        if (topic.equals(topic1)) {
            top1.incrementAndGet();
            assertThat(message.getPayload()).isEqualTo("hello");
        } else if (topic.equals(topic2)) {
            top2.incrementAndGet();
            assertThat(message.getPayload()).isEqualTo("hallo");
        } else if (topic.equals(topic3)) {
            top3.incrementAndGet();
            assertThat(message.getPayload()).isEqualTo("bonjour");
        }
        LegacyMetadataTestUtils.tempCompareLegacyAndApiMetadata(record, message);
    });
    assertThat(top1).hasValue(3);
    assertThat(top2).hasValue(3);
    assertThat(top3).hasValue(3);
}
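The KafkaConsumer bean driven by runApplication is defined elsewhere in MultiTopicsTest and is not shown on this page. A plausible minimal shape, reconstructed from how the test uses it, is sketched below; the channel name "kafka" follows from the mp.messaging.incoming.kafka prefix, the rest is an assumption.

import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CopyOnWriteArrayList;
import javax.enterprise.context.ApplicationScoped;
import org.eclipse.microprofile.reactive.messaging.Incoming;
import org.eclipse.microprofile.reactive.messaging.Message;

// Hypothetical application bean collecting every message received on the channel.
@ApplicationScoped
public class KafkaConsumer {

    // Thread-safe list: messages arrive on the connector's thread while the
    // test thread polls getMessages().
    private final List<Message<String>> messages = new CopyOnWriteArrayList<>();

    @Incoming("kafka")
    public CompletionStage<Void> consume(Message<String> message) {
        messages.add(message);
        return message.ack();
    }

    public List<Message<String>> getMessages() {
        return messages;
    }
}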