Search in sources :

Example 6 with IncomingKafkaRecordMetadata

use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.

The following example shows the use in the class DeprecatedCommitStrategiesTest, method testThrottledStrategyWithTooManyUnackedMessages.

@Test
void testThrottledStrategyWithTooManyUnackedMessages() throws Exception {
    // Throttled commit strategy with a 1-second cap on how long a polled record may
    // remain un-acknowledged; leaving records un-acked must flip the health check to KO.
    MapBasedConfig config = commonConfiguration().with("client.id", UUID.randomUUID().toString()).with("commit-strategy", "throttled").with("auto.offset.reset", "earliest").with("health-enabled", true).with("throttled.unprocessed-record-max-age.ms", 1000).with("auto.commit.interval.ms", 100);
    String group = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, group, new KafkaConnectorIncomingConfiguration(config), getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents, getDeserializationFailureHandlers(), -1);
    // Replace the real Kafka client with the mock consumer so polls can be scripted below.
    injectMockConsumer(source, consumer);
    List<Message<?>> list = new ArrayList<>();
    source.getStream().subscribe().with(list::add);
    TopicPartition p0 = new TopicPartition(TOPIC, 0);
    TopicPartition p1 = new TopicPartition(TOPIC, 1);
    Map<TopicPartition, Long> offsets = new HashMap<>();
    // p0 begins at offset 0, p1 at offset 5 — so p1's records [0..5) are never delivered.
    offsets.put(p0, 0L);
    offsets.put(p1, 5L);
    consumer.updateBeginningOffsets(offsets);
    consumer.schedulePollTask(() -> {
        consumer.rebalance(offsets.keySet());
        source.getCommitHandler().partitionsAssigned(offsets.keySet());
        for (int i = 0; i < 500; i++) {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, i, "k", "v0-" + i));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, i, "r", "v1-" + i));
        }
    });
    // Expected number of messages: 500 messages in each partition minus the [0..5) messages from p1
    int expected = 500 * 2 - 5;
    await().until(() -> list.size() == expected);
    assertThat(list).hasSize(expected);
    // Ack every record from partition 0, but only the first 5 records from partition 1;
    // the remaining partition-1 records stay un-acked and exceed the 1s max age configured above.
    int count = 0;
    for (Message<?> message : list) {
        IncomingKafkaRecordMetadata<?, ?> metadata = message.getMetadata(IncomingKafkaRecordMetadata.class).orElseThrow(() -> new Exception("metadata expected"));
        if (metadata.getPartition() == 0) {
            message.ack().toCompletableFuture().join();
        } else {
            if (count < 5) {
                message.ack().toCompletableFuture().join();
                count = count + 1;
            }
        }
        LegacyMetadataTestUtils.tempCompareLegacyAndApiMetadata(metadata, message);
    }
    // Poll the liveness check until it reports KO because of the stalled partition-1 records.
    AtomicReference<HealthReport> report = new AtomicReference<>();
    await().until(() -> {
        HealthReport.HealthReportBuilder builder = HealthReport.builder();
        source.isAlive(builder);
        HealthReport r = builder.build();
        report.set(r);
        return !r.isOk();
    });
    HealthReport r = report.get();
    // The failure message is expected to name the stalled topic-partition ("my-topic-1")
    // and an offset ("9" — presumably the first un-acked position; verified by the assertion).
    String message = r.getChannels().get(0).getMessage();
    assertThat(message).contains("my-topic-1", "9");
}
Also used : Message(org.eclipse.microprofile.reactive.messaging.Message) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) AtomicReference(java.util.concurrent.atomic.AtomicReference) UnsatisfiedResolutionException(javax.enterprise.inject.UnsatisfiedResolutionException) DeploymentException(javax.enterprise.inject.spi.DeploymentException) IncomingKafkaRecordMetadata(io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata) HealthReport(io.smallrye.reactive.messaging.health.HealthReport) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaConnectorIncomingConfiguration(io.smallrye.reactive.messaging.kafka.KafkaConnectorIncomingConfiguration) MapBasedConfig(io.smallrye.reactive.messaging.test.common.config.MapBasedConfig) Test(org.junit.jupiter.api.Test)

Example 7 with IncomingKafkaRecordMetadata

use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.

The following example shows the use in the class CommitStrategiesTest, method testThrottledStrategyWithTooManyUnackedMessages.

@Test
void testThrottledStrategyWithTooManyUnackedMessages() throws Exception {
    // With the throttled commit strategy and a 1s unprocessed-record max age,
    // records left un-acknowledged must flip the liveness check to KO.
    MapBasedConfig config = commonConfiguration()
            .with("client.id", UUID.randomUUID().toString())
            .with("commit-strategy", "throttled")
            .with("auto.offset.reset", "earliest")
            .with("health-enabled", true)
            .with("throttled.unprocessed-record-max-age.ms", 1000)
            .with("auto.commit.interval.ms", 100);
    String groupId = UUID.randomUUID().toString();
    source = new KafkaSource<>(vertx, groupId, new KafkaConnectorIncomingConfiguration(config),
            getConsumerRebalanceListeners(), CountKafkaCdiEvents.noCdiEvents,
            getDeserializationFailureHandlers(), -1);
    injectMockConsumer(source, consumer);
    List<Message<?>> received = new ArrayList<>();
    source.getStream().subscribe().with(received::add);
    // Partition 0 begins at offset 0, partition 1 at offset 5.
    Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition(TOPIC, 0), 0L);
    beginningOffsets.put(new TopicPartition(TOPIC, 1), 5L);
    consumer.updateBeginningOffsets(beginningOffsets);
    consumer.schedulePollTask(() -> {
        consumer.rebalance(beginningOffsets.keySet());
        source.getCommitHandler().partitionsAssigned(beginningOffsets.keySet());
        for (int offset = 0; offset < 500; offset++) {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, offset, "k", "v0-" + offset));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, offset, "r", "v1-" + offset));
        }
    });
    // 500 records per partition, minus the 5 skipped by partition 1's beginning offset.
    int expected = 500 * 2 - 5;
    await().until(() -> received.size() == expected);
    assertThat(received).hasSize(expected);
    // Ack everything from partition 0 and only the first 5 records of partition 1;
    // the rest of partition 1 stays un-acked and will age past the configured maximum.
    int ackedFromPartition1 = 0;
    for (Message<?> msg : received) {
        IncomingKafkaRecordMetadata<?, ?> metadata = msg.getMetadata(IncomingKafkaRecordMetadata.class)
                .orElseThrow(() -> new Exception("metadata expected"));
        if (metadata.getPartition() == 0) {
            msg.ack().toCompletableFuture().join();
        } else if (ackedFromPartition1 < 5) {
            msg.ack().toCompletableFuture().join();
            ackedFromPartition1++;
        }
        LegacyMetadataTestUtils.tempCompareLegacyAndApiMetadata(metadata, msg);
    }
    // Keep building health reports until the source reports itself not alive.
    AtomicReference<HealthReport> lastReport = new AtomicReference<>();
    await().until(() -> {
        HealthReport.HealthReportBuilder builder = HealthReport.builder();
        source.isAlive(builder);
        HealthReport current = builder.build();
        lastReport.set(current);
        return !current.isOk();
    });
    // The KO message should reference the stalled topic-partition and offset.
    String failureMessage = lastReport.get().getChannels().get(0).getMessage();
    assertThat(failureMessage).contains("my-topic-1", "9");
}
Also used : Message(org.eclipse.microprofile.reactive.messaging.Message) AtomicReference(java.util.concurrent.atomic.AtomicReference) AmbiguousResolutionException(javax.enterprise.inject.AmbiguousResolutionException) UnsatisfiedResolutionException(javax.enterprise.inject.UnsatisfiedResolutionException) IncomingKafkaRecordMetadata(io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata) HealthReport(io.smallrye.reactive.messaging.health.HealthReport) TopicPartition(org.apache.kafka.common.TopicPartition) MapBasedConfig(io.smallrye.reactive.messaging.test.common.config.MapBasedConfig) RepeatedTest(org.junit.jupiter.api.RepeatedTest) Test(org.junit.jupiter.api.Test)

Example 8 with IncomingKafkaRecordMetadata

use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.

The following example shows the use in the class KafkaCloudEventHelper, method createStructuredRecord.

/**
 * Builds a structured-mode Cloud Event {@link ProducerRecord}: the Cloud Event
 * attributes and the payload are serialized together into a single JSON document
 * used as the record value, and a {@code content-type} header advertising the
 * structured content type is added when the outgoing headers do not already carry one.
 *
 * @param message the outgoing message whose payload becomes the {@code data} field
 * @param topic the destination Kafka topic
 * @param outgoingMetadata optional outgoing Kafka metadata (partition, key, timestamp, headers)
 * @param incomingMetadata optional incoming Kafka metadata, consulted for header propagation
 * @param ceMetadata the Cloud Event metadata; an empty instance is substituted when {@code null}
 * @param configuration the sink runtime configuration providing defaults and fallbacks
 * @return the record carrying the JSON-encoded Cloud Event as its value
 */
@SuppressWarnings("rawtypes")
public static ProducerRecord<?, ?> createStructuredRecord(Message<?> message, String topic, OutgoingKafkaRecordMetadata<?> outgoingMetadata, IncomingKafkaRecordMetadata<?, ?> incomingMetadata, OutgoingCloudEventMetadata<?> ceMetadata, RuntimeKafkaSinkConfiguration configuration) {
    if (ceMetadata == null) {
        ceMetadata = OutgoingCloudEventMetadata.builder().build();
    }
    // Record-level attributes resolved from the metadata with configuration fallbacks.
    Integer partition = getPartition(outgoingMetadata, configuration);
    Object key = getKey(message, outgoingMetadata, ceMetadata, configuration);
    Long timestamp = getTimestamp(outgoingMetadata);
    List<Header> headers = getHeaders(outgoingMetadata, incomingMetadata, configuration);
    String source = getSource(ceMetadata, configuration);
    String type = getType(ceMetadata, configuration);
    Optional<String> subject = getSubject(ceMetadata, configuration);
    Optional<String> dataContentType = getDataContentType(ceMetadata, configuration);
    Optional<URI> schema = getDataSchema(ceMetadata, configuration);
    // if headers does not contain a "content-type" header add one
    Optional<Header> contentType = headers.stream().filter(h -> h.key().equalsIgnoreCase(KAFKA_HEADER_CONTENT_TYPE)).findFirst();
    if (!contentType.isPresent()) {
        // Encode explicitly as UTF-8 rather than relying on the platform default charset.
        headers.add(new RecordHeader(KAFKA_HEADER_CONTENT_TYPE, STRUCTURED_CONTENT_TYPE.getBytes(StandardCharsets.UTF_8)));
    }
    // We need to build the JSON Object representing the Cloud Event
    JsonObject json = new JsonObject();
    json.put(CE_ATTRIBUTE_SPEC_VERSION, ceMetadata.getSpecVersion()).put(CE_ATTRIBUTE_TYPE, type).put(CE_ATTRIBUTE_SOURCE, source).put(CE_ATTRIBUTE_ID, ceMetadata.getId());
    // "time" comes from the metadata when set; otherwise it is optionally stamped with "now".
    ZonedDateTime time = ceMetadata.getTimeStamp().orElse(null);
    if (time != null) {
        json.put(CE_ATTRIBUTE_TIME, time.toInstant());
    } else if (configuration.getCloudEventsInsertTimestamp()) {
        json.put(CE_ATTRIBUTE_TIME, Instant.now());
    }
    schema.ifPresent(s -> json.put(CE_ATTRIBUTE_DATA_SCHEMA, s));
    dataContentType.ifPresent(s -> json.put(CE_ATTRIBUTE_DATA_CONTENT_TYPE, s));
    subject.ifPresent(s -> json.put(CE_ATTRIBUTE_SUBJECT, s));
    // Extensions
    ceMetadata.getExtensions().forEach(json::put);
    // Encode the payload to json; a Record wrapper is unwrapped to its value first.
    Object payload = message.getPayload();
    if (payload instanceof Record) {
        payload = ((Record) payload).value();
    }
    if (payload instanceof String) {
        json.put("data", payload);
    } else {
        json.put("data", JsonObject.mapFrom(payload));
    }
    return new ProducerRecord<>(topic, partition, timestamp, key, json.encode(), headers);
}
Also used : DateTimeFormatterBuilder(java.time.format.DateTimeFormatterBuilder) ChronoField(java.time.temporal.ChronoField) java.util(java.util) Record(io.smallrye.reactive.messaging.kafka.Record) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ZonedDateTime(java.time.ZonedDateTime) Headers(org.apache.kafka.common.header.Headers) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) CE_KAFKA_TOPIC(io.smallrye.reactive.messaging.kafka.IncomingKafkaCloudEventMetadata.CE_KAFKA_TOPIC) IncomingKafkaRecordMetadata(io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata) RuntimeKafkaSinkConfiguration(io.smallrye.reactive.messaging.kafka.impl.RuntimeKafkaSinkConfiguration) CloudEventMetadata(io.smallrye.reactive.messaging.ce.CloudEventMetadata) DefaultIncomingCloudEventMetadata(io.smallrye.reactive.messaging.ce.impl.DefaultIncomingCloudEventMetadata) BaseCloudEventMetadata(io.smallrye.reactive.messaging.ce.impl.BaseCloudEventMetadata) CE_KAFKA_KEY(io.smallrye.reactive.messaging.kafka.IncomingKafkaCloudEventMetadata.CE_KAFKA_KEY) JsonObject(io.vertx.core.json.JsonObject) URI(java.net.URI) IncomingKafkaCloudEventMetadata(io.smallrye.reactive.messaging.kafka.IncomingKafkaCloudEventMetadata) Instant(java.time.Instant) DefaultCloudEventMetadataBuilder(io.smallrye.reactive.messaging.ce.DefaultCloudEventMetadataBuilder) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) Message(org.eclipse.microprofile.reactive.messaging.Message) OutgoingCloudEventMetadata(io.smallrye.reactive.messaging.ce.OutgoingCloudEventMetadata) OutgoingKafkaRecordMetadata(io.smallrye.reactive.messaging.kafka.api.OutgoingKafkaRecordMetadata) Buffer(io.vertx.mutiny.core.buffer.Buffer) Header(org.apache.kafka.common.header.Header) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) DateTimeFormatter(java.time.format.DateTimeFormatter) JsonObject(io.vertx.core.json.JsonObject) URI(java.net.URI) 
RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) ZonedDateTime(java.time.ZonedDateTime) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) JsonObject(io.vertx.core.json.JsonObject) Record(io.smallrye.reactive.messaging.kafka.Record) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader)

Example 9 with IncomingKafkaRecordMetadata

use of io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata in project smallrye-reactive-messaging by smallrye.

The following example shows the use in the class MultiTopicsTest, method testWithPattern.

@Test
public void testWithPattern() {
    // A consumer configured with "pattern=true" must subscribe to every topic matching
    // the regex "greetings-.+" and ignore non-matching topics ("do-not-match" below).
    String topic1 = "greetings-" + UUID.randomUUID();
    String topic2 = "greetings-" + UUID.randomUUID();
    String topic3 = "greetings-" + UUID.randomUUID();
    companion.topics().createAndWait(topic1, 1);
    companion.topics().createAndWait(topic2, 1);
    companion.topics().createAndWait(topic3, 1);
    KafkaConsumer bean = runApplication(kafkaConfig("mp.messaging.incoming.kafka").with("value.deserializer", StringDeserializer.class.getName()).with("topic", "greetings-.+").with("pattern", true).with("auto.offset.reset", "earliest"), KafkaConsumer.class);
    await().until(this::isReady);
    await().until(this::isAlive);
    assertThat(bean.getMessages()).isEmpty();
    // 3 records into each matching topic, plus 3 into a topic the pattern must not match.
    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>(topic1, "hello"), 3);
    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>(topic2, "hallo"), 3);
    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>(topic3, "bonjour"), 3);
    companion.produceStrings().usingGenerator(i -> new ProducerRecord<>("do-not-match", "Bahh!"), 3);
    await().until(() -> bean.getMessages().size() >= 9);
    // Count the received messages per topic and check each payload matches its topic.
    AtomicInteger top1 = new AtomicInteger();
    AtomicInteger top2 = new AtomicInteger();
    AtomicInteger top3 = new AtomicInteger();
    bean.getMessages().forEach(message -> {
        // Wildcard type parameters instead of the raw type, consistent with the other tests.
        IncomingKafkaRecordMetadata<?, ?> record = message.getMetadata(IncomingKafkaRecordMetadata.class).orElse(null);
        assertThat(record).isNotNull();
        String topic = record.getTopic();
        if (topic.equals(topic1)) {
            top1.incrementAndGet();
            assertThat(message.getPayload()).isEqualTo("hello");
        } else if (topic.equals(topic2)) {
            top2.incrementAndGet();
            assertThat(message.getPayload()).isEqualTo("hallo");
        } else if (topic.equals(topic3)) {
            top3.incrementAndGet();
            assertThat(message.getPayload()).isEqualTo("bonjour");
        }
        LegacyMetadataTestUtils.tempCompareLegacyAndApiMetadata(record, message);
    });
    // Exactly 3 per matching topic — and, implicitly, nothing from "do-not-match".
    assertThat(top1).hasValue(3);
    assertThat(top2).hasValue(3);
    assertThat(top3).hasValue(3);
}
Also used : IncomingKafkaRecordMetadata(io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) RepeatedTest(org.junit.jupiter.api.RepeatedTest) Test(org.junit.jupiter.api.Test)

Aggregations

IncomingKafkaRecordMetadata (io.smallrye.reactive.messaging.kafka.api.IncomingKafkaRecordMetadata)9 Message (org.eclipse.microprofile.reactive.messaging.Message)3 Test (org.junit.Test)3 Test (org.junit.jupiter.api.Test)3 HealthReport (io.smallrye.reactive.messaging.health.HealthReport)2 MapBasedConfig (io.smallrye.reactive.messaging.test.common.config.MapBasedConfig)2 Instant (java.time.Instant)2 HashMap (java.util.HashMap)2 HashSet (java.util.HashSet)2 AtomicReference (java.util.concurrent.atomic.AtomicReference)2 UnsatisfiedResolutionException (javax.enterprise.inject.UnsatisfiedResolutionException)2 TopicPartition (org.apache.kafka.common.TopicPartition)2 Header (org.apache.kafka.common.header.Header)2 Headers (org.apache.kafka.common.header.Headers)2 RepeatedTest (org.junit.jupiter.api.RepeatedTest)2 CloudEventMetadata (io.smallrye.reactive.messaging.ce.CloudEventMetadata)1 DefaultCloudEventMetadataBuilder (io.smallrye.reactive.messaging.ce.DefaultCloudEventMetadataBuilder)1 OutgoingCloudEventMetadata (io.smallrye.reactive.messaging.ce.OutgoingCloudEventMetadata)1 BaseCloudEventMetadata (io.smallrye.reactive.messaging.ce.impl.BaseCloudEventMetadata)1 DefaultIncomingCloudEventMetadata (io.smallrye.reactive.messaging.ce.impl.DefaultIncomingCloudEventMetadata)1