
Example 21 with ConsumerConfig

use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka-streams-examples by confluentinc.

the class SumLambdaIntegrationTest method shouldSumEvenNumbers.

@Test
public void shouldSumEvenNumbers() throws Exception {
    List<Integer> inputValues = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
    List<Integer> expectedValues = Collections.singletonList(30);
    // 
    // Step 1: Configure and start the processor topology.
    // 
    StreamsBuilder builder = new StreamsBuilder();
    Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "sum-lambda-integration-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
    // The commit interval for flushing records to state stores and downstream must be lower than
    // this integration test's timeout (30 secs) to ensure we observe the expected processing results.
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10 * 1000);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    KStream<Integer, Integer> input = builder.stream(inputTopic);
    // Keep only the even values, rekey them all to a single key, and sum them up.
    KTable<Integer, Integer> sumOfEvenNumbers = input.filter((k, v) -> v % 2 == 0).selectKey((k, v) -> 1).groupByKey().reduce((v1, v2) -> v1 + v2);
    sumOfEvenNumbers.toStream().to(outputTopic);
    KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
    streams.start();
    // 
    // Step 2: Produce some input data to the input topic.
    // 
    Properties producerConfig = new Properties();
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
    producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
    producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    IntegrationTestUtils.produceValuesSynchronously(inputTopic, inputValues, producerConfig);
    // 
    // Step 3: Verify the application's output data.
    // 
    Properties consumerConfig = new Properties();
    consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "sum-lambda-integration-test-standard-consumer");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
    consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
    List<Integer> actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size());
    streams.close();
    assertThat(actualValues).isEqualTo(expectedValues);
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StreamsConfig(org.apache.kafka.streams.StreamsConfig) KTable(org.apache.kafka.streams.kstream.KTable) Arrays(java.util.Arrays) Properties(java.util.Properties) BeforeClass(org.junit.BeforeClass) TestUtils(org.apache.kafka.test.TestUtils) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) Test(org.junit.Test) KStream(org.apache.kafka.streams.kstream.KStream) List(java.util.List) EmbeddedSingleNodeKafkaCluster(io.confluent.examples.streams.kafka.EmbeddedSingleNodeKafkaCluster) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) IntegerDeserializer(org.apache.kafka.common.serialization.IntegerDeserializer) Serdes(org.apache.kafka.common.serialization.Serdes) KafkaStreams(org.apache.kafka.streams.KafkaStreams) ClassRule(org.junit.ClassRule) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) Collections(java.util.Collections)
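The same even-sum topology can also be exercised without an embedded broker. Below is a minimal sketch using TopologyTestDriver from kafka-streams-test-utils; the topic names and class name here are made up for illustration, and the driver API shown (createInputTopic/createOutputTopic) assumes Kafka 2.4 or later.

import java.util.Properties;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.KStream;

public class SumEvenNumbersDriverSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<Integer, Integer> input = builder.stream("numbers-in");
        input.filter((k, v) -> v % 2 == 0)
             .selectKey((k, v) -> 1)
             .groupByKey()
             .reduce(Integer::sum)
             .toStream()
             .to("numbers-out");

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sum-driver-sketch");
        // The driver never opens a network connection, so any address will do.
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());

        try (TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
            TestInputTopic<Integer, Integer> in =
                driver.createInputTopic("numbers-in", new IntegerSerializer(), new IntegerSerializer());
            TestOutputTopic<Integer, Integer> out =
                driver.createOutputTopic("numbers-out", new IntegerDeserializer(), new IntegerDeserializer());
            for (int v = 1; v <= 10; v++) {
                in.pipeInput(v, v);
            }
            // The KTable emits an update per even input; the last update carries the total.
            Integer sum = null;
            while (!out.isEmpty()) {
                sum = out.readValue();
            }
            System.out.println("sum of even numbers = " + sum); // prints 30
        }
    }
}

Because the driver processes records synchronously, no commit-interval tuning or waitUntilMinValuesRecordsReceived polling is needed, which is why this style is often preferred for pure topology logic tests.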

Example 22 with ConsumerConfig

use of org.apache.kafka.clients.consumer.ConsumerConfig in project incubator-skywalking by apache.

the class ConsumerConstructorInterceptor method onConstruct.

@Override
public void onConstruct(EnhancedInstance objInst, Object[] allArguments) {
    ConsumerConfig config = (ConsumerConfig) allArguments[0];
    // Capture the bootstrap server addresses for later use by the tracing instrumentation.
    ConsumerEnhanceRequiredInfo requiredInfo = new ConsumerEnhanceRequiredInfo();
    requiredInfo.setBrokerServers(config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
    objInst.setSkyWalkingDynamicField(requiredInfo);
}
Also used : ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig)
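Because ConsumerConfig extends AbstractConfig, typed getters such as getList(...) become available once the config object is constructed; that is what the interceptor above relies on. A self-contained sketch of that behavior (the broker addresses and class name are made up):

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

public class BootstrapServersSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9092");
        // key/value deserializers are required configs with no default, so they must be set
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        ConsumerConfig config = new ConsumerConfig(props);
        // getList splits the comma-separated value into ["broker1:9092", "broker2:9092"]
        List<String> brokers = config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
        System.out.println(brokers);
    }
}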

Example 23 with ConsumerConfig

use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafkastreams-cep by fhussonnois.

the class CEPStockKStreamsIntegrationTest method test.

@Test
public void test() throws ExecutionException, InterruptedException {
    final Collection<KeyValue<String, String>> batch1 = Arrays.asList(
        new KeyValue<>(null, "{\"name\":\"e1\",\"price\":100,\"volume\":1010}"),
        new KeyValue<>(null, "{\"name\":\"e2\",\"price\":120,\"volume\":990}"),
        new KeyValue<>(null, "{\"name\":\"e3\",\"price\":120,\"volume\":1005}"),
        new KeyValue<>(null, "{\"name\":\"e4\",\"price\":121,\"volume\":999}"),
        new KeyValue<>(null, "{\"name\":\"e5\",\"price\":120,\"volume\":999}"),
        new KeyValue<>(null, "{\"name\":\"e6\",\"price\":125,\"volume\":750}"),
        new KeyValue<>(null, "{\"name\":\"e7\",\"price\":120,\"volume\":950}"),
        new KeyValue<>(null, "{\"name\":\"e8\",\"price\":120,\"volume\":700}"));
    IntegrationTestUtils.produceKeyValuesSynchronously(INPUT_STREAM, batch1, TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()), mockTime);
    // build query
    ComplexStreamsBuilder builder = new ComplexStreamsBuilder();
    CEPStream<String, StockEvent> stream = builder.stream(INPUT_STREAM);
    KStream<String, Sequence<String, StockEvent>> stocks = stream.query("Stocks", Patterns.STOCKS);
    stocks.mapValues(seq -> {
        JSONObject json = new JSONObject();
        seq.asMap().forEach((k, v) -> {
            JSONArray events = new JSONArray();
            json.put(k, events);
            List<String> collect = v.stream().map(e -> e.value.name).collect(Collectors.toList());
            Collections.reverse(collect);
            collect.forEach(events::add);
        });
        return json.toJSONString();
    }).through(OUTPUT_STREAM, Produced.with(null, Serdes.String())).print(Printed.toSysOut());
    Topology topology = builder.build();
    kafkaStreams = new KafkaStreams(topology, streamsConfiguration);
    kafkaStreams.start();
    final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class);
    List<KeyValue<String, String>> result = IntegrationTestUtils.readKeyValues(OUTPUT_STREAM, consumerConfig, TimeUnit.SECONDS.toMillis(10), 4);
    Assert.assertEquals(4, result.size());
    Assert.assertEquals("{\"0\":[\"e1\"],\"1\":[\"e2\",\"e3\",\"e4\",\"e5\"],\"2\":[\"e6\"]}", result.get(0).value);
    Assert.assertEquals("{\"0\":[\"e3\"],\"1\":[\"e4\"],\"2\":[\"e6\"]}", result.get(1).value);
    Assert.assertEquals("{\"0\":[\"e1\"],\"1\":[\"e2\",\"e3\",\"e4\",\"e5\",\"e6\",\"e7\"],\"2\":[\"e8\"]}", result.get(2).value);
    Assert.assertEquals("{\"0\":[\"e3\"],\"1\":[\"e4\",\"e6\"],\"2\":[\"e8\"]}", result.get(3).value);
}
Also used : Sequence(com.github.fhuss.kafka.streams.cep.Sequence) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) CEPStream(com.github.fhuss.kafka.streams.cep.CEPStream) KStream(org.apache.kafka.streams.kstream.KStream) JSONArray(org.json.simple.JSONArray) MockTime(kafka.utils.MockTime) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) After(org.junit.After) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ClassRule(org.junit.ClassRule) Pattern(com.github.fhuss.kafka.streams.cep.pattern.Pattern) Printed(org.apache.kafka.streams.kstream.Printed) Before(org.junit.Before) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) Test(org.junit.Test) IOException(java.io.IOException) ComplexStreamsBuilder(com.github.fhuss.kafka.streams.cep.ComplexStreamsBuilder) Collectors(java.util.stream.Collectors) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) JSONObject(org.json.simple.JSONObject) QueryBuilder(com.github.fhuss.kafka.streams.cep.pattern.QueryBuilder) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Assert(org.junit.Assert) Collections(java.util.Collections) Topology(org.apache.kafka.streams.Topology)
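The mapValues step in this test flattens each matched Sequence into a JSON object keyed by pattern stage; the Collections.reverse call implies the sequence stores events newest-first, so each stage's list is flipped back to arrival order before serializing. A standalone sketch of just that serialization step with json-simple (the stage name and event names are made up):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;

public class SequenceToJsonSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        // Pretend stage "1" matched e5, e4, e3, e2 (newest first).
        List<String> stage1 = new ArrayList<>(Arrays.asList("e5", "e4", "e3", "e2"));
        Collections.reverse(stage1); // back to arrival order: e2, e3, e4, e5
        JSONArray events = new JSONArray();
        events.addAll(stage1);
        JSONObject json = new JSONObject();
        json.put("1", events);
        System.out.println(json.toJSONString()); // {"1":["e2","e3","e4","e5"]}
    }
}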

Example 24 with ConsumerConfig

use of org.apache.kafka.clients.consumer.ConsumerConfig in project samza by apache.

the class TestKafkaSystemConsumer method createConsumer.

private KafkaSystemConsumer createConsumer(String fetchMsg, String fetchBytes) {
    final Map<String, String> map = new HashMap<>();
    map.put(JobConfig.JOB_NAME, TEST_JOB);
    map.put(String.format(KafkaConfig.CONSUMER_FETCH_THRESHOLD(), TEST_SYSTEM), fetchMsg);
    map.put(String.format(KafkaConfig.CONSUMER_FETCH_THRESHOLD_BYTES(), TEST_SYSTEM), fetchBytes);
    map.put(String.format("systems.%s.consumer.%s", TEST_SYSTEM, ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG), BOOTSTRAP_SERVERS);
    map.put(JobConfig.JOB_NAME, "jobName");
    Config config = new MapConfig(map);
    String clientId = KafkaConsumerConfig.createClientId(TEST_CLIENT_ID, config);
    KafkaConsumerConfig consumerConfig = KafkaConsumerConfig.getKafkaSystemConsumerConfig(config, TEST_SYSTEM, clientId);
    final KafkaConsumer<byte[], byte[]> kafkaConsumer = new MockKafkaConsumer(consumerConfig);
    MockKafkaSystemConsumer newKafkaSystemConsumer = new MockKafkaSystemConsumer(kafkaConsumer, TEST_SYSTEM, config, TEST_CLIENT_ID, new KafkaSystemConsumerMetrics(TEST_SYSTEM, new NoOpMetricsRegistry()), System::currentTimeMillis);
    return newKafkaSystemConsumer;
}
Also used : KafkaConsumerConfig(org.apache.samza.config.KafkaConsumerConfig) NoOpMetricsRegistry(org.apache.samza.util.NoOpMetricsRegistry) HashMap(java.util.HashMap) JobConfig(org.apache.samza.config.JobConfig) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) KafkaConfig(org.apache.samza.config.KafkaConfig) Config(org.apache.samza.config.Config) MapConfig(org.apache.samza.config.MapConfig)
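Samza namespaces raw Kafka client properties under systems.<system-name>.consumer.*, which is exactly what the String.format call above constructs; getKafkaSystemConsumerConfig then presumably resolves that subset into plain consumer properties for the underlying KafkaConsumer. A tiny sketch of the key layout (the system name and class name are made up):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;

public class SamzaConsumerKeySketch {
    public static void main(String[] args) {
        String system = "my-kafka"; // hypothetical system name
        String key = String.format("systems.%s.consumer.%s",
                system, ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
        Map<String, String> map = new HashMap<>();
        map.put(key, "localhost:9092");
        // key is "systems.my-kafka.consumer.bootstrap.servers"
        System.out.println(key + " -> " + map.get(key));
    }
}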

Example 25 with ConsumerConfig

use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.

the class NamedTopologyIntegrationTest method shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology.

@Test
public void shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology() throws Exception {
    try {
        // For this test, one topology reads from an input topic with a single partition, so there is
        // exactly one instance of that topology's store, and any method that looks up all hosts for a
        // specific store and topology should return exactly one StreamsMetadata.
        CLUSTER.createTopic(SINGLE_PARTITION_INPUT_STREAM, 1, 1);
        CLUSTER.createTopic(SINGLE_PARTITION_OUTPUT_STREAM, 1, 1);
        produceToInputTopics(SINGLE_PARTITION_INPUT_STREAM, STANDARD_INPUT_DATA);
        final String topology1Store = "store-" + TOPOLOGY_1;
        final String topology2Store = "store-" + TOPOLOGY_2;
        topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams.addNamedTopology(topology1Builder.build());
        streams.removeNamedTopology(TOPOLOGY_1);
        assertThat(streams.getTopologyByName(TOPOLOGY_1), is(Optional.empty()));
        streams.addNamedTopology(topology1Builder.build());
        streams.addNamedTopology(topology2Builder.build());
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(15));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SINGLE_PARTITION_OUTPUT_STREAM, 3), equalTo(COUNT_OUTPUT_DATA));
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(TOPOLOGY_1, topology1Store, QueryableStoreTypes.keyValueStore()));
        assertThat(store.get("A"), equalTo(2L));
        final Collection<StreamsMetadata> streamsMetadata = streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1);
        final Collection<StreamsMetadata> streamsMetadata2 = streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2);
        assertThat(streamsMetadata.size(), equalTo(1));
        assertThat(streamsMetadata2.size(), equalTo(1));
        final KeyQueryMetadata keyMetadata = streams.queryMetadataForKey(topology1Store, "A", new StringSerializer(), TOPOLOGY_1);
        final KeyQueryMetadata keyMetadata2 = streams.queryMetadataForKey(topology2Store, "A", new StringSerializer(), TOPOLOGY_2);
        assertThat(keyMetadata, not(NOT_AVAILABLE));
        assertThat(keyMetadata, equalTo(keyMetadata2));
        final Map<String, Map<Integer, LagInfo>> partitionLags1 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_1);
        final Map<String, Map<Integer, LagInfo>> partitionLags2 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_2);
        assertThat(partitionLags1.keySet(), equalTo(singleton(topology1Store)));
        assertThat(partitionLags1.get(topology1Store).keySet(), equalTo(mkSet(0, 1)));
        assertThat(partitionLags2.keySet(), equalTo(singleton(topology2Store)));
        // only one copy of the store in topology-2
        assertThat(partitionLags2.get(topology2Store).keySet(), equalTo(singleton(0)));
        // Start up a second node with both topologies
        setupSecondKafkaStreams();
        topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder2.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams2.start(asList(topology1Builder2.build(), topology2Builder2.build()));
        waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
        verifyMetadataForTopology(TOPOLOGY_1, streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1), streams2.streamsMetadataForStore(topology1Store, TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2), streams2.streamsMetadataForStore(topology2Store, TOPOLOGY_2));
        verifyMetadataForTopology(TOPOLOGY_1, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_1), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_2), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_2));
    } finally {
        CLUSTER.deleteTopics(SINGLE_PARTITION_INPUT_STREAM, SINGLE_PARTITION_OUTPUT_STREAM);
    }
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) Stores(org.apache.kafka.streams.state.Stores) StreamsException(org.apache.kafka.streams.errors.StreamsException) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) Collections.singletonList(java.util.Collections.singletonList) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Map(java.util.Map) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ClientUtils.extractThreadId(org.apache.kafka.streams.processor.internals.ClientUtils.extractThreadId) MissingSourceTopicException(org.apache.kafka.streams.errors.MissingSourceTopicException) TopicPartition(org.apache.kafka.common.TopicPartition) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) KafkaClientSupplier(org.apache.kafka.streams.KafkaClientSupplier) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) State(org.apache.kafka.streams.KafkaStreams.State) Collectors(java.util.stream.Collectors) Bytes(org.apache.kafka.common.utils.Bytes) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) Materialized(org.apache.kafka.streams.kstream.Materialized) Optional(java.util.Optional) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) Queue(java.util.Queue) Pattern(java.util.regex.Pattern) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) CoreMatchers.not(org.hamcrest.CoreMatchers.not) NamedTopologyStoreQueryParameters(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyStoreQueryParameters) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) TestUtils.retryOnExceptionWithTimeout(org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout) KeyValue.pair(org.apache.kafka.streams.KeyValue.pair) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) Before(org.junit.Before) IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived) KTable(org.apache.kafka.streams.kstream.KTable) IntegrationTestUtils.waitForApplicationState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForApplicationState) Properties(java.util.Properties) StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) Iterator(java.util.Iterator) Consumed(org.apache.kafka.streams.kstream.Consumed) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) Test(org.junit.Test) RemoveNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult) NOT_AVAILABLE(org.apache.kafka.streams.KeyQueryMetadata.NOT_AVAILABLE) Rule(org.junit.Rule) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) LagInfo(org.apache.kafka.streams.LagInfo) UniqueTopicSerdeScope(org.apache.kafka.streams.utils.UniqueTopicSerdeScope)
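Note that the NamedTopology classes used in this example live under processor.internals and are not public API. The public, single-topology equivalent of the store lookup shown above goes through StoreQueryParameters; a minimal sketch, assuming a running KafkaStreams instance that materialized a key-value store (the store name "counts" and class name are made up):

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class StoreLookupSketch {
    // Assumes `streams` is running and has materialized a key-value store named "counts".
    static Long countFor(KafkaStreams streams, String key) {
        ReadOnlyKeyValueStore<String, Long> store = streams.store(
                StoreQueryParameters.fromNameAndType("counts",
                        QueryableStoreTypes.<String, Long>keyValueStore()));
        return store.get(key);
    }
}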

Aggregations

ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig) 35
List (java.util.List) 31
Map (java.util.Map) 24
Set (java.util.Set) 24
Collectors (java.util.stream.Collectors) 24
Pattern (java.util.regex.Pattern) 23
Optional (java.util.Optional) 22
TimeUnit (java.util.concurrent.TimeUnit) 20
UUID (java.util.UUID) 19
Handler (io.vertx.core.Handler) 18
Vertx (io.vertx.core.Vertx) 18
Buffer (io.vertx.core.buffer.Buffer) 18
KafkaConsumerRecord (io.vertx.kafka.client.consumer.KafkaConsumerRecord) 18
Logger (org.slf4j.Logger) 18
LoggerFactory (org.slf4j.LoggerFactory) 18
Instant (java.time.Instant) 17
HashMap (java.util.HashMap) 17
Truth.assertThat (com.google.common.truth.Truth.assertThat) 16
Future (io.vertx.core.Future) 16
Promise (io.vertx.core.Promise) 16