Example 1 with LagInfo

Use of org.apache.kafka.streams.LagInfo in the apache/kafka project.

From the class LagFetchIntegrationTest, method shouldFetchLagsDuringRestoration:

@Test
public void shouldFetchLagsDuringRestoration() throws Exception {
    IntegrationTestUtils.produceKeyValuesSynchronously(
        inputTopicName,
        mkSet(new KeyValue<>("k1", 1L),
              new KeyValue<>("k2", 2L),
              new KeyValue<>("k3", 3L),
              new KeyValue<>("k4", 4L),
              new KeyValue<>("k5", 5L)),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, LongSerializer.class, new Properties()),
        mockTime);
    // create stream threads
    final Properties props = (Properties) streamsConfiguration.clone();
    final File stateDir = TestUtils.tempDirectory(stateStoreName + "0");
    props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:0");
    props.put(StreamsConfig.CLIENT_ID_CONFIG, "instance-0");
    props.put(StreamsConfig.STATE_DIR_CONFIG, stateDir.getAbsolutePath());
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, Long> t1 = builder.table(inputTopicName, Materialized.as(stateStoreName));
    t1.toStream().to(outputTopicName);
    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    try {
        // First start up the active.
        TestUtils.waitForCondition(() -> streams.allLocalStorePartitionLags().size() == 0, WAIT_TIMEOUT_MS, "Should see empty lag map before streams is started.");
        // Get the instance to fully catch up and reach RUNNING state
        startApplicationAndWaitUntilRunning(Collections.singletonList(streams), Duration.ofSeconds(60));
        IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfiguration, outputTopicName, 5, WAIT_TIMEOUT_MS);
        // check for proper lag values.
        final AtomicReference<LagInfo> zeroLagRef = new AtomicReference<>();
        TestUtils.waitForCondition(() -> {
            final Map<String, Map<Integer, LagInfo>> offsetLagInfoMap = streams.allLocalStorePartitionLags();
            assertThat(offsetLagInfoMap.size(), equalTo(1));
            assertThat(offsetLagInfoMap.keySet(), equalTo(mkSet(stateStoreName)));
            assertThat(offsetLagInfoMap.get(stateStoreName).size(), equalTo(1));
            final LagInfo zeroLagInfo = offsetLagInfoMap.get(stateStoreName).get(0);
            assertThat(zeroLagInfo.currentOffsetPosition(), equalTo(5L));
            assertThat(zeroLagInfo.endOffsetPosition(), equalTo(5L));
            assertThat(zeroLagInfo.offsetLag(), equalTo(0L));
            zeroLagRef.set(zeroLagInfo);
            return true;
        }, WAIT_TIMEOUT_MS, "Eventually should reach zero lag.");
        // Kill instance, delete state to force restoration.
        assertThat("Streams instance did not close within timeout", streams.close(Duration.ofSeconds(60)));
        IntegrationTestUtils.purgeLocalStreamsState(streamsConfiguration);
        Files.walk(stateDir.toPath()).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(f -> assertTrue("Some state " + f + " could not be deleted", f.delete()));
        // wait till the lag goes down to 0
        final KafkaStreams restartedStreams = new KafkaStreams(builder.build(), props);
        // set a state restoration listener to track progress of restoration
        final CountDownLatch restorationEndLatch = new CountDownLatch(1);
        final Map<String, Map<Integer, LagInfo>> restoreStartLagInfo = new HashMap<>();
        final Map<String, Map<Integer, LagInfo>> restoreEndLagInfo = new HashMap<>();
        restartedStreams.setGlobalStateRestoreListener(new StateRestoreListener() {

            @Override
            public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
                try {
                    restoreStartLagInfo.putAll(getFirstNonEmptyLagMap(restartedStreams));
                } catch (final Exception e) {
                    LOG.error("Exception while trying to obtain lag map", e);
                }
            }

            @Override
            public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
            }

            @Override
            public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
                try {
                    restoreEndLagInfo.putAll(getFirstNonEmptyLagMap(restartedStreams));
                } catch (final Exception e) {
                    LOG.error("Exception while trying to obtain lag map", e);
                }
                restorationEndLatch.countDown();
            }
        });
        restartedStreams.start();
        restorationEndLatch.await(WAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        TestUtils.waitForCondition(() -> restartedStreams.allLocalStorePartitionLags().get(stateStoreName).get(0).offsetLag() == 0, WAIT_TIMEOUT_MS, "Restarted instance should eventually catch up and have zero lag.");
        final LagInfo fullLagInfo = restoreStartLagInfo.get(stateStoreName).get(0);
        assertThat(fullLagInfo.currentOffsetPosition(), equalTo(0L));
        assertThat(fullLagInfo.endOffsetPosition(), equalTo(5L));
        assertThat(fullLagInfo.offsetLag(), equalTo(5L));
        assertThat(restoreEndLagInfo.get(stateStoreName).get(0), equalTo(zeroLagRef.get()));
    } finally {
        streams.close();
        streams.cleanUp();
    }
}
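
The lag API exercised above is small: KafkaStreams#allLocalStorePartitionLags() returns a map of store name to partition to LagInfo, and each LagInfo exposes the current offset position, the changelog end offset, and their difference. As a minimal sketch of reading it outside a test, assuming an already-started KafkaStreams instance (the helper class and its logging are illustrative, not part of the test above):

import java.util.Map;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.LagInfo;

public final class LagPrinter {

    // Hypothetical helper: print the lag of every local store partition.
    // Assumes `streams` has already been started.
    public static void printLocalStoreLags(final KafkaStreams streams) {
        final Map<String, Map<Integer, LagInfo>> lags = streams.allLocalStorePartitionLags();
        for (final Map.Entry<String, Map<Integer, LagInfo>> store : lags.entrySet()) {
            for (final Map.Entry<Integer, LagInfo> partition : store.getValue().entrySet()) {
                final LagInfo lag = partition.getValue();
                // offsetLag() is the distance between the local position and the changelog end.
                System.out.printf("store=%s partition=%d position=%d end=%d lag=%d%n",
                    store.getKey(), partition.getKey(),
                    lag.currentOffsetPosition(), lag.endOffsetPosition(), lag.offsetLag());
            }
        }
    }
}
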
Also used: StateRestoreListener (org.apache.kafka.streams.processor.StateRestoreListener), KafkaStreams (org.apache.kafka.streams.KafkaStreams), KeyValue (org.apache.kafka.streams.KeyValue), LongSerializer (org.apache.kafka.common.serialization.LongSerializer), HashMap (java.util.HashMap), AtomicReference (java.util.concurrent.atomic.AtomicReference), Properties (java.util.Properties), CountDownLatch (java.util.concurrent.CountDownLatch), LagInfo (org.apache.kafka.streams.LagInfo), IOException (java.io.IOException), StreamsBuilder (org.apache.kafka.streams.StreamsBuilder), TopicPartition (org.apache.kafka.common.TopicPartition), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), File (java.io.File), Map (java.util.Map), IntegrationTest (org.apache.kafka.test.IntegrationTest), Test (org.junit.Test)

Example 2 with LagInfo

Use of org.apache.kafka.streams.LagInfo in the apache/kafka project.

From the class LagFetchIntegrationTest, method shouldFetchLagsDuringRebalancing:

private void shouldFetchLagsDuringRebalancing(final String optimization) throws Exception {
    final CountDownLatch latchTillActiveIsRunning = new CountDownLatch(1);
    final CountDownLatch latchTillStandbyIsRunning = new CountDownLatch(1);
    final CountDownLatch latchTillStandbyHasPartitionsAssigned = new CountDownLatch(1);
    final CyclicBarrier lagCheckBarrier = new CyclicBarrier(2);
    final List<KafkaStreamsWrapper> streamsList = new ArrayList<>();
    IntegrationTestUtils.produceKeyValuesSynchronously(
        inputTopicName,
        mkSet(new KeyValue<>("k1", 1L),
              new KeyValue<>("k2", 2L),
              new KeyValue<>("k3", 3L),
              new KeyValue<>("k4", 4L),
              new KeyValue<>("k5", 5L)),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, LongSerializer.class, new Properties()),
        mockTime);
    // create stream threads
    for (int i = 0; i < 2; i++) {
        final Properties props = (Properties) streamsConfiguration.clone();
        // this test relies on the second instance getting the standby, so we specify
        // an assignor with this contract.
        props.put(StreamsConfig.InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, FallbackPriorTaskAssignor.class.getName());
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:" + i);
        props.put(StreamsConfig.CLIENT_ID_CONFIG, "instance-" + i);
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimization);
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(stateStoreName + i).getAbsolutePath());
        final StreamsBuilder builder = new StreamsBuilder();
        final KTable<String, Long> t1 = builder.table(inputTopicName, Materialized.as(stateStoreName));
        t1.toStream().to(outputTopicName);
        final KafkaStreamsWrapper streams = new KafkaStreamsWrapper(builder.build(props), props);
        streamsList.add(streams);
    }
    final KafkaStreamsWrapper activeStreams = streamsList.get(0);
    final KafkaStreamsWrapper standbyStreams = streamsList.get(1);
    activeStreams.setStreamThreadStateListener((thread, newState, oldState) -> {
        if (newState == StreamThread.State.RUNNING) {
            latchTillActiveIsRunning.countDown();
        }
    });
    standbyStreams.setStreamThreadStateListener((thread, newState, oldState) -> {
        if (oldState == StreamThread.State.PARTITIONS_ASSIGNED && newState == StreamThread.State.RUNNING) {
            latchTillStandbyHasPartitionsAssigned.countDown();
            try {
                lagCheckBarrier.await(60, TimeUnit.SECONDS);
            } catch (final Exception e) {
                throw new RuntimeException(e);
            }
        } else if (newState == StreamThread.State.RUNNING) {
            latchTillStandbyIsRunning.countDown();
        }
    });
    try {
        // First start up the active.
        TestUtils.waitForCondition(() -> activeStreams.allLocalStorePartitionLags().size() == 0, WAIT_TIMEOUT_MS, "Should see empty lag map before streams is started.");
        activeStreams.start();
        latchTillActiveIsRunning.await(60, TimeUnit.SECONDS);
        IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfiguration, outputTopicName, 5, WAIT_TIMEOUT_MS);
        // Check the active reports proper lag values.
        Map<String, Map<Integer, LagInfo>> offsetLagInfoMap = getFirstNonEmptyLagMap(activeStreams);
        assertThat(offsetLagInfoMap.size(), equalTo(1));
        assertThat(offsetLagInfoMap.keySet(), equalTo(mkSet(stateStoreName)));
        assertThat(offsetLagInfoMap.get(stateStoreName).size(), equalTo(1));
        LagInfo lagInfo = offsetLagInfoMap.get(stateStoreName).get(0);
        assertThat(lagInfo.currentOffsetPosition(), equalTo(5L));
        assertThat(lagInfo.endOffsetPosition(), equalTo(5L));
        assertThat(lagInfo.offsetLag(), equalTo(0L));
        // start up the standby & make it pause right after it has partition assigned
        standbyStreams.start();
        latchTillStandbyHasPartitionsAssigned.await(60, TimeUnit.SECONDS);
        offsetLagInfoMap = getFirstNonEmptyLagMap(standbyStreams);
        assertThat(offsetLagInfoMap.size(), equalTo(1));
        assertThat(offsetLagInfoMap.keySet(), equalTo(mkSet(stateStoreName)));
        assertThat(offsetLagInfoMap.get(stateStoreName).size(), equalTo(1));
        lagInfo = offsetLagInfoMap.get(stateStoreName).get(0);
        assertThat(lagInfo.currentOffsetPosition(), equalTo(0L));
        assertThat(lagInfo.endOffsetPosition(), equalTo(5L));
        assertThat(lagInfo.offsetLag(), equalTo(5L));
        // standby thread won't proceed to RUNNING before this barrier is crossed
        lagCheckBarrier.await(60, TimeUnit.SECONDS);
        // wait till the lag goes down to 0, on the standby
        TestUtils.waitForCondition(() -> standbyStreams.allLocalStorePartitionLags().get(stateStoreName).get(0).offsetLag() == 0, WAIT_TIMEOUT_MS, "Standby should eventually catch up and have zero lag.");
    } finally {
        for (final KafkaStreams streams : streamsList) {
            streams.close();
        }
    }
}
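
Lags observed during rebalancing are what make it possible to decide whether a standby's state is fresh enough to serve interactive queries. A minimal sketch of such a gate, assuming a started KafkaStreams instance; the class, method name, and threshold parameter are illustrative, not part of the test:

import java.util.Collections;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.LagInfo;

public final class StalenessGate {

    // Hypothetical gate: true if the local copy of the given store partition
    // is within maxAcceptableLag offsets of its changelog end offset.
    public static boolean isFreshEnough(final KafkaStreams streams,
                                        final String storeName,
                                        final int partition,
                                        final long maxAcceptableLag) {
        final LagInfo lag = streams.allLocalStorePartitionLags()
            .getOrDefault(storeName, Collections.emptyMap())
            .get(partition);
        return lag != null && lag.offsetLag() <= maxAcceptableLag;
    }
}

A query router would typically serve a read locally when this returns true and forward to the active host otherwise.
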
Also used: KafkaStreams (org.apache.kafka.streams.KafkaStreams), KeyValue (org.apache.kafka.streams.KeyValue), LongSerializer (org.apache.kafka.common.serialization.LongSerializer), ArrayList (java.util.ArrayList), CountDownLatch (java.util.concurrent.CountDownLatch), Properties (java.util.Properties), LagInfo (org.apache.kafka.streams.LagInfo), KafkaStreamsWrapper (org.apache.kafka.streams.KafkaStreamsWrapper), IOException (java.io.IOException), CyclicBarrier (java.util.concurrent.CyclicBarrier), StreamsBuilder (org.apache.kafka.streams.StreamsBuilder), FallbackPriorTaskAssignor (org.apache.kafka.streams.processor.internals.assignment.FallbackPriorTaskAssignor), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), Map (java.util.Map), HashMap (java.util.HashMap)

Example 3 with LagInfo

Use of org.apache.kafka.streams.LagInfo in the apache/kafka project.

From the class NamedTopologyIntegrationTest, method shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology:

@Test
public void shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology() throws Exception {
    try {
        // For this test, one of the topologies reads from an input topic with just one partition, so
        // there is only one instance of that topology's store; any method that looks up all hosts with
        // a specific store and topology should therefore always return exactly one StreamsMetadata.
        CLUSTER.createTopic(SINGLE_PARTITION_INPUT_STREAM, 1, 1);
        CLUSTER.createTopic(SINGLE_PARTITION_OUTPUT_STREAM, 1, 1);
        produceToInputTopics(SINGLE_PARTITION_INPUT_STREAM, STANDARD_INPUT_DATA);
        final String topology1Store = "store-" + TOPOLOGY_1;
        final String topology2Store = "store-" + TOPOLOGY_2;
        topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams.addNamedTopology(topology1Builder.build());
        streams.removeNamedTopology(TOPOLOGY_1);
        assertThat(streams.getTopologyByName(TOPOLOGY_1), is(Optional.empty()));
        streams.addNamedTopology(topology1Builder.build());
        streams.addNamedTopology(topology2Builder.build());
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(15));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SINGLE_PARTITION_OUTPUT_STREAM, 3), equalTo(COUNT_OUTPUT_DATA));
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(TOPOLOGY_1, topology1Store, QueryableStoreTypes.keyValueStore()));
        assertThat(store.get("A"), equalTo(2L));
        final Collection<StreamsMetadata> streamsMetadata = streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1);
        final Collection<StreamsMetadata> streamsMetadata2 = streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2);
        assertThat(streamsMetadata.size(), equalTo(1));
        assertThat(streamsMetadata2.size(), equalTo(1));
        final KeyQueryMetadata keyMetadata = streams.queryMetadataForKey(topology1Store, "A", new StringSerializer(), TOPOLOGY_1);
        final KeyQueryMetadata keyMetadata2 = streams.queryMetadataForKey(topology2Store, "A", new StringSerializer(), TOPOLOGY_2);
        assertThat(keyMetadata, not(NOT_AVAILABLE));
        assertThat(keyMetadata, equalTo(keyMetadata2));
        final Map<String, Map<Integer, LagInfo>> partitionLags1 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_1);
        final Map<String, Map<Integer, LagInfo>> partitionLags2 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_2);
        assertThat(partitionLags1.keySet(), equalTo(singleton(topology1Store)));
        assertThat(partitionLags1.get(topology1Store).keySet(), equalTo(mkSet(0, 1)));
        assertThat(partitionLags2.keySet(), equalTo(singleton(topology2Store)));
        // only one copy of the store in topology-2
        assertThat(partitionLags2.get(topology2Store).keySet(), equalTo(singleton(0)));
        // Start up a second node with both topologies
        setupSecondKafkaStreams();
        topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder2.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams2.start(asList(topology1Builder2.build(), topology2Builder2.build()));
        waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
        verifyMetadataForTopology(TOPOLOGY_1, streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1), streams2.streamsMetadataForStore(topology1Store, TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2), streams2.streamsMetadataForStore(topology2Store, TOPOLOGY_2));
        verifyMetadataForTopology(TOPOLOGY_1, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_1), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_2), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_2));
    } finally {
        CLUSTER.deleteTopics(SINGLE_PARTITION_INPUT_STREAM, SINGLE_PARTITION_OUTPUT_STREAM);
    }
}
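
For key-based queries, the same lag data combines naturally with queryMetadataForKey: the metadata identifies the partition holding a key, and the local LagInfo for that partition indicates how stale a locally served answer would be. A minimal sketch against the plain KafkaStreams API (the helper class is hypothetical; the named-topology variants used above follow the same shape):

import java.util.Collections;

import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.LagInfo;

public final class KeyLagLookup {

    // Hypothetical helper: how far the local copy of the partition holding
    // `key` lags behind its changelog; Long.MAX_VALUE means unknown/unavailable.
    public static long localLagForKey(final KafkaStreams streams,
                                      final String storeName,
                                      final String key) {
        final KeyQueryMetadata metadata =
            streams.queryMetadataForKey(storeName, key, new StringSerializer());
        if (KeyQueryMetadata.NOT_AVAILABLE.equals(metadata)) {
            return Long.MAX_VALUE;
        }
        final LagInfo lag = streams.allLocalStorePartitionLags()
            .getOrDefault(storeName, Collections.emptyMap())
            .get(metadata.partition());
        return lag == null ? Long.MAX_VALUE : lag.offsetLag();
    }
}
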
Also used: CoreMatchers.is (org.hamcrest.CoreMatchers.is), DefaultKafkaClientSupplier (org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier), KafkaStreamsNamedTopologyWrapper (org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper), Stores (org.apache.kafka.streams.state.Stores), StreamsException (org.apache.kafka.streams.errors.StreamsException), CoreMatchers.notNullValue (org.hamcrest.CoreMatchers.notNullValue), Collections.singletonList (java.util.Collections.singletonList), NamedTopologyBuilder (org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder), StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), IntegrationTestUtils.safeUniqueTestName (org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName), Collections.singleton (java.util.Collections.singleton), Arrays.asList (java.util.Arrays.asList), KeyValueStore (org.apache.kafka.streams.state.KeyValueStore), Map (java.util.Map), After (org.junit.After), Duration (java.time.Duration), Serdes (org.apache.kafka.common.serialization.Serdes), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), ClientUtils.extractThreadId (org.apache.kafka.streams.processor.internals.ClientUtils.extractThreadId), MissingSourceTopicException (org.apache.kafka.streams.errors.MissingSourceTopicException), TopicPartition (org.apache.kafka.common.TopicPartition), AfterClass (org.junit.AfterClass), TestUtils (org.apache.kafka.test.TestUtils), Collection (java.util.Collection), KeyValue (org.apache.kafka.streams.KeyValue), StreamsMetadata (org.apache.kafka.streams.StreamsMetadata), Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet), LongDeserializer (org.apache.kafka.common.serialization.LongDeserializer), Set (java.util.Set), ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig), KafkaClientSupplier (org.apache.kafka.streams.KafkaClientSupplier), LongSerializer (org.apache.kafka.common.serialization.LongSerializer), State (org.apache.kafka.streams.KafkaStreams.State), Collectors (java.util.stream.Collectors), Bytes (org.apache.kafka.common.utils.Bytes), QueryableStoreTypes (org.apache.kafka.streams.state.QueryableStoreTypes), IntegrationTestUtils (org.apache.kafka.streams.integration.utils.IntegrationTestUtils), List (java.util.List), Materialized (org.apache.kafka.streams.kstream.Materialized), Optional (java.util.Optional), AddNamedTopologyResult (org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult), Queue (java.util.Queue), Pattern (java.util.regex.Pattern), ReadOnlyKeyValueStore (org.apache.kafka.streams.state.ReadOnlyKeyValueStore), NamedTopology (org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology), StreamsConfig (org.apache.kafka.streams.StreamsConfig), BeforeClass (org.junit.BeforeClass), CoreMatchers.equalTo (org.hamcrest.CoreMatchers.equalTo), CoreMatchers.not (org.hamcrest.CoreMatchers.not), NamedTopologyStoreQueryParameters (org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyStoreQueryParameters), HashMap (java.util.HashMap), KStream (org.apache.kafka.streams.kstream.KStream), TestUtils.retryOnExceptionWithTimeout (org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout), KeyValue.pair (org.apache.kafka.streams.KeyValue.pair), EmbeddedKafkaCluster (org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster), TestName (org.junit.rules.TestName), MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat), LinkedList (java.util.LinkedList), CoreMatchers.nullValue (org.hamcrest.CoreMatchers.nullValue), Before (org.junit.Before), IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived (org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived), KTable (org.apache.kafka.streams.kstream.KTable), IntegrationTestUtils.waitForApplicationState (org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForApplicationState), Properties (java.util.Properties), StreamsUncaughtExceptionHandler (org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler), Iterator (java.util.Iterator), Consumed (org.apache.kafka.streams.kstream.Consumed), StreamsMetadataImpl (org.apache.kafka.streams.state.internals.StreamsMetadataImpl), Test (org.junit.Test), RemoveNamedTopologyResult (org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult), NOT_AVAILABLE (org.apache.kafka.streams.KeyQueryMetadata.NOT_AVAILABLE), Rule (org.junit.Rule), KeyQueryMetadata (org.apache.kafka.streams.KeyQueryMetadata), LagInfo (org.apache.kafka.streams.LagInfo), UniqueTopicSerdeScope (org.apache.kafka.streams.utils.UniqueTopicSerdeScope)
