
Example 1 with KafkaStreamsWrapper

Use of org.apache.kafka.streams.KafkaStreamsWrapper in project kafka by apache.

From the class LagFetchIntegrationTest, the method shouldFetchLagsDuringRebalancing:

private void shouldFetchLagsDuringRebalancing(final String optimization) throws Exception {
    final CountDownLatch latchTillActiveIsRunning = new CountDownLatch(1);
    final CountDownLatch latchTillStandbyIsRunning = new CountDownLatch(1);
    final CountDownLatch latchTillStandbyHasPartitionsAssigned = new CountDownLatch(1);
    final CyclicBarrier lagCheckBarrier = new CyclicBarrier(2);
    final List<KafkaStreamsWrapper> streamsList = new ArrayList<>();
    IntegrationTestUtils.produceKeyValuesSynchronously(inputTopicName, mkSet(new KeyValue<>("k1", 1L), new KeyValue<>("k2", 2L), new KeyValue<>("k3", 3L), new KeyValue<>("k4", 4L), new KeyValue<>("k5", 5L)), TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, LongSerializer.class, new Properties()), mockTime);
    // Create two Kafka Streams instances; the first will host the active task and the second the standby.
    for (int i = 0; i < 2; i++) {
        final Properties props = (Properties) streamsConfiguration.clone();
        // this test relies on the second instance getting the standby, so we specify
        // an assignor with this contract.
        props.put(StreamsConfig.InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, FallbackPriorTaskAssignor.class.getName());
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:" + i);
        props.put(StreamsConfig.CLIENT_ID_CONFIG, "instance-" + i);
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimization);
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(stateStoreName + i).getAbsolutePath());
        final StreamsBuilder builder = new StreamsBuilder();
        final KTable<String, Long> t1 = builder.table(inputTopicName, Materialized.as(stateStoreName));
        t1.toStream().to(outputTopicName);
        final KafkaStreamsWrapper streams = new KafkaStreamsWrapper(builder.build(props), props);
        streamsList.add(streams);
    }
    final KafkaStreamsWrapper activeStreams = streamsList.get(0);
    final KafkaStreamsWrapper standbyStreams = streamsList.get(1);
    activeStreams.setStreamThreadStateListener((thread, newState, oldState) -> {
        if (newState == StreamThread.State.RUNNING) {
            latchTillActiveIsRunning.countDown();
        }
    });
    standbyStreams.setStreamThreadStateListener((thread, newState, oldState) -> {
        if (oldState == StreamThread.State.PARTITIONS_ASSIGNED && newState == StreamThread.State.RUNNING) {
            latchTillStandbyHasPartitionsAssigned.countDown();
            try {
                lagCheckBarrier.await(60, TimeUnit.SECONDS);
            } catch (final Exception e) {
                throw new RuntimeException(e);
            }
        } else if (newState == StreamThread.State.RUNNING) {
            latchTillStandbyIsRunning.countDown();
        }
    });
    try {
        // First start up the active.
        TestUtils.waitForCondition(() -> activeStreams.allLocalStorePartitionLags().size() == 0, WAIT_TIMEOUT_MS, "Should see empty lag map before streams is started.");
        activeStreams.start();
        latchTillActiveIsRunning.await(60, TimeUnit.SECONDS);
        IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfiguration, outputTopicName, 5, WAIT_TIMEOUT_MS);
        // Check the active reports proper lag values.
        Map<String, Map<Integer, LagInfo>> offsetLagInfoMap = getFirstNonEmptyLagMap(activeStreams);
        assertThat(offsetLagInfoMap.size(), equalTo(1));
        assertThat(offsetLagInfoMap.keySet(), equalTo(mkSet(stateStoreName)));
        assertThat(offsetLagInfoMap.get(stateStoreName).size(), equalTo(1));
        LagInfo lagInfo = offsetLagInfoMap.get(stateStoreName).get(0);
        assertThat(lagInfo.currentOffsetPosition(), equalTo(5L));
        assertThat(lagInfo.endOffsetPosition(), equalTo(5L));
        assertThat(lagInfo.offsetLag(), equalTo(0L));
        // Start up the standby and make it pause right after its partitions are assigned.
        standbyStreams.start();
        latchTillStandbyHasPartitionsAssigned.await(60, TimeUnit.SECONDS);
        offsetLagInfoMap = getFirstNonEmptyLagMap(standbyStreams);
        assertThat(offsetLagInfoMap.size(), equalTo(1));
        assertThat(offsetLagInfoMap.keySet(), equalTo(mkSet(stateStoreName)));
        assertThat(offsetLagInfoMap.get(stateStoreName).size(), equalTo(1));
        lagInfo = offsetLagInfoMap.get(stateStoreName).get(0);
        assertThat(lagInfo.currentOffsetPosition(), equalTo(0L));
        assertThat(lagInfo.endOffsetPosition(), equalTo(5L));
        assertThat(lagInfo.offsetLag(), equalTo(5L));
        // The standby thread won't proceed to RUNNING before this barrier is crossed.
        lagCheckBarrier.await(60, TimeUnit.SECONDS);
        // Wait until the lag goes down to 0 on the standby.
        TestUtils.waitForCondition(() -> standbyStreams.allLocalStorePartitionLags().get(stateStoreName).get(0).offsetLag() == 0, WAIT_TIMEOUT_MS, "Standby should eventually catch up and have zero lag.");
    } finally {
        for (final KafkaStreams streams : streamsList) {
            streams.close();
        }
    }
}
Also used: KafkaStreams (org.apache.kafka.streams.KafkaStreams), KeyValue (org.apache.kafka.streams.KeyValue), LongSerializer (org.apache.kafka.common.serialization.LongSerializer), ArrayList (java.util.ArrayList), CountDownLatch (java.util.concurrent.CountDownLatch), Properties (java.util.Properties), LagInfo (org.apache.kafka.streams.LagInfo), KafkaStreamsWrapper (org.apache.kafka.streams.KafkaStreamsWrapper), IOException (java.io.IOException), CyclicBarrier (java.util.concurrent.CyclicBarrier), StreamsBuilder (org.apache.kafka.streams.StreamsBuilder), FallbackPriorTaskAssignor (org.apache.kafka.streams.processor.internals.assignment.FallbackPriorTaskAssignor), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), Map (java.util.Map), HashMap (java.util.HashMap)
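
The wrapper's setStreamThreadStateListener hook is what lets the test pause the standby between PARTITIONS_ASSIGNED and RUNNING; the lag probing itself goes through the public KafkaStreams#allLocalStorePartitionLags() API. Below is a minimal sketch (not part of the test above) of how an application might read those per-partition LagInfo values; the topic name, store name, application id, and broker address are hypothetical placeholders.

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.LagInfo;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Materialized;

public class LagProbe {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        // Materialize the input topic into a queryable store (names are placeholders).
        builder.table("input-topic", Materialized.as("lag-probe-store"));
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "lag-probe-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
        final KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        // Store name -> (partition -> LagInfo); the map stays empty until threads have tasks assigned.
        final Map<String, Map<Integer, LagInfo>> lags = streams.allLocalStorePartitionLags();
        lags.forEach((store, partitions) -> partitions.forEach((partition, lag) ->
            System.out.printf("%s[%d]: position=%d, end=%d, lag=%d%n",
                store, partition, lag.currentOffsetPosition(), lag.endOffsetPosition(), lag.offsetLag())));
        streams.close();
    }
}

Note that the test's latch-and-barrier choreography exists only to freeze the standby at a known point so the lag assertions are deterministic; a production caller would simply poll allLocalStorePartitionLags() periodically, as sketched here.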

Example 2 with KafkaStreamsWrapper

Use of org.apache.kafka.streams.KafkaStreamsWrapper in project kafka by apache.

From the class JoinWithIncompleteMetadataIntegrationTest, the method testShouldAutoShutdownOnJoinWithIncompleteMetadata:

@Test
public void testShouldAutoShutdownOnJoinWithIncompleteMetadata() throws InterruptedException {
    STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID);
    STREAMS_CONFIG.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    final KStream<Long, String> notExistStream = builder.stream(NON_EXISTENT_INPUT_TOPIC_LEFT);
    final KTable<Long, String> aggregatedTable = notExistStream.leftJoin(rightTable, valueJoiner).groupBy((key, value) -> key).reduce((value1, value2) -> value1 + value2);
    // Write the (continuously updating) results to the output topic.
    aggregatedTable.toStream().to(OUTPUT_TOPIC);
    final KafkaStreamsWrapper streams = new KafkaStreamsWrapper(builder.build(), STREAMS_CONFIG);
    final IntegrationTestUtils.StateListenerStub listener = new IntegrationTestUtils.StateListenerStub();
    streams.setStreamThreadStateListener(listener);
    streams.start();
    TestUtils.waitForCondition(listener::transitToPendingShutdownSeen, "Did not see the thread state transition to PENDING_SHUTDOWN");
    streams.close();
    assertTrue(listener.transitToPendingShutdownSeen());
}
Also used: KafkaStreamsWrapper (org.apache.kafka.streams.KafkaStreamsWrapper), StreamsBuilder (org.apache.kafka.streams.StreamsBuilder), StreamsConfig (org.apache.kafka.streams.StreamsConfig), KTable (org.apache.kafka.streams.kstream.KTable), AfterClass (org.junit.AfterClass), Properties (java.util.Properties), BeforeClass (org.junit.BeforeClass), TestUtils (org.apache.kafka.test.TestUtils), IntegrationTest (org.apache.kafka.test.IntegrationTest), Assert.assertTrue (org.junit.Assert.assertTrue), IOException (java.io.IOException), ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig), Test (org.junit.Test), KStream (org.apache.kafka.streams.kstream.KStream), Category (org.junit.experimental.categories.Category), IntegrationTestUtils (org.apache.kafka.streams.integration.utils.IntegrationTestUtils), Rule (org.junit.Rule), EmbeddedKafkaCluster (org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster), ValueJoiner (org.apache.kafka.streams.kstream.ValueJoiner), After (org.junit.After), Serdes (org.apache.kafka.common.serialization.Serdes), TemporaryFolder (org.junit.rules.TemporaryFolder), Before (org.junit.Before)
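
StateListenerStub and setStreamThreadStateListener are test-scoped hooks; a regular application cannot register a per-thread listener, but it can observe the analogous client-level transition through the public KafkaStreams#setStateListener(). A minimal sketch, assuming a placeholder topology, application id, and broker address:

import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class ShutdownWatch {
    public static void main(final String[] args) throws InterruptedException {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("watch-input-topic").to("watch-output-topic");
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "shutdown-watch-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
        final KafkaStreams streams = new KafkaStreams(builder.build(), props);
        final CountDownLatch pendingShutdownSeen = new CountDownLatch(1);
        // The client-level listener must be registered before start(); it reports
        // transitions such as RUNNING -> PENDING_SHUTDOWN -> NOT_RUNNING.
        streams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.PENDING_SHUTDOWN) {
                pendingShutdownSeen.countDown();
            }
        });
        streams.start();
        streams.close();
        pendingShutdownSeen.await(); // close() drives the client through PENDING_SHUTDOWN
    }
}

The test above needs the internal wrapper because the auto-shutdown on incomplete metadata happens at the StreamThread level; the client-level listener sketched here sees the resulting state change but not which thread triggered it.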

Aggregations

IOException (java.io.IOException): 2
Properties (java.util.Properties): 2
KafkaStreamsWrapper (org.apache.kafka.streams.KafkaStreamsWrapper): 2
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 2
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
Map (java.util.Map): 1
CountDownLatch (java.util.concurrent.CountDownLatch): 1
CyclicBarrier (java.util.concurrent.CyclicBarrier): 1
ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig): 1
LongSerializer (org.apache.kafka.common.serialization.LongSerializer): 1
Serdes (org.apache.kafka.common.serialization.Serdes): 1
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 1
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 1
KeyValue (org.apache.kafka.streams.KeyValue): 1
LagInfo (org.apache.kafka.streams.LagInfo): 1
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 1
EmbeddedKafkaCluster (org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster): 1
IntegrationTestUtils (org.apache.kafka.streams.integration.utils.IntegrationTestUtils): 1
KStream (org.apache.kafka.streams.kstream.KStream): 1