Example 6 with KeyQueryMetadata

Use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

From the class StoreQueryIntegrationTest, method shouldQuerySpecificStalePartitionStoresMultiStreamThreads.

@Test
public void shouldQuerySpecificStalePartitionStoresMultiStreamThreads() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final int numStreamThreads = 2;
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final Properties streamsConfiguration2 = streamsConfiguration();
    streamsConfiguration2.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration1);
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration2);
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    assertTrue(kafkaStreams1.metadataForLocalThreads().size() > 1);
    assertTrue(kafkaStreams2.metadataForLocalThreads().size() > 1);
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer());
    // key belongs to this partition
    final int keyPartition = keyQueryMetadata.partition();
    // key doesn't belong to this partition
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
    // Assert that both active and standby are able to query for a key
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param = StoreQueryParameters.fromNameAndType(TABLE_NAME, queryableStoreType).enableStaleStores().withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam = StoreQueryParameters.fromNameAndType(TABLE_NAME, queryableStoreType).enableStaleStores().withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);
    // Assert that querying the wrong partition returns no value for the key
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
Also used: KafkaStreams(org.apache.kafka.streams.KafkaStreams) Semaphore(java.util.concurrent.Semaphore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Properties(java.util.Properties) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
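
The essence of the queries above can be condensed into a hypothetical helper. This is a sketch, not part of the test: TABLE_NAME and a running KafkaStreams instance are assumed from the surrounding fixture, and StoreQueryParameters and QueryableStoreTypes come from org.apache.kafka.streams and org.apache.kafka.streams.state.

// Minimal sketch (hypothetical helper; TABLE_NAME and the "streams" instance
// are assumed from the surrounding test fixture).
private Integer queryOwningPartition(final KafkaStreams streams, final int key) {
    final KeyQueryMetadata metadata =
        streams.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer());
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> params =
        StoreQueryParameters
            .fromNameAndType(TABLE_NAME, QueryableStoreTypes.<Integer, Integer>keyValueStore())
            // also accept standby and restoring replicas, not just the active task
            .enableStaleStores()
            // restrict the handle to the single partition that owns the key
            .withPartition(metadata.partition());
    // null if the key is absent on this instance
    return streams.store(params).get(key);
}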

Example 7 with KeyQueryMetadata

Use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

From the class OptimizedKTableIntegrationTest, method shouldApplyUpdatesToStandbyStore.

@Test
public void shouldApplyUpdatesToStandbyStore() throws Exception {
    final int batch1NumMessages = 100;
    final int batch2NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(INPUT_TOPIC_NAME, Consumed.with(Serdes.Integer(), Serdes.Integer()), Materialized.<Integer, Integer, KeyValueStore<Bytes, byte[]>>as(TABLE_NAME).withCachingDisabled()).toStream().peek((k, v) -> semaphore.release());
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final ReadOnlyKeyValueStore<Integer, Integer> store1 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams1, QueryableStoreTypes.keyValueStore());
    final ReadOnlyKeyValueStore<Integer, Integer> store2 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams2, QueryableStoreTypes.keyValueStore());
    final boolean kafkaStreams1WasFirstActive;
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
    // Assert that the current value in store reflects all messages being processed
    if ((keyQueryMetadata.activeHost().port() % 2) == 1) {
        assertThat(store1.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = true;
    } else {
        assertThat(store2.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = false;
    }
    if (kafkaStreams1WasFirstActive) {
        kafkaStreams1.close();
    } else {
        kafkaStreams2.close();
    }
    final ReadOnlyKeyValueStore<Integer, Integer> newActiveStore = kafkaStreams1WasFirstActive ? store2 : store1;
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that after failover we have recovered to the last store write
        assertThat(newActiveStore.get(key), is(equalTo(batch1NumMessages - 1)));
    });
    final int totalNumMessages = batch1NumMessages + batch2NumMessages;
    produceValueRange(key, batch1NumMessages, totalNumMessages);
    // Assert that all messages in the second batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch2NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that the current value in store reflects all messages being processed
        assertThat(newActiveStore.get(key), is(equalTo(totalNumMessages - 1)));
    });
}
Also used: StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Semaphore(java.util.concurrent.Semaphore) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
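
The lambda passed to queryMetadataForKey above is a StreamPartitioner that pins every key to partition 0, making the active-host lookup deterministic. Below is a minimal sketch of that lookup as a hypothetical helper, with TABLE_NAME assumed from the fixture.

// Minimal sketch (hypothetical helper; TABLE_NAME assumed from the fixture).
private boolean firstInstanceIsActiveForKey(final KafkaStreams streams, final int key) {
    // The partitioner must match the one used when producing; here it is
    // hard-coded to partition 0, exactly as in the test above.
    final KeyQueryMetadata metadata =
        streams.queryMetadataForKey(TABLE_NAME, key, (topic, k, v, numPartitions) -> 0);
    if (KeyQueryMetadata.NOT_AVAILABLE.equals(metadata)) {
        throw new IllegalStateException("key metadata not available yet (e.g. mid-rebalance)");
    }
    // The test fixture encodes instance identity in the port (odd port == first
    // instance); a real application would compare activeHost() against its own
    // application.server value instead.
    return (metadata.activeHost().port() % 2) == 1;
}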

Example 8 with KeyQueryMetadata

Use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

From the class QueryableStateIntegrationTest, method verifyAllWindowedKeys.

private void verifyAllWindowedKeys(final List<KafkaStreams> streamsList, final KafkaStreams streams, final KafkaStreamsTest.StateListenerStub stateListenerStub, final Set<String> keys, final String storeName, final Long from, final Long to, final long timeout, final boolean pickInstanceByPort) throws Exception {
    retryOnExceptionWithTimeout(timeout, () -> {
        final List<String> noMetadataKeys = new ArrayList<>();
        final List<String> nullStoreKeys = new ArrayList<>();
        final List<String> nullValueKeys = new ArrayList<>();
        final Map<String, Exception> exceptionalKeys = new TreeMap<>();
        final StringSerializer serializer = new StringSerializer();
        for (final String key : keys) {
            try {
                final KeyQueryMetadata queryMetadata = streams.queryMetadataForKey(storeName, key, serializer);
                if (queryMetadata == null || queryMetadata.equals(KeyQueryMetadata.NOT_AVAILABLE)) {
                    noMetadataKeys.add(key);
                    continue;
                }
                if (pickInstanceByPort) {
                    assertThat(queryMetadata.standbyHosts().size(), equalTo(0));
                } else {
                    assertThat("Should have standbys to query from", !queryMetadata.standbyHosts().isEmpty());
                }
                final int index = queryMetadata.activeHost().port();
                final KafkaStreams streamsWithKey = pickInstanceByPort ? streamsList.get(index) : streams;
                final ReadOnlyWindowStore<String, Long> store = IntegrationTestUtils.getStore(storeName, streamsWithKey, true, QueryableStoreTypes.windowStore());
                if (store == null) {
                    nullStoreKeys.add(key);
                    continue;
                }
                if (store.fetch(key, ofEpochMilli(from), ofEpochMilli(to)) == null) {
                    nullValueKeys.add(key);
                }
            } catch (final InvalidStateStoreException e) {
                // an InvalidStateStoreException is acceptable only if at least one rebalance has occurred
                if (stateListenerStub.mapStates.get(KafkaStreams.State.REBALANCING) < 1) {
                    throw new NoRetryException(new AssertionError(String.format("Received %s for key %s and expected at least one rebalancing state, but had none", e.getClass().getName(), key)));
                }
            } catch (final Exception e) {
                exceptionalKeys.put(key, e);
            }
        }
        assertNoKVKeyFailures(storeName, timeout, noMetadataKeys, nullStoreKeys, nullValueKeys, exceptionalKeys);
    });
}
Also used: KafkaStreams(org.apache.kafka.streams.KafkaStreams) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) UnknownStateStoreException(org.apache.kafka.streams.errors.UnknownStateStoreException) IOException(java.io.IOException) InvalidStateStoreException(org.apache.kafka.streams.errors.InvalidStateStoreException) NoRetryException(org.apache.kafka.test.NoRetryException) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) StringSerializer(org.apache.kafka.common.serialization.StringSerializer)
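
Two details in this helper deserve isolation: the guard against KeyQueryMetadata.NOT_AVAILABLE (metadata can be missing mid-rebalance) and the standby-host lookup. The hypothetical helper below sketches both, assuming imports for HostInfo (org.apache.kafka.streams.state) and java.util.Collections in addition to those listed above.

// Minimal sketch (hypothetical helper; "streams", storeName, and key are assumed names).
private Set<HostInfo> standbyHostsForKey(final KafkaStreams streams, final String storeName, final String key) {
    final KeyQueryMetadata metadata =
        streams.queryMetadataForKey(storeName, key, new StringSerializer());
    if (metadata == null || KeyQueryMetadata.NOT_AVAILABLE.equals(metadata)) {
        // metadata not ready yet (e.g. mid-rebalance): callers should retry
        return Collections.emptySet();
    }
    // empty unless num.standby.replicas > 0 for the application
    return metadata.standbyHosts();
}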

Example 9 with KeyQueryMetadata

Use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

From the class NamedTopologyIntegrationTest, method shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology.

@Test
public void shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology() throws Exception {
    try {
        // For this test, one of the topologies reads from an input topic with just one partition,
        // so there is only one instance of that topology's store, and any of the methods that look
        // up all hosts with a specific store and topology should always return exactly one StreamsMetadata.
        CLUSTER.createTopic(SINGLE_PARTITION_INPUT_STREAM, 1, 1);
        CLUSTER.createTopic(SINGLE_PARTITION_OUTPUT_STREAM, 1, 1);
        produceToInputTopics(SINGLE_PARTITION_INPUT_STREAM, STANDARD_INPUT_DATA);
        final String topology1Store = "store-" + TOPOLOGY_1;
        final String topology2Store = "store-" + TOPOLOGY_2;
        topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams.addNamedTopology(topology1Builder.build());
        streams.removeNamedTopology(TOPOLOGY_1);
        assertThat(streams.getTopologyByName(TOPOLOGY_1), is(Optional.empty()));
        streams.addNamedTopology(topology1Builder.build());
        streams.addNamedTopology(topology2Builder.build());
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(15));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SINGLE_PARTITION_OUTPUT_STREAM, 3), equalTo(COUNT_OUTPUT_DATA));
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(TOPOLOGY_1, topology1Store, QueryableStoreTypes.keyValueStore()));
        assertThat(store.get("A"), equalTo(2L));
        final Collection<StreamsMetadata> streamsMetadata = streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1);
        final Collection<StreamsMetadata> streamsMetadata2 = streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2);
        assertThat(streamsMetadata.size(), equalTo(1));
        assertThat(streamsMetadata2.size(), equalTo(1));
        final KeyQueryMetadata keyMetadata = streams.queryMetadataForKey(topology1Store, "A", new StringSerializer(), TOPOLOGY_1);
        final KeyQueryMetadata keyMetadata2 = streams.queryMetadataForKey(topology2Store, "A", new StringSerializer(), TOPOLOGY_2);
        assertThat(keyMetadata, not(NOT_AVAILABLE));
        assertThat(keyMetadata, equalTo(keyMetadata2));
        final Map<String, Map<Integer, LagInfo>> partitionLags1 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_1);
        final Map<String, Map<Integer, LagInfo>> partitionLags2 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_2);
        assertThat(partitionLags1.keySet(), equalTo(singleton(topology1Store)));
        assertThat(partitionLags1.get(topology1Store).keySet(), equalTo(mkSet(0, 1)));
        assertThat(partitionLags2.keySet(), equalTo(singleton(topology2Store)));
        // only one copy of the store in topology-2
        assertThat(partitionLags2.get(topology2Store).keySet(), equalTo(singleton(0)));
        // Start up a second node with both topologies
        setupSecondKafkaStreams();
        topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder2.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams2.start(asList(topology1Builder2.build(), topology2Builder2.build()));
        waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
        verifyMetadataForTopology(TOPOLOGY_1, streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1), streams2.streamsMetadataForStore(topology1Store, TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2), streams2.streamsMetadataForStore(topology2Store, TOPOLOGY_2));
        verifyMetadataForTopology(TOPOLOGY_1, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_1), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_2), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_2));
    } finally {
        CLUSTER.deleteTopics(SINGLE_PARTITION_INPUT_STREAM, SINGLE_PARTITION_OUTPUT_STREAM);
    }
}
Also used: CoreMatchers.is(org.hamcrest.CoreMatchers.is) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) Stores(org.apache.kafka.streams.state.Stores) StreamsException(org.apache.kafka.streams.errors.StreamsException) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) Collections.singletonList(java.util.Collections.singletonList) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Map(java.util.Map) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ClientUtils.extractThreadId(org.apache.kafka.streams.processor.internals.ClientUtils.extractThreadId) MissingSourceTopicException(org.apache.kafka.streams.errors.MissingSourceTopicException) TopicPartition(org.apache.kafka.common.TopicPartition) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) KafkaClientSupplier(org.apache.kafka.streams.KafkaClientSupplier) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) State(org.apache.kafka.streams.KafkaStreams.State) Collectors(java.util.stream.Collectors) Bytes(org.apache.kafka.common.utils.Bytes) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) Materialized(org.apache.kafka.streams.kstream.Materialized) Optional(java.util.Optional) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) Queue(java.util.Queue) Pattern(java.util.regex.Pattern) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) CoreMatchers.not(org.hamcrest.CoreMatchers.not) NamedTopologyStoreQueryParameters(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyStoreQueryParameters) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) TestUtils.retryOnExceptionWithTimeout(org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout) KeyValue.pair(org.apache.kafka.streams.KeyValue.pair) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) Before(org.junit.Before) IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived) KTable(org.apache.kafka.streams.kstream.KTable) IntegrationTestUtils.waitForApplicationState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForApplicationState) Properties(java.util.Properties) StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) Iterator(java.util.Iterator) Consumed(org.apache.kafka.streams.kstream.Consumed) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) Test(org.junit.Test) RemoveNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult) NOT_AVAILABLE(org.apache.kafka.streams.KeyQueryMetadata.NOT_AVAILABLE) Rule(org.junit.Rule) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) LagInfo(org.apache.kafka.streams.LagInfo) UniqueTopicSerdeScope(org.apache.kafka.streams.utils.UniqueTopicSerdeScope)
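
The topology-scoped lookups above reduce to a short sketch. Note that KafkaStreamsNamedTopologyWrapper and NamedTopologyStoreQueryParameters live under an internals package and are not a stable public API; TOPOLOGY_1 and the store name are assumed from the fixture, and the helper below is hypothetical.

// Minimal sketch (hypothetical helper; TOPOLOGY_1 assumed from the fixture).
private Long countForKeyInTopology1(final KafkaStreamsNamedTopologyWrapper streams) {
    // store handle scoped to a single named topology
    final ReadOnlyKeyValueStore<String, Long> store = streams.store(
        NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(
            TOPOLOGY_1, "store-" + TOPOLOGY_1, QueryableStoreTypes.keyValueStore()));
    // the key metadata lookup is likewise scoped to that topology
    final KeyQueryMetadata metadata = streams.queryMetadataForKey(
        "store-" + TOPOLOGY_1, "A", new StringSerializer(), TOPOLOGY_1);
    return KeyQueryMetadata.NOT_AVAILABLE.equals(metadata) ? null : store.get("A");
}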

Example 10 with KeyQueryMetadata

Use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

From the class StoreQueryIntegrationTest, method shouldQuerySpecificActivePartitionStores.

@Test
public void shouldQuerySpecificActivePartitionStores() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> {
        final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
        // key belongs to this partition
        final int keyPartition = keyQueryMetadata.partition();
        // key doesn't belong to this partition
        final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
        final boolean kafkaStreams1IsActive = (keyQueryMetadata.activeHost().port() % 2) == 1;
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam = StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore()).withPartition(keyPartition);
        ReadOnlyKeyValueStore<Integer, Integer> store1 = null;
        ReadOnlyKeyValueStore<Integer, Integer> store2 = null;
        if (kafkaStreams1IsActive) {
            store1 = getStore(kafkaStreams1, storeQueryParam);
        } else {
            store2 = getStore(kafkaStreams2, storeQueryParam);
        }
        if (kafkaStreams1IsActive) {
            assertThat(store1, is(notNullValue()));
            assertThat(store2, is(nullValue()));
        } else {
            assertThat(store2, is(notNullValue()));
            assertThat(store1, is(nullValue()));
        }
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam2 = StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore()).withPartition(keyDontBelongPartition);
        try {
            // The active instance's handle still returns the value for the key. The other
            // instance returns null for the wrong partition, and asking the active instance
            // for a partition it does not host throws InvalidStateStoreException.
            if (kafkaStreams1IsActive) {
                assertThat(store1.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams2, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception = assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams1, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            } else {
                assertThat(store2.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams1, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception = assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams2, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            }
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
Also used: StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KafkaStreams(org.apache.kafka.streams.KafkaStreams) InvalidStateStoreException(org.apache.kafka.streams.errors.InvalidStateStoreException) Semaphore(java.util.concurrent.Semaphore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
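
The wrong-partition behavior this test asserts can be summarized in a hypothetical helper: an instance that hosts the requested partition but not the key returns null, while an instance that does not host the partition at all throws InvalidStateStoreException. TABLE_NAME is assumed from the fixture.

// Minimal sketch (hypothetical helper; TABLE_NAME assumed from the fixture).
private Integer queryWrongPartition(final KafkaStreams streams, final int key, final int otherPartition) {
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> wrongPartition =
        StoreQueryParameters
            .fromNameAndType(TABLE_NAME, QueryableStoreTypes.<Integer, Integer>keyValueStore())
            .withPartition(otherPartition); // a partition this instance may not host
    try {
        // null if this instance hosts the partition but the key lives elsewhere
        return streams.store(wrongPartition).get(key);
    } catch (final InvalidStateStoreException e) {
        // thrown when the requested partition is not assigned to this instance
        return null;
    }
}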

Aggregations

Usage counts across the examples:

KeyQueryMetadata (org.apache.kafka.streams.KeyQueryMetadata): 17
Test (org.junit.Test): 14
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 8
ReadOnlyKeyValueStore (org.apache.kafka.streams.state.ReadOnlyKeyValueStore): 7
Semaphore (java.util.concurrent.Semaphore): 6
IntegrationTest (org.apache.kafka.test.IntegrationTest): 6
TopicPartition (org.apache.kafka.common.TopicPartition): 5
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 5
InvalidStateStoreException (org.apache.kafka.streams.errors.InvalidStateStoreException): 4
Properties (java.util.Properties): 3
PartitionInfo (org.apache.kafka.common.PartitionInfo): 3
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 3
IOException (java.io.IOException): 2
ArrayList (java.util.ArrayList): 2
TreeMap (java.util.TreeMap): 2
IntegerSerializer (org.apache.kafka.common.serialization.IntegerSerializer): 2
StreamsMetadata (org.apache.kafka.streams.StreamsMetadata): 2
KafkaStreamsNamedTopologyWrapper (org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper): 2
NamedTopologyBuilder (org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder): 2
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 2