
Example 11 with KeyQueryMetadata

use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

the class StoreQueryIntegrationTest method shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology.

@Test
public void shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final int numStreamThreads = 2;
    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final Properties streamsConfiguration2 = streamsConfiguration();
    streamsConfiguration2.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final String topologyA = "topology-A";
    final KafkaStreamsNamedTopologyWrapper kafkaStreams1 = createNamedTopologyKafkaStreams(streamsConfiguration1);
    final KafkaStreamsNamedTopologyWrapper kafkaStreams2 = createNamedTopologyKafkaStreams(streamsConfiguration2);
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    final NamedTopologyBuilder builder1A = kafkaStreams1.newNamedTopologyBuilder(topologyA, streamsConfiguration1);
    getStreamsBuilderWithTopology(builder1A, semaphore);
    final NamedTopologyBuilder builder2A = kafkaStreams2.newNamedTopologyBuilder(topologyA, streamsConfiguration2);
    getStreamsBuilderWithTopology(builder2A, semaphore);
    kafkaStreams1.start(builder1A.build());
    kafkaStreams2.start(builder2A.build());
    waitForApplicationState(kafkaStreamsList, State.RUNNING, Duration.ofSeconds(60));
    assertTrue(kafkaStreams1.metadataForLocalThreads().size() > 1);
    assertTrue(kafkaStreams2.metadataForLocalThreads().size() > 1);
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer(), topologyA);
    // key belongs to this partition
    final int keyPartition = keyQueryMetadata.partition();
    // key doesn't belong to this partition
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
    // Assert that both active and standby are able to query for a key
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param = NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType).enableStaleStores().withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam = NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType).enableStaleStores().withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);
    // Assert that the key is not found when querying the partition it does not belong to
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
Also used : KafkaStreams(org.apache.kafka.streams.KafkaStreams) Semaphore(java.util.concurrent.Semaphore) Matchers.containsString(org.hamcrest.Matchers.containsString) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Properties(java.util.Properties) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)

Example 12 with KeyQueryMetadata

use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

the class StoreQueryIntegrationTest method shouldQueryOnlyActivePartitionStoresByDefault.

@Test
public void shouldQueryOnlyActivePartitionStoresByDefault() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> {
        final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
        final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(TABLE_NAME, kafkaStreams1, queryableStoreType);
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(TABLE_NAME, kafkaStreams2, queryableStoreType);
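        // The test's two instances are started with distinct application.server ports,
        // so port parity is enough here to tell which instance hosts the active task for the key.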
        final boolean kafkaStreams1IsActive = (keyQueryMetadata.activeHost().port() % 2) == 1;
        try {
            if (kafkaStreams1IsActive) {
                assertThat(store1.get(key), is(notNullValue()));
                assertThat(store2.get(key), is(nullValue()));
            } else {
                assertThat(store1.get(key), is(nullValue()));
                assertThat(store2.get(key), is(notNullValue()));
            }
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KafkaStreams(org.apache.kafka.streams.KafkaStreams) InvalidStateStoreException(org.apache.kafka.streams.errors.InvalidStateStoreException) Semaphore(java.util.concurrent.Semaphore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
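
The port-parity check in this test only works because each instance is assigned its own application.server port; outside a test, the same KeyQueryMetadata is what a service uses to decide whether to answer a lookup locally or forward it to the active host. A minimal routing sketch under those assumptions (the KeyRouter class, localHostInfo field, and forwardTo transport call are hypothetical, not part of the Kafka Streams API):

import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public final class KeyRouter {
    private final KafkaStreams streams;
    // This instance's advertised endpoint, i.e. the value it set for application.server.
    private final HostInfo localHostInfo;

    public KeyRouter(final KafkaStreams streams, final HostInfo localHostInfo) {
        this.streams = streams;
        this.localHostInfo = localHostInfo;
    }

    public Integer lookup(final String storeName, final int key) {
        final KeyQueryMetadata metadata =
            streams.queryMetadataForKey(storeName, key, new IntegerSerializer());
        if (metadata == null || KeyQueryMetadata.NOT_AVAILABLE.equals(metadata)) {
            // Store unknown, or metadata not available yet (e.g. during a rebalance).
            return null;
        }
        if (localHostInfo.equals(metadata.activeHost())) {
            // The active task for this key runs here: query the local store directly.
            final ReadOnlyKeyValueStore<Integer, Integer> store = streams.store(
                StoreQueryParameters.fromNameAndType(
                    storeName, QueryableStoreTypes.<Integer, Integer>keyValueStore()));
            return store.get(key);
        }
        // Otherwise forward the lookup to the active host over the service's own transport.
        return forwardTo(metadata.activeHost(), storeName, key);
    }

    // Hypothetical transport call (HTTP, gRPC, ...); not part of the Kafka Streams API.
    private Integer forwardTo(final HostInfo host, final String storeName, final int key) {
        throw new UnsupportedOperationException("transport-specific forwarding goes here");
    }
}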

Example 13 with KeyQueryMetadata

use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

the class StoreQueryIntegrationTest method shouldQuerySpecificStalePartitionStores.

@Test
public void shouldQuerySpecificStalePartitionStores() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
    // key belongs to this partition
    final int keyPartition = keyQueryMetadata.partition();
    // key doesn't belong to this partition
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
    // Assert that both active and standby are able to query for a key
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param = StoreQueryParameters.fromNameAndType(TABLE_NAME, queryableStoreType).enableStaleStores().withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam = StoreQueryParameters.fromNameAndType(TABLE_NAME, queryableStoreType).enableStaleStores().withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);
    // Assert that the key is not found when querying the partition it does not belong to
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Semaphore(java.util.concurrent.Semaphore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
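
The stale-store tests above query both local instances directly; in a multi-node deployment, the instances that could serve the same stale read are exposed through KeyQueryMetadata#standbyHosts(). A minimal sketch reusing TABLE_NAME and the key from the test (the printHostsForKey helper is hypothetical, and standbyHosts() is only non-empty when num.standby.replicas is greater than zero):

private static void printHostsForKey(final KafkaStreams streams, final int key) {
    final KeyQueryMetadata metadata =
        streams.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer());
    // The instance hosting the active task for the key's partition.
    System.out.println("active:  " + metadata.activeHost());
    // Every instance holding a standby replica of that partition; each can serve a
    // possibly-stale read when the query uses enableStaleStores().withPartition(...).
    for (final HostInfo standby : metadata.standbyHosts()) {
        System.out.println("standby: " + standby.host() + ":" + standby.port());
    }
}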

Example 14 with KeyQueryMetadata

use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

the class StreamsMetadataState method getKeyQueryMetadataForKey.

private <K> KeyQueryMetadata getKeyQueryMetadataForKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner, final SourceTopicsInfo sourceTopicsInfo) {
    final Integer partition = partitioner.partition(sourceTopicsInfo.topicWithMostPartitions, key, null, sourceTopicsInfo.maxPartitions);
    final Set<TopicPartition> matchingPartitions = new HashSet<>();
    for (final String sourceTopic : sourceTopicsInfo.sourceTopics) {
        matchingPartitions.add(new TopicPartition(sourceTopic, partition));
    }
    HostInfo activeHost = UNKNOWN_HOST;
    final Set<HostInfo> standbyHosts = new HashSet<>();
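    // Scan every instance's metadata: the instance whose active task partitions intersect the
    // key's partition and whose stores include storeName becomes the active host; instances
    // matching via their standby task partitions and standby stores are collected as standby hosts.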
    for (final StreamsMetadata streamsMetadata : allMetadata) {
        final Set<String> activeStateStoreNames = streamsMetadata.stateStoreNames();
        final Set<TopicPartition> topicPartitions = new HashSet<>(streamsMetadata.topicPartitions());
        final Set<String> standbyStateStoreNames = streamsMetadata.standbyStateStoreNames();
        final Set<TopicPartition> standbyTopicPartitions = new HashSet<>(streamsMetadata.standbyTopicPartitions());
        topicPartitions.retainAll(matchingPartitions);
        if (activeStateStoreNames.contains(storeName) && !topicPartitions.isEmpty()) {
            activeHost = streamsMetadata.hostInfo();
        }
        standbyTopicPartitions.retainAll(matchingPartitions);
        if (standbyStateStoreNames.contains(storeName) && !standbyTopicPartitions.isEmpty()) {
            standbyHosts.add(streamsMetadata.hostInfo());
        }
    }
    return new KeyQueryMetadata(activeHost, standbyHosts, partition);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) HostInfo(org.apache.kafka.streams.state.HostInfo) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) HashSet(java.util.HashSet)
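
The snippets above reach this lookup through the two public overloads of KafkaStreams#queryMetadataForKey: Examples 11 and 15 pass the key's Serializer (Example 11 additionally names the topology), while Examples 12 and 13 pass a custom StreamPartitioner that pins every key to partition 0. A minimal side-by-side sketch, assuming a running streams instance and a store named "my-store":

// Overload 1: let Streams hash the serialized key with its default partitioner.
final KeyQueryMetadata byDefaultPartitioner =
    streams.queryMetadataForKey("my-store", 1, new IntegerSerializer());

// Overload 2: supply the same custom partitioning logic that was used when producing,
// here the lambda from the tests above that sends every key to partition 0.
final KeyQueryMetadata byCustomPartitioner =
    streams.queryMetadataForKey("my-store", 1, (topic, key, value, numPartitions) -> 0);

// Either way, the result carries the partition plus its active and standby hosts.
final int partition = byDefaultPartitioner.partition();
final HostInfo active = byDefaultPartitioner.activeHost();
final Set<HostInfo> standbys = byDefaultPartitioner.standbyHosts();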

Example 15 with KeyQueryMetadata

use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.

the class StreamsMetadataStateTest method shouldReturnNullOnGetWithKeyWhenStoreDoesntExist.

@Test
public void shouldReturnNullOnGetWithKeyWhenStoreDoesntExist() {
    final KeyQueryMetadata actual = metadataState.getKeyQueryMetadataForKey("not-a-store", "key", Serdes.String().serializer());
    assertNull(actual);
}
Also used : KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) Test(org.junit.Test)

Aggregations

KeyQueryMetadata (org.apache.kafka.streams.KeyQueryMetadata): 17 uses
Test (org.junit.Test): 14 uses
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 8 uses
ReadOnlyKeyValueStore (org.apache.kafka.streams.state.ReadOnlyKeyValueStore): 7 uses
Semaphore (java.util.concurrent.Semaphore): 6 uses
IntegrationTest (org.apache.kafka.test.IntegrationTest): 6 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 5 uses
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 5 uses
InvalidStateStoreException (org.apache.kafka.streams.errors.InvalidStateStoreException): 4 uses
Properties (java.util.Properties): 3 uses
PartitionInfo (org.apache.kafka.common.PartitionInfo): 3 uses
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 3 uses
IOException (java.io.IOException): 2 uses
ArrayList (java.util.ArrayList): 2 uses
TreeMap (java.util.TreeMap): 2 uses
IntegerSerializer (org.apache.kafka.common.serialization.IntegerSerializer): 2 uses
StreamsMetadata (org.apache.kafka.streams.StreamsMetadata): 2 uses
KafkaStreamsNamedTopologyWrapper (org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper): 2 uses
NamedTopologyBuilder (org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder): 2 uses
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 2 uses