
Example 21 with ReadOnlyKeyValueStore

Use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project kafka by apache.

From class QueryableStateIntegrationTest, method shouldBeAbleToQueryFilterState.

@Test
public void shouldBeAbleToQueryFilterState() throws Exception {
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass());
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] keys = { "hello", "goodbye", "welcome", "go", "kafka" };
    final Set<KeyValue<String, Long>> batch1 = new HashSet<>(Arrays.asList(
        new KeyValue<>(keys[0], 1L),
        new KeyValue<>(keys[1], 1L),
        new KeyValue<>(keys[2], 3L),
        new KeyValue<>(keys[3], 5L),
        new KeyValue<>(keys[4], 2L)));
    final Set<KeyValue<String, Long>> expectedBatch1 = new HashSet<>(Collections.singleton(new KeyValue<>(keys[4], 2L)));
    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        batch1,
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, LongSerializer.class, new Properties()),
        mockTime);
    final Predicate<String, Long> filterPredicate = (key, value) -> key.contains("kafka");
    final KTable<String, Long> t1 = builder.table(streamOne);
    final KTable<String, Long> t2 = t1.filter(filterPredicate, Materialized.as("queryFilter"));
    t1.filterNot(filterPredicate, Materialized.as("queryFilterNot"));
    t2.toStream().to(outputTopic);
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    startKafkaStreamsAndWaitForRunningState(kafkaStreams);
    waitUntilAtLeastNumRecordProcessed(outputTopic, 1);
    final ReadOnlyKeyValueStore<String, Long> myFilterStore = IntegrationTestUtils.getStore("queryFilter", kafkaStreams, keyValueStore());
    final ReadOnlyKeyValueStore<String, Long> myFilterNotStore = IntegrationTestUtils.getStore("queryFilterNot", kafkaStreams, keyValueStore());
    for (final KeyValue<String, Long> expectedEntry : expectedBatch1) {
        TestUtils.waitForCondition(() -> expectedEntry.value.equals(myFilterStore.get(expectedEntry.key)), "Cannot get expected result");
    }
    for (final KeyValue<String, Long> batchEntry : batch1) {
        if (!expectedBatch1.contains(batchEntry)) {
            TestUtils.waitForCondition(() -> myFilterStore.get(batchEntry.key) == null, "Cannot get null result");
        }
    }
    for (final KeyValue<String, Long> expectedEntry : expectedBatch1) {
        TestUtils.waitForCondition(() -> myFilterNotStore.get(expectedEntry.key) == null, "Cannot get null result");
    }
    for (final KeyValue<String, Long> batchEntry : batch1) {
        if (!expectedBatch1.contains(batchEntry)) {
            TestUtils.waitForCondition(() -> batchEntry.value.equals(myFilterNotStore.get(batchEntry.key)), "Cannot get expected result");
        }
    }
}
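The pattern this test exercises reduces to a few lines of the public interactive-query API; IntegrationTestUtils.getStore is essentially a retrying wrapper around it. A minimal sketch, assuming a running KafkaStreams instance whose topology materialized a key-value store named "queryFilter" as above:

final ReadOnlyKeyValueStore<String, Long> store = kafkaStreams.store(
    StoreQueryParameters.fromNameAndType("queryFilter", QueryableStoreTypes.keyValueStore()));
// Point lookup: returns the current value, or null if the key is absent
// from the partitions hosted by this instance
final Long value = store.get("kafka");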

Example 22 with ReadOnlyKeyValueStore

use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project kafka by apache.

From class NamedTopologyIntegrationTest, method shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology.

@Test
public void shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology() throws Exception {
    try {
        // For this test, one of the topologies reads from an input topic with just one partition,
        // so there is only one instance of that topology's store, and the methods that look up all
        // hosts with a specific store and topology should always return exactly one StreamsMetadata
        CLUSTER.createTopic(SINGLE_PARTITION_INPUT_STREAM, 1, 1);
        CLUSTER.createTopic(SINGLE_PARTITION_OUTPUT_STREAM, 1, 1);
        produceToInputTopics(SINGLE_PARTITION_INPUT_STREAM, STANDARD_INPUT_DATA);
        final String topology1Store = "store-" + TOPOLOGY_1;
        final String topology2Store = "store-" + TOPOLOGY_2;
        topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams.addNamedTopology(topology1Builder.build());
        streams.removeNamedTopology(TOPOLOGY_1);
        assertThat(streams.getTopologyByName(TOPOLOGY_1), is(Optional.empty()));
        streams.addNamedTopology(topology1Builder.build());
        streams.addNamedTopology(topology2Builder.build());
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(15));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SINGLE_PARTITION_OUTPUT_STREAM, 3), equalTo(COUNT_OUTPUT_DATA));
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(
            NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(
                TOPOLOGY_1, topology1Store, QueryableStoreTypes.keyValueStore()));
        assertThat(store.get("A"), equalTo(2L));
        final Collection<StreamsMetadata> streamsMetadata = streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1);
        final Collection<StreamsMetadata> streamsMetadata2 = streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2);
        assertThat(streamsMetadata.size(), equalTo(1));
        assertThat(streamsMetadata2.size(), equalTo(1));
        final KeyQueryMetadata keyMetadata = streams.queryMetadataForKey(topology1Store, "A", new StringSerializer(), TOPOLOGY_1);
        final KeyQueryMetadata keyMetadata2 = streams.queryMetadataForKey(topology2Store, "A", new StringSerializer(), TOPOLOGY_2);
        assertThat(keyMetadata, not(NOT_AVAILABLE));
        assertThat(keyMetadata, equalTo(keyMetadata2));
        final Map<String, Map<Integer, LagInfo>> partitionLags1 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_1);
        final Map<String, Map<Integer, LagInfo>> partitionLags2 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_2);
        assertThat(partitionLags1.keySet(), equalTo(singleton(topology1Store)));
        assertThat(partitionLags1.get(topology1Store).keySet(), equalTo(mkSet(0, 1)));
        assertThat(partitionLags2.keySet(), equalTo(singleton(topology2Store)));
        // only one copy of the store in topology-2
        assertThat(partitionLags2.get(topology2Store).keySet(), equalTo(singleton(0)));
        // Start up a second node with both topologies
        setupSecondKafkaStreams();
        topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder2.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams2.start(asList(topology1Builder2.build(), topology2Builder2.build()));
        waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
        verifyMetadataForTopology(TOPOLOGY_1, streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1), streams2.streamsMetadataForStore(topology1Store, TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2), streams2.streamsMetadataForStore(topology2Store, TOPOLOGY_2));
        verifyMetadataForTopology(TOPOLOGY_1, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_1), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_2), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_2));
    } finally {
        CLUSTER.deleteTopics(SINGLE_PARTITION_INPUT_STREAM, SINGLE_PARTITION_OUTPUT_STREAM);
    }
}
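The metadata assertions above mirror how an application routes interactive queries across instances. A minimal sketch of that routing with the public API (the store name, myHost, and myPort are illustrative placeholders; the named-topology variants used in the test live in an internal package):

final KeyQueryMetadata metadata = streams.queryMetadataForKey("count-store", "A", new StringSerializer());
if (metadata.activeHost().host().equals(myHost) && metadata.activeHost().port() == myPort) {
    // The active task for this key runs locally, so query the local store
    final ReadOnlyKeyValueStore<String, Long> store = streams.store(
        StoreQueryParameters.fromNameAndType("count-store", QueryableStoreTypes.keyValueStore()));
    final Long count = store.get("A");
} else {
    // Otherwise forward the request to metadata.activeHost(), e.g. over HTTP
}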

Example 23 with ReadOnlyKeyValueStore

use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project kafka by apache.

From class StoreQueryIntegrationTest, method shouldQuerySpecificActivePartitionStores.

@Test
public void shouldQuerySpecificActivePartitionStores() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> {
        final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
        // the partition this key belongs to
        final int keyPartition = keyQueryMetadata.partition();
        // a partition this key does not belong to
        final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
        final boolean kafkaStreams1IsActive = (keyQueryMetadata.activeHost().port() % 2) == 1;
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam =
            StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore())
                .withPartition(keyPartition);
        ReadOnlyKeyValueStore<Integer, Integer> store1 = null;
        ReadOnlyKeyValueStore<Integer, Integer> store2 = null;
        if (kafkaStreams1IsActive) {
            store1 = getStore(kafkaStreams1, storeQueryParam);
        } else {
            store2 = getStore(kafkaStreams2, storeQueryParam);
        }
        if (kafkaStreams1IsActive) {
            assertThat(store1, is(notNullValue()));
            assertThat(store2, is(nullValue()));
        } else {
            assertThat(store2, is(notNullValue()));
            assertThat(store1, is(nullValue()));
        }
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam2 =
            StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore())
                .withPartition(keyDontBelongPartition);
        try {
            // Querying a partition the key does not belong to: the instance that hosts that partition
            // returns null for the key, while the other instance throws because it does not host the
            // requested partition
            if (kafkaStreams1IsActive) {
                assertThat(store1.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams2, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception = assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams1, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            } else {
                assertThat(store2.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams1, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception = assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams2, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            }
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
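Scoping a query to a single partition, as this test does, is plain StoreQueryParameters usage. A minimal sketch under the same two-instance setup (store name and partition number are illustrative):

final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> params =
    StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType("source-table", QueryableStoreTypes.keyValueStore())
        .withPartition(0);
try {
    final Integer value = streams.store(params).get(1);
} catch (final InvalidStateStoreException e) {
    // Thrown if this instance does not host the requested partition,
    // or transiently while a rebalance is in progress
}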

Example 24 with ReadOnlyKeyValueStore

use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project kafka by apache.

From class StoreQueryIntegrationTest, method shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology.

@Test
public void shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final int numStreamThreads = 2;
    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final Properties streamsConfiguration2 = streamsConfiguration();
    streamsConfiguration2.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final String topologyA = "topology-A";
    final KafkaStreamsNamedTopologyWrapper kafkaStreams1 = createNamedTopologyKafkaStreams(streamsConfiguration1);
    final KafkaStreamsNamedTopologyWrapper kafkaStreams2 = createNamedTopologyKafkaStreams(streamsConfiguration2);
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    final NamedTopologyBuilder builder1A = kafkaStreams1.newNamedTopologyBuilder(topologyA, streamsConfiguration1);
    getStreamsBuilderWithTopology(builder1A, semaphore);
    final NamedTopologyBuilder builder2A = kafkaStreams2.newNamedTopologyBuilder(topologyA, streamsConfiguration2);
    getStreamsBuilderWithTopology(builder2A, semaphore);
    kafkaStreams1.start(builder1A.build());
    kafkaStreams2.start(builder2A.build());
    waitForApplicationState(kafkaStreamsList, State.RUNNING, Duration.ofSeconds(60));
    assertTrue(kafkaStreams1.metadataForLocalThreads().size() > 1);
    assertTrue(kafkaStreams2.metadataForLocalThreads().size() > 1);
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer(), topologyA);
    // the partition this key belongs to
    final int keyPartition = keyQueryMetadata.partition();
    // a partition this key does not belong to
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
    // Assert that both active and standby are able to query for a key
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param =
        NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
            .enableStaleStores()
            .withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam =
        NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
            .enableStaleStores()
            .withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);
    // Assert that querying the partition the key does not belong to returns null from both instances
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
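Outside of named topologies, the same standby-querying behavior is exposed through StoreQueryParameters.enableStaleStores(). A minimal sketch (TABLE_NAME and keyPartition are reused from the test for illustration):

final ReadOnlyKeyValueStore<Integer, Integer> staleStore = streams.store(
    StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, QueryableStoreTypes.keyValueStore())
        .enableStaleStores()
        .withPartition(keyPartition));
// Standby reads may lag the active copy, so a null here does not prove
// the key is absent from the active store
final Integer maybeStale = staleStore.get(1);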

Example 25 with ReadOnlyKeyValueStore

use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project kafka by apache.

From class StoreQueryIntegrationTest, method shouldQueryOnlyActivePartitionStoresByDefault.

@Test
public void shouldQueryOnlyActivePartitionStoresByDefault() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> {
        final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
        final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(TABLE_NAME, kafkaStreams1, queryableStoreType);
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(TABLE_NAME, kafkaStreams2, queryableStoreType);
        final boolean kafkaStreams1IsActive = (keyQueryMetadata.activeHost().port() % 2) == 1;
        try {
            if (kafkaStreams1IsActive) {
                assertThat(store1.get(key), is(notNullValue()));
                assertThat(store2.get(key), is(nullValue()));
            } else {
                assertThat(store1.get(key), is(nullValue()));
                assertThat(store2.get(key), is(notNullValue()));
            }
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
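The until(...) wrapper and the InvalidStateStoreException handling in these tests reflect the shape production query code needs: stores are transiently unqueryable during rebalances, so callers should retry. A minimal sketch of such a retry loop (timeout and backoff values are illustrative):

static Integer queryWithRetry(final KafkaStreams streams, final String storeName, final int key) throws InterruptedException {
    final long deadline = System.currentTimeMillis() + 30_000L;
    while (true) {
        try {
            return streams.store(
                StoreQueryParameters.fromNameAndType(storeName, QueryableStoreTypes.<Integer, Integer>keyValueStore()))
                .get(key);
        } catch (final InvalidStateStoreException e) {
            // Store not queryable yet (e.g. a rebalance in flight); back off and retry
            if (System.currentTimeMillis() > deadline) {
                throw e;
            }
            Thread.sleep(100L);
        }
    }
}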

Aggregations

ReadOnlyKeyValueStore (org.apache.kafka.streams.state.ReadOnlyKeyValueStore): 29
Test (org.junit.Test): 20
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 17
IntegrationTest (org.apache.kafka.test.IntegrationTest): 17
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 16
InvalidStateStoreException (org.apache.kafka.streams.errors.InvalidStateStoreException): 15
Properties (java.util.Properties): 11
KeyValue (org.apache.kafka.streams.KeyValue): 10
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 10
Semaphore (java.util.concurrent.Semaphore): 9
KeyQueryMetadata (org.apache.kafka.streams.KeyQueryMetadata): 9
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 8
HashSet (java.util.HashSet): 6
KafkaStreamsTest (org.apache.kafka.streams.KafkaStreamsTest): 6
Map (java.util.Map): 5
List (java.util.List): 4
Utils.mkProperties (org.apache.kafka.common.utils.Utils.mkProperties): 4
Duration (java.time.Duration): 3
Arrays (java.util.Arrays): 3
Collections (java.util.Collections): 3