use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.
the class StoreQueryIntegrationTest method shouldQuerySpecificStalePartitionStoresMultiStreamThreads.
@Test
public void shouldQuerySpecificStalePartitionStoresMultiStreamThreads() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final int numStreamThreads = 2;
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final Properties streamsConfiguration2 = streamsConfiguration();
    streamsConfiguration2.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration1);
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration2);
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    assertTrue(kafkaStreams1.metadataForLocalThreads().size() > 1);
    assertTrue(kafkaStreams2.metadataForLocalThreads().size() > 1);
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer());
    // the partition the key belongs to
    final int keyPartition = keyQueryMetadata.partition();
    // a partition the key does not belong to
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
    // Assert that both active and standby are able to query for a key
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param = StoreQueryParameters
        .fromNameAndType(TABLE_NAME, queryableStoreType)
        .enableStaleStores()
        .withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");
    final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam = StoreQueryParameters
        .fromNameAndType(TABLE_NAME, queryableStoreType)
        .enableStaleStores()
        .withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);
    // Assert that the key is not found when querying a partition it does not belong to
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
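The pattern exercised above — resolve the key's partition via queryMetadataForKey, then pin a StoreQueryParameters to that partition with stale stores enabled so standby or restoring copies may answer — generalizes beyond the test harness. A minimal sketch of that lookup, assuming a running KafkaStreams instance and an Integer-keyed store (StalePartitionLookup and readAllowingStale are illustrative names, not part of the test):

import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public final class StalePartitionLookup {

    // Query a key from whichever local copy (active or standby) hosts its partition.
    // Returns null if the key is absent or metadata is unavailable; standbys may serve stale data.
    public static Integer readAllowingStale(final KafkaStreams streams, final String storeName, final int key) {
        // Locate the owning partition, using the same serializer the topology writes keys with.
        final KeyQueryMetadata metadata = streams.queryMetadataForKey(storeName, key, new IntegerSerializer());
        if (metadata == null || metadata.equals(KeyQueryMetadata.NOT_AVAILABLE)) {
            return null; // metadata not known yet, e.g. during a rebalance
        }
        // Restrict the query to that single partition and allow stale stores to answer.
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> params =
            StoreQueryParameters.fromNameAndType(storeName, QueryableStoreTypes.<Integer, Integer>keyValueStore())
                .enableStaleStores()
                .withPartition(metadata.partition());
        return streams.store(params).get(key);
    }
}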
use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.
the class OptimizedKTableIntegrationTest method shouldApplyUpdatesToStandbyStore.
@Test
public void shouldApplyUpdatesToStandbyStore() throws Exception {
    final int batch1NumMessages = 100;
    final int batch2NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(INPUT_TOPIC_NAME, Consumed.with(Serdes.Integer(), Serdes.Integer()),
            Materialized.<Integer, Integer, KeyValueStore<Bytes, byte[]>>as(TABLE_NAME).withCachingDisabled())
        .toStream()
        .peek((k, v) -> semaphore.release());
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final ReadOnlyKeyValueStore<Integer, Integer> store1 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams1, QueryableStoreTypes.keyValueStore());
    final ReadOnlyKeyValueStore<Integer, Integer> store2 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams2, QueryableStoreTypes.keyValueStore());
    final boolean kafkaStreams1WasFirstActive;
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
    // Assert that the current value in store reflects all messages being processed
    if ((keyQueryMetadata.activeHost().port() % 2) == 1) {
        assertThat(store1.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = true;
    } else {
        assertThat(store2.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = false;
    }
    if (kafkaStreams1WasFirstActive) {
        kafkaStreams1.close();
    } else {
        kafkaStreams2.close();
    }
    final ReadOnlyKeyValueStore<Integer, Integer> newActiveStore = kafkaStreams1WasFirstActive ? store2 : store1;
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that after failover we have recovered to the last store write
        assertThat(newActiveStore.get(key), is(equalTo(batch1NumMessages - 1)));
    });
    final int totalNumMessages = batch1NumMessages + batch2NumMessages;
    produceValueRange(key, batch1NumMessages, totalNumMessages);
    // Assert that all messages in the second batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch2NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that the current value in store reflects all messages being processed
        assertThat(newActiveStore.get(key), is(equalTo(totalNumMessages - 1)));
    });
}
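Beyond test assertions, the usual production use of KeyQueryMetadata is to decide whether the local instance owns a key or the query must be forwarded to activeHost(). A minimal sketch of that check, assuming localHost mirrors this instance's application.server configuration (ActiveHostCheck and isActiveForKey are illustrative names):

import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.state.HostInfo;

public final class ActiveHostCheck {

    // Decide whether this instance should serve a fresh read for the key, or forward the
    // request to metadata.activeHost(). 'localHost' is assumed to match application.server.
    public static boolean isActiveForKey(final KafkaStreams streams, final String storeName,
                                         final int key, final HostInfo localHost) {
        final KeyQueryMetadata metadata = streams.queryMetadataForKey(storeName, key, new IntegerSerializer());
        if (metadata == null || metadata.equals(KeyQueryMetadata.NOT_AVAILABLE)) {
            return false; // assignment not known yet, e.g. mid-rebalance
        }
        // activeHost() is the instance hosting the active task; standbyHosts() hold the replicas.
        return metadata.activeHost().equals(localHost);
    }
}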
use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.
the class QueryableStateIntegrationTest method verifyAllWindowedKeys.
private void verifyAllWindowedKeys(final List<KafkaStreams> streamsList,
                                   final KafkaStreams streams,
                                   final KafkaStreamsTest.StateListenerStub stateListenerStub,
                                   final Set<String> keys,
                                   final String storeName,
                                   final Long from,
                                   final Long to,
                                   final long timeout,
                                   final boolean pickInstanceByPort) throws Exception {
    retryOnExceptionWithTimeout(timeout, () -> {
        final List<String> noMetadataKeys = new ArrayList<>();
        final List<String> nullStoreKeys = new ArrayList<>();
        final List<String> nullValueKeys = new ArrayList<>();
        final Map<String, Exception> exceptionalKeys = new TreeMap<>();
        final StringSerializer serializer = new StringSerializer();
        for (final String key : keys) {
            try {
                final KeyQueryMetadata queryMetadata = streams.queryMetadataForKey(storeName, key, serializer);
                if (queryMetadata == null || queryMetadata.equals(KeyQueryMetadata.NOT_AVAILABLE)) {
                    noMetadataKeys.add(key);
                    continue;
                }
                if (pickInstanceByPort) {
                    assertThat(queryMetadata.standbyHosts().size(), equalTo(0));
                } else {
                    assertThat("Should have standbys to query from", !queryMetadata.standbyHosts().isEmpty());
                }
                final int index = queryMetadata.activeHost().port();
                final KafkaStreams streamsWithKey = pickInstanceByPort ? streamsList.get(index) : streams;
                final ReadOnlyWindowStore<String, Long> store = IntegrationTestUtils.getStore(storeName, streamsWithKey, true, QueryableStoreTypes.windowStore());
                if (store == null) {
                    nullStoreKeys.add(key);
                    continue;
                }
                if (store.fetch(key, ofEpochMilli(from), ofEpochMilli(to)) == null) {
                    nullValueKeys.add(key);
                }
            } catch (final InvalidStateStoreException e) {
                // an InvalidStateStoreException is acceptable only if at least one rebalance has occurred
                if (stateListenerStub.mapStates.get(KafkaStreams.State.REBALANCING) < 1) {
                    throw new NoRetryException(new AssertionError(
                        String.format("Received %s for key %s and expected at least one rebalancing state, but had none",
                            e.getClass().getName(), key)));
                }
            } catch (final Exception e) {
                exceptionalKeys.put(key, e);
            }
        }
        assertNoKVKeyFailures(storeName, timeout, noMetadataKeys, nullStoreKeys, nullValueKeys, exceptionalKeys);
    });
}
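The helper above also demonstrates the standard guard — treat a null or NOT_AVAILABLE KeyQueryMetadata as "not ready yet" — before fetching from a window store. A compact sketch of the same guard around a ranged window fetch; WindowedKeyLookup, sumWindows, and the Long-valued store are assumptions for illustration:

import java.time.Instant;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.apache.kafka.streams.state.WindowStoreIterator;

public final class WindowedKeyLookup {

    // Sum all window values for a key in [from, to], guarding against unavailable metadata first.
    public static long sumWindows(final KafkaStreams streams, final String storeName,
                                  final String key, final Instant from, final Instant to) {
        final KeyQueryMetadata metadata = streams.queryMetadataForKey(storeName, key, new StringSerializer());
        if (metadata == null || metadata.equals(KeyQueryMetadata.NOT_AVAILABLE)) {
            throw new IllegalStateException("Key metadata not available yet; retry after the rebalance settles");
        }
        final ReadOnlyWindowStore<String, Long> store = streams.store(
            StoreQueryParameters.fromNameAndType(storeName, QueryableStoreTypes.<String, Long>windowStore()));
        long sum = 0;
        // fetch(key, from, to) iterates this key's windows; close the iterator to free resources
        try (final WindowStoreIterator<Long> iter = store.fetch(key, from, to)) {
            while (iter.hasNext()) {
                sum += iter.next().value;
            }
        }
        return sum;
    }
}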
use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.
the class NamedTopologyIntegrationTest method shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology.
@Test
public void shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology() throws Exception {
    try {
        // for this test we have one of the topologies read from an input topic with just one partition so
        // that there's only one instance of that topology's store and thus should always have exactly one
        // StreamsMetadata returned by any of the methods that look up all hosts with a specific store and topology
        CLUSTER.createTopic(SINGLE_PARTITION_INPUT_STREAM, 1, 1);
        CLUSTER.createTopic(SINGLE_PARTITION_OUTPUT_STREAM, 1, 1);
        produceToInputTopics(SINGLE_PARTITION_INPUT_STREAM, STANDARD_INPUT_DATA);
        final String topology1Store = "store-" + TOPOLOGY_1;
        final String topology2Store = "store-" + TOPOLOGY_2;
        topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams.addNamedTopology(topology1Builder.build());
        streams.removeNamedTopology(TOPOLOGY_1);
        assertThat(streams.getTopologyByName(TOPOLOGY_1), is(Optional.empty()));
        streams.addNamedTopology(topology1Builder.build());
        streams.addNamedTopology(topology2Builder.build());
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(15));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SINGLE_PARTITION_OUTPUT_STREAM, 3), equalTo(COUNT_OUTPUT_DATA));
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(
            NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(TOPOLOGY_1, topology1Store, QueryableStoreTypes.keyValueStore()));
        assertThat(store.get("A"), equalTo(2L));
        final Collection<StreamsMetadata> streamsMetadata = streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1);
        final Collection<StreamsMetadata> streamsMetadata2 = streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2);
        assertThat(streamsMetadata.size(), equalTo(1));
        assertThat(streamsMetadata2.size(), equalTo(1));
        final KeyQueryMetadata keyMetadata = streams.queryMetadataForKey(topology1Store, "A", new StringSerializer(), TOPOLOGY_1);
        final KeyQueryMetadata keyMetadata2 = streams.queryMetadataForKey(topology2Store, "A", new StringSerializer(), TOPOLOGY_2);
        assertThat(keyMetadata, not(NOT_AVAILABLE));
        assertThat(keyMetadata, equalTo(keyMetadata2));
        final Map<String, Map<Integer, LagInfo>> partitionLags1 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_1);
        final Map<String, Map<Integer, LagInfo>> partitionLags2 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_2);
        assertThat(partitionLags1.keySet(), equalTo(singleton(topology1Store)));
        assertThat(partitionLags1.get(topology1Store).keySet(), equalTo(mkSet(0, 1)));
        assertThat(partitionLags2.keySet(), equalTo(singleton(topology2Store)));
        // only one copy of the store in topology-2
        assertThat(partitionLags2.get(topology2Store).keySet(), equalTo(singleton(0)));
        // Start up a second node with both topologies
        setupSecondKafkaStreams();
        topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
        topology2Builder2.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams2.start(asList(topology1Builder2.build(), topology2Builder2.build()));
        waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
        verifyMetadataForTopology(TOPOLOGY_1, streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1), streams2.streamsMetadataForStore(topology1Store, TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2), streams2.streamsMetadataForStore(topology2Store, TOPOLOGY_2));
        verifyMetadataForTopology(TOPOLOGY_1, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_1), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_2), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_2));
    } finally {
        CLUSTER.deleteTopics(SINGLE_PARTITION_INPUT_STREAM, SINGLE_PARTITION_OUTPUT_STREAM);
    }
}
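The named-topology overloads used here live on the internal KafkaStreamsNamedTopologyWrapper, so they may change without notice. On the plain KafkaStreams API, the analogous discovery step — listing every instance that hosts a store before asking queryMetadataForKey about one key — might look like this sketch (StoreHostDiscovery and listHosts are illustrative names):

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsMetadata;

public final class StoreHostDiscovery {

    // Print every instance that hosts some partition of the given store,
    // distinguishing active partitions from standby replicas.
    public static void listHosts(final KafkaStreams streams, final String storeName) {
        for (final StreamsMetadata metadata : streams.streamsMetadataForStore(storeName)) {
            System.out.printf("host=%s:%d active-partitions=%s standby-stores=%s%n",
                metadata.hostInfo().host(),
                metadata.hostInfo().port(),
                metadata.topicPartitions(),
                metadata.standbyStateStoreNames());
        }
    }
}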
use of org.apache.kafka.streams.KeyQueryMetadata in project kafka by apache.
the class StoreQueryIntegrationTest method shouldQuerySpecificActivePartitionStores.
@Test
public void shouldQuerySpecificActivePartitionStores() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> {
        final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
        // the partition the key belongs to
        final int keyPartition = keyQueryMetadata.partition();
        // a partition the key does not belong to
        final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
        final boolean kafkaStreams1IsActive = (keyQueryMetadata.activeHost().port() % 2) == 1;
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam =
            StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore())
                .withPartition(keyPartition);
        ReadOnlyKeyValueStore<Integer, Integer> store1 = null;
        ReadOnlyKeyValueStore<Integer, Integer> store2 = null;
        if (kafkaStreams1IsActive) {
            store1 = getStore(kafkaStreams1, storeQueryParam);
        } else {
            store2 = getStore(kafkaStreams2, storeQueryParam);
        }
        if (kafkaStreams1IsActive) {
            assertThat(store1, is(notNullValue()));
            assertThat(store2, is(nullValue()));
        } else {
            assertThat(store2, is(notNullValue()));
            assertThat(store1, is(nullValue()));
        }
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam2 =
            StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore())
                .withPartition(keyDontBelongPartition);
        try {
            // Querying the wrong partition: the instance that actively hosts it returns no value for the key,
            // while the instance that does not host it at all throws an InvalidStateStoreException
            if (kafkaStreams1IsActive) {
                assertThat(store1.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams2, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception =
                    assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams1, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            } else {
                assertThat(store2.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams1, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception =
                    assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams2, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            }
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
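The until(...) loop above retries because interactive queries can fail transiently with InvalidStateStoreException while the group rebalances. Outside a test harness, the same idea is often wrapped in a small retry helper; a generic sketch under that assumption (RetryingQuery and withRetries are illustrative names, not Kafka API):

import java.time.Duration;
import java.util.function.Supplier;
import org.apache.kafka.streams.errors.InvalidStateStoreException;

public final class RetryingQuery {

    // Run an interactive query, retrying on InvalidStateStoreException with a fixed backoff.
    // Rethrows the last failure once maxAttempts is exhausted.
    public static <T> T withRetries(final Supplier<T> query, final int maxAttempts, final Duration backoff)
            throws InterruptedException {
        InvalidStateStoreException last = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return query.get();
            } catch (final InvalidStateStoreException e) {
                last = e; // store migrated, closed, or still restoring; back off and retry
                Thread.sleep(backoff.toMillis());
            }
        }
        throw last != null ? last : new IllegalArgumentException("maxAttempts must be at least 1");
    }
}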