Use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project kafka by apache: class StoreQueryIntegrationTest, method shouldQueryStoresAfterAddingAndRemovingStreamThread.
@Test
public void shouldQueryStoresAfterAddingAndRemovingStreamThread() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final int key2 = 2;
    final int key3 = 3;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration1);
    startApplicationAndWaitUntilRunning(singletonList(kafkaStreams1), Duration.ofSeconds(60));
    // Add thread
    final Optional<String> streamThread = kafkaStreams1.addStreamThread();
    assertThat(streamThread.isPresent(), is(true));
    until(() -> kafkaStreams1.state().isRunningOrRebalancing());
    produceValueRange(key, 0, batch1NumMessages);
    produceValueRange(key2, 0, batch1NumMessages);
    produceValueRange(key3, 0, batch1NumMessages);
    // Assert that all messages in the batches were processed in a timely manner
    assertThat(semaphore.tryAcquire(3 * batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> KafkaStreams.State.RUNNING.equals(kafkaStreams1.state()));
    until(() -> {
        final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(TABLE_NAME, kafkaStreams1, queryableStoreType);
        try {
            assertThat(store1.get(key), is(notNullValue()));
            assertThat(store1.get(key2), is(notNullValue()));
            assertThat(store1.get(key3), is(notNullValue()));
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
    final Optional<String> removedThreadName = kafkaStreams1.removeStreamThread();
    assertThat(removedThreadName.isPresent(), is(true));
    until(() -> kafkaStreams1.state().isRunningOrRebalancing());
    until(() -> KafkaStreams.State.RUNNING.equals(kafkaStreams1.state()));
    until(() -> {
        final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(TABLE_NAME, kafkaStreams1, queryableStoreType);
        try {
            assertThat(store1.get(key), is(notNullValue()));
            assertThat(store1.get(key2), is(notNullValue()));
            assertThat(store1.get(key3), is(notNullValue()));
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
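The getStore and until calls above are test-local helpers. For reference, this is a minimal sketch of how a ReadOnlyKeyValueStore is typically obtained from a running KafkaStreams instance through the public interactive-query API (StoreQueryParameters and QueryableStoreTypes); the helper's actual implementation is not shown here, so the sketch is an assumption rather than the test's real code:

// Hypothetical lookup helper: fetch a read-only key-value store by name from a running KafkaStreams instance.
// Uses org.apache.kafka.streams.StoreQueryParameters and org.apache.kafka.streams.state.QueryableStoreTypes.
static ReadOnlyKeyValueStore<Integer, Integer> lookupStore(final KafkaStreams streams, final String storeName) {
    return streams.store(
        StoreQueryParameters.fromNameAndType(storeName, QueryableStoreTypes.<Integer, Integer>keyValueStore()));
}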
Use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project kafka-streams-examples by confluentinc: class KafkaMusicExampleTest, method shouldCreateChartsAndAccessThemViaInteractiveQueries.
@Test
public void shouldCreateChartsAndAccessThemViaInteractiveQueries() throws Exception {
    final Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    final Map<String, String> serdeConfig = Collections.singletonMap(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl());
    final SpecificAvroSerializer<PlayEvent> playEventSerializer = new SpecificAvroSerializer<>();
    playEventSerializer.configure(serdeConfig, false);
    final SpecificAvroSerializer<Song> songSerializer = new SpecificAvroSerializer<>();
    songSerializer.configure(serdeConfig, false);
    final KafkaProducer<String, PlayEvent> playEventProducer = new KafkaProducer<>(props, Serdes.String().serializer(), playEventSerializer);
    final KafkaProducer<Long, Song> songProducer = new KafkaProducer<>(props, new LongSerializer(), songSerializer);
    final List<Song> songs = Arrays.asList(
        new Song(1L, "Fresh Fruit For Rotting Vegetables", "Dead Kennedys", "Chemical Warfare", "Punk"),
        new Song(2L, "We Are the League", "Anti-Nowhere League", "Animal", "Punk"),
        new Song(3L, "Live In A Dive", "Subhumans", "All Gone Dead", "Punk"),
        new Song(4L, "PSI", "Wheres The Pope?", "Fear Of God", "Punk"),
        new Song(5L, "Totally Exploited", "The Exploited", "Punks Not Dead", "Punk"),
        new Song(6L, "The Audacity Of Hype", "Jello Biafra And The Guantanamo School Of " + "Medicine", "Three Strikes", "Punk"),
        new Song(7L, "Licensed to Ill", "The Beastie Boys", "Fight For Your Right", "Hip Hop"),
        new Song(8L, "De La Soul Is Dead", "De La Soul", "Oodles Of O's", "Hip Hop"),
        new Song(9L, "Straight Outta Compton", "N.W.A", "Gangsta Gangsta", "Hip Hop"),
        new Song(10L, "Fear Of A Black Planet", "Public Enemy", "911 Is A Joke", "Hip Hop"),
        new Song(11L, "Curtain Call - The Hits", "Eminem", "Fack", "Hip Hop"),
        new Song(12L, "The Calling", "Hilltop Hoods", "The Calling", "Hip Hop"));
    songs.forEach(song -> songProducer.send(new ProducerRecord<Long, Song>(KafkaMusicExample.SONG_FEED, song.getId(), song)));
    songProducer.flush();
    songProducer.close();
    // create the play events we can use for charting
    sendPlayEvents(6, songs.get(0), playEventProducer);
    sendPlayEvents(5, songs.get(1), playEventProducer);
    sendPlayEvents(4, songs.get(2), playEventProducer);
    sendPlayEvents(3, songs.get(3), playEventProducer);
    sendPlayEvents(2, songs.get(4), playEventProducer);
    sendPlayEvents(1, songs.get(5), playEventProducer);
    sendPlayEvents(6, songs.get(6), playEventProducer);
    sendPlayEvents(5, songs.get(7), playEventProducer);
    sendPlayEvents(4, songs.get(8), playEventProducer);
    sendPlayEvents(3, songs.get(9), playEventProducer);
    sendPlayEvents(2, songs.get(10), playEventProducer);
    sendPlayEvents(1, songs.get(11), playEventProducer);
    playEventProducer.close();
    streams.start();
    // wait until the StreamsMetadata is available as this indicates that
    // KafkaStreams initialization has occurred
    TestUtils.waitForCondition(
        () -> !StreamsMetadata.NOT_AVAILABLE.equals(streams.allMetadataForStore(KafkaMusicExample.TOP_FIVE_SONGS_STORE)),
        MAX_WAIT_MS,
        "StreamsMetadata should be available");
    final String baseUrl = "http://localhost:" + appServerPort + "/kafka-music";
    final Client client = ClientBuilder.newClient();
    // Wait until the all-songs state store has some data in it
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Long, Song> songsStore;
        try {
            songsStore = streams.store(KafkaMusicExample.ALL_SONGS, QueryableStoreTypes.<Long, Song>keyValueStore());
            return songsStore.all().hasNext();
        } catch (Exception e) {
            return false;
        }
    }, MAX_WAIT_MS, KafkaMusicExample.ALL_SONGS + " should be non-empty");
    final IntFunction<SongPlayCountBean> intFunction = index -> {
        final Song song = songs.get(index);
        return songCountPlayBean(song, 6L - (index % 6));
    };
    // Verify that the charts are as expected
    verifyChart(baseUrl + "/charts/genre/punk", client,
        IntStream.range(0, 5).mapToObj(intFunction).collect(Collectors.toList()));
    verifyChart(baseUrl + "/charts/genre/hip hop", client,
        IntStream.range(6, 11).mapToObj(intFunction).collect(Collectors.toList()));
    verifyChart(baseUrl + "/charts/top-five", client,
        Arrays.asList(
            songCountPlayBean(songs.get(0), 6L),
            songCountPlayBean(songs.get(6), 6L),
            songCountPlayBean(songs.get(1), 5L),
            songCountPlayBean(songs.get(7), 5L),
            songCountPlayBean(songs.get(2), 4L)));
}
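The verifyChart helper is defined elsewhere in the test class and is not shown here. A plausible sketch of the kind of check it performs with the JAX-RS client created above follows; the method name, the use of GenericType, and the direct equality assertion are assumptions, not the example's actual code:

// Hypothetical verification: fetch a chart as JSON from the REST endpoint and compare it with the expected beans.
// Uses javax.ws.rs.client.Client, javax.ws.rs.core.GenericType and javax.ws.rs.core.MediaType.
static void fetchAndCompareChart(final Client client, final String url, final List<SongPlayCountBean> expected) {
    final List<SongPlayCountBean> chart = client.target(url)
        .request(MediaType.APPLICATION_JSON_TYPE)
        .get(new GenericType<List<SongPlayCountBean>>() { });
    assertThat(chart, is(expected));
}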
Use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project apache-kafka-on-k8s by banzaicloud: class CompositeReadOnlyKeyValueStore, method get.
@Override
public V get(final K key) {
    Objects.requireNonNull(key);
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    for (ReadOnlyKeyValueStore<K, V> store : stores) {
        try {
            final V result = store.get(key);
            if (result != null) {
                return result;
            }
        } catch (InvalidStateStoreException e) {
            throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
        }
    }
    return null;
}
Use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project apache-kafka-on-k8s by banzaicloud: class CompositeReadOnlyKeyValueStore, method range.
@Override
public KeyValueIterator<K, V> range(final K from, final K to) {
    Objects.requireNonNull(from);
    Objects.requireNonNull(to);
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {

        @Override
        public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
            try {
                return store.range(from, to);
            } catch (InvalidStateStoreException e) {
                throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
            }
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(storeName, new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
Use of org.apache.kafka.streams.state.ReadOnlyKeyValueStore in project apache-kafka-on-k8s by banzaicloud: class CompositeReadOnlyKeyValueStore, method all.
@Override
public KeyValueIterator<K, V> all() {
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {

        @Override
        public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
            try {
                return store.all();
            } catch (InvalidStateStoreException e) {
                throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
            }
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(storeName, new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
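KeyValueIterator extends Closeable, so the iterators returned by all() and range() should be closed once consumed; a minimal usage sketch with try-with-resources follows, where the store variable and its key/value types are placeholders:

// Iterate over every entry visible through the composite store and release the iterator's resources afterwards.
try (final KeyValueIterator<String, Long> iterator = store.all()) {
    while (iterator.hasNext()) {
        final KeyValue<String, Long> entry = iterator.next();
        System.out.println(entry.key + " -> " + entry.value);
    }
}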