use of org.apache.kafka.streams.errors.InvalidStateStoreException in project kafka by apache.
the class QueryableStateIntegrationTest method verifyAllWindowedKeys.
private void verifyAllWindowedKeys(final List<KafkaStreams> streamsList,
                                   final KafkaStreams streams,
                                   final KafkaStreamsTest.StateListenerStub stateListenerStub,
                                   final Set<String> keys,
                                   final String storeName,
                                   final Long from,
                                   final Long to,
                                   final long timeout,
                                   final boolean pickInstanceByPort) throws Exception {
    retryOnExceptionWithTimeout(timeout, () -> {
        final List<String> noMetadataKeys = new ArrayList<>();
        final List<String> nullStoreKeys = new ArrayList<>();
        final List<String> nullValueKeys = new ArrayList<>();
        final Map<String, Exception> exceptionalKeys = new TreeMap<>();
        final StringSerializer serializer = new StringSerializer();
        for (final String key : keys) {
            try {
                final KeyQueryMetadata queryMetadata = streams.queryMetadataForKey(storeName, key, serializer);
                if (queryMetadata == null || queryMetadata.equals(KeyQueryMetadata.NOT_AVAILABLE)) {
                    noMetadataKeys.add(key);
                    continue;
                }
                if (pickInstanceByPort) {
                    assertThat(queryMetadata.standbyHosts().size(), equalTo(0));
                } else {
                    assertThat("Should have standbys to query from", !queryMetadata.standbyHosts().isEmpty());
                }
                final int index = queryMetadata.activeHost().port();
                final KafkaStreams streamsWithKey = pickInstanceByPort ? streamsList.get(index) : streams;
                final ReadOnlyWindowStore<String, Long> store =
                    IntegrationTestUtils.getStore(storeName, streamsWithKey, true, QueryableStoreTypes.windowStore());
                if (store == null) {
                    nullStoreKeys.add(key);
                    continue;
                }
                if (store.fetch(key, ofEpochMilli(from), ofEpochMilli(to)) == null) {
                    nullValueKeys.add(key);
                }
            } catch (final InvalidStateStoreException e) {
                // an InvalidStateStoreException is acceptable only if at least one rebalance has happened
                if (stateListenerStub.mapStates.get(KafkaStreams.State.REBALANCING) < 1) {
                    throw new NoRetryException(new AssertionError(
                        String.format("Received %s for key %s and expected at least one rebalancing state, but had none",
                                      e.getClass().getName(), key)));
                }
            } catch (final Exception e) {
                exceptionalKeys.put(key, e);
            }
        }
        assertNoKVKeyFailures(storeName, timeout, noMetadataKeys, nullStoreKeys, nullValueKeys, exceptionalKeys);
    });
}
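Below is a minimal caller-side sketch, not taken from the test, of the lookup pattern the method above verifies: resolve the key's metadata, fetch from the windowed store, and treat InvalidStateStoreException as a transient condition. The class name WindowedKeyLookup and the String-key/Long-value serde assumptions are illustrative.

import java.time.Instant;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.apache.kafka.streams.state.WindowStoreIterator;

public class WindowedKeyLookup {

    // Returns the latest value for the key in [from, to], or null if the key's metadata
    // is not available yet, the key is absent, or the store is currently migrating.
    public static Long latestWindowedValue(final KafkaStreams streams,
                                           final String storeName,
                                           final String key,
                                           final Instant from,
                                           final Instant to) {
        final KeyQueryMetadata metadata =
            streams.queryMetadataForKey(storeName, key, Serdes.String().serializer());
        if (metadata == null || metadata.equals(KeyQueryMetadata.NOT_AVAILABLE)) {
            return null; // metadata not discovered yet, e.g. during startup or a rebalance
        }
        try {
            final ReadOnlyWindowStore<String, Long> store = streams.store(
                StoreQueryParameters.fromNameAndType(storeName, QueryableStoreTypes.<String, Long>windowStore()));
            Long latest = null;
            try (final WindowStoreIterator<Long> iterator = store.fetch(key, from, to)) {
                while (iterator.hasNext()) {
                    latest = iterator.next().value;
                }
            }
            return latest;
        } catch (final InvalidStateStoreException e) {
            // the store may be unavailable during a rebalance; callers should retry
            return null;
        }
    }
}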
use of org.apache.kafka.streams.errors.InvalidStateStoreException in project kafka by apache.
the class QueryableStateIntegrationTest method shouldRejectWronglyTypedStore.
@Test
public void shouldRejectWronglyTypedStore() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    final String input = uniqueTestName + "-input";
    final String storeName = uniqueTestName + "-input-table";
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
        input,
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.String()));
    CLUSTER.createTopic(input);
    final Properties properties = mkProperties(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, uniqueTestName + "-app"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
    try (final KafkaStreams streams = getRunningStreams(properties, builder, true)) {
        final ReadOnlyKeyValueStore<String, String> store = streams.store(fromNameAndType(storeName, keyValueStore()));
        assertThat(store, Matchers.notNullValue());
        // Note that to check the type we actually need a store reference,
        // so we can't check when you get the IQ store, only when you
        // try to use it. Presumably, this could be improved.
        final ReadOnlySessionStore<String, String> sessionStore =
            streams.store(fromNameAndType(storeName, sessionStore()));
        final InvalidStateStoreException exception =
            assertThrows(InvalidStateStoreException.class, () -> sessionStore.fetch("a"));
        assertThat(
            exception.getMessage(),
            is("Cannot get state store " + storeName + " because the queryable store type" +
               " [class org.apache.kafka.streams.state.QueryableStoreTypes$SessionStoreType]" +
               " does not accept the actual store type" +
               " [class org.apache.kafka.streams.state.internals.MeteredTimestampedKeyValueStore]."));
    }
}
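For contrast, a small sketch of requesting the same materialized table with the store type it was actually created as, which is the usage the exception above guards against. The class name TypedStoreLookup is illustrative, not part of the test.

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class TypedStoreLookup {

    // Requesting the store with the type it was materialized as (a key-value store) succeeds;
    // asking for a session store over the same name only fails once the handle is used,
    // as the test above demonstrates.
    public static String lookup(final KafkaStreams streams, final String storeName, final String key) {
        final ReadOnlyKeyValueStore<String, String> store = streams.store(
            StoreQueryParameters.fromNameAndType(storeName, QueryableStoreTypes.<String, String>keyValueStore()));
        return store.get(key);
    }
}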
use of org.apache.kafka.streams.errors.InvalidStateStoreException in project kafka by apache.
the class StoreQueryIntegrationTest method shouldQuerySpecificActivePartitionStores.
@Test
public void shouldQuerySpecificActivePartitionStores() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> {
        final KeyQueryMetadata keyQueryMetadata =
            kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
        // the partition the key belongs to
        final int keyPartition = keyQueryMetadata.partition();
        // a partition the key does not belong to
        final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
        final boolean kafkaStreams1IsActive = (keyQueryMetadata.activeHost().port() % 2) == 1;
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam =
            StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore())
                .withPartition(keyPartition);
        ReadOnlyKeyValueStore<Integer, Integer> store1 = null;
        ReadOnlyKeyValueStore<Integer, Integer> store2 = null;
        if (kafkaStreams1IsActive) {
            store1 = getStore(kafkaStreams1, storeQueryParam);
        } else {
            store2 = getStore(kafkaStreams2, storeQueryParam);
        }
        if (kafkaStreams1IsActive) {
            assertThat(store1, is(notNullValue()));
            assertThat(store2, is(nullValue()));
        } else {
            assertThat(store2, is(notNullValue()));
            assertThat(store1, is(nullValue()));
        }
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> storeQueryParam2 =
            StoreQueryParameters.<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(TABLE_NAME, keyValueStore())
                .withPartition(keyDontBelongPartition);
        try {
            // Querying the wrong partition on the instance that hosts it returns no value for the key,
            // while the instance that does not host that partition throws an InvalidStateStoreException
            if (kafkaStreams1IsActive) {
                assertThat(store1.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams2, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception =
                    assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams1, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            } else {
                assertThat(store2.get(key), is(notNullValue()));
                assertThat(getStore(kafkaStreams1, storeQueryParam2).get(key), is(nullValue()));
                final InvalidStateStoreException exception =
                    assertThrows(InvalidStateStoreException.class, () -> getStore(kafkaStreams2, storeQueryParam2).get(key));
                assertThat(exception.getMessage(), containsString("The specified partition 1 for store source-table does not exist."));
            }
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
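A minimal sketch of the partition-scoped lookup pattern this test asserts on, assuming an Integer-keyed key-value store; the class name PartitionScopedLookup is illustrative, not part of the test. Requesting a partition the instance does not host is what produces the "specified partition ... does not exist" InvalidStateStoreException seen above.

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class PartitionScopedLookup {

    // Looks up a key in the store of one specific partition. get() returns null when the key
    // does not live in that partition; KafkaStreams#store throws InvalidStateStoreException
    // when this instance does not host the partition or is still rebalancing, so callers
    // typically retry in that case.
    public static Integer getFromPartition(final KafkaStreams streams,
                                           final String storeName,
                                           final int partition,
                                           final Integer key) {
        final StoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> params =
            StoreQueryParameters
                .<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(storeName, QueryableStoreTypes.keyValueStore())
                .withPartition(partition);
        return streams.store(params).get(key);
    }
}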
use of org.apache.kafka.streams.errors.InvalidStateStoreException in project kafka by apache.
the class StoreQueryIntegrationTest method shouldQueryOnlyActivePartitionStoresByDefault.
@Test
public void shouldQueryOnlyActivePartitionStoresByDefault() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    getStreamsBuilderWithTopology(builder, semaphore);
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    until(() -> {
        final KeyQueryMetadata keyQueryMetadata =
            kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
        final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(TABLE_NAME, kafkaStreams1, queryableStoreType);
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(TABLE_NAME, kafkaStreams2, queryableStoreType);
        final boolean kafkaStreams1IsActive = (keyQueryMetadata.activeHost().port() % 2) == 1;
        try {
            if (kafkaStreams1IsActive) {
                assertThat(store1.get(key), is(notNullValue()));
                assertThat(store2.get(key), is(nullValue()));
            } else {
                assertThat(store1.get(key), is(nullValue()));
                assertThat(store2.get(key), is(notNullValue()));
            }
            return true;
        } catch (final InvalidStateStoreException exception) {
            verifyRetrievableException(exception);
            LOG.info("Either streams wasn't running or a re-balancing took place. Will try again.");
            return false;
        }
    });
}
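The test above relies on the default behaviour of exposing only active task stores. As a hedged sketch of the alternative, enableStaleStores() on the same parameters also exposes standby and restoring stores, trading read-your-writes consistency for availability during rebalances; the class name StaleStoreLookup is illustrative.

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class StaleStoreLookup {

    // With enableStaleStores(), standby and restoring stores are queryable as well,
    // so the returned value may lag behind the active task's state.
    public static Integer getPossiblyStale(final KafkaStreams streams,
                                           final String storeName,
                                           final Integer key) {
        final ReadOnlyKeyValueStore<Integer, Integer> store = streams.store(
            StoreQueryParameters
                .<ReadOnlyKeyValueStore<Integer, Integer>>fromNameAndType(storeName, QueryableStoreTypes.keyValueStore())
                .enableStaleStores());
        return store.get(key);
    }
}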
use of org.apache.kafka.streams.errors.InvalidStateStoreException in project kafka by apache.
the class StreamThreadStateStoreProvider method stores.
@SuppressWarnings("unchecked")
public <T> List<T> stores(final StoreQueryParameters storeQueryParams) {
    final StreamThread.State state = streamThread.state();
    if (state == StreamThread.State.DEAD) {
        return Collections.emptyList();
    }
    final String storeName = storeQueryParams.storeName();
    final QueryableStoreType<T> queryableStoreType = storeQueryParams.queryableStoreType();
    final String topologyName = storeQueryParams instanceof NamedTopologyStoreQueryParameters
        ? ((NamedTopologyStoreQueryParameters) storeQueryParams).topologyName()
        : null;
    if (storeQueryParams.staleStoresEnabled() ? state.isAlive() : state == StreamThread.State.RUNNING) {
        final Collection<Task> tasks = storeQueryParams.staleStoresEnabled()
            ? streamThread.allTasks().values()
            : streamThread.activeTasks();
        if (storeQueryParams.partition() != null) {
            for (final Task task : tasks) {
                if (task.id().partition() == storeQueryParams.partition()
                        && (topologyName == null || topologyName.equals(task.id().topologyName()))
                        && task.getStore(storeName) != null
                        && storeName.equals(task.getStore(storeName).name())) {
                    final T typedStore = validateAndCastStores(task.getStore(storeName), queryableStoreType, storeName, task.id());
                    return Collections.singletonList(typedStore);
                }
            }
            return Collections.emptyList();
        } else {
            final List<T> list = new ArrayList<>();
            for (final Task task : tasks) {
                final StateStore store = task.getStore(storeName);
                if (store == null) {
                    // then this task doesn't have that store
                } else {
                    final T typedStore = validateAndCastStores(store, queryableStoreType, storeName, task.id());
                    list.add(typedStore);
                }
            }
            return list;
        }
    } else {
        throw new InvalidStateStoreException("Cannot get state store " + storeName + " because the stream thread is " +
            state + ", not RUNNING" + (storeQueryParams.staleStoresEnabled() ? " or REBALANCING" : ""));
    }
}
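Because this provider throws InvalidStateStoreException whenever the thread is not in a queryable state, Interactive Query callers commonly wrap store retrieval in a retry loop. A minimal sketch with an illustrative StoreRetry class and a fixed 100 ms back-off chosen for the example:

import java.time.Duration;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.errors.InvalidStateStoreException;

public class StoreRetry {

    // Retries KafkaStreams#store until the stream threads are in a queryable state or the
    // timeout elapses; the provider above throws InvalidStateStoreException while a thread
    // is still starting up or rebalancing.
    public static <T> T storeWithRetry(final KafkaStreams streams,
                                       final StoreQueryParameters<T> params,
                                       final Duration timeout) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + timeout.toMillis();
        while (true) {
            try {
                return streams.store(params);
            } catch (final InvalidStateStoreException e) {
                if (System.currentTimeMillis() >= deadline) {
                    throw e;
                }
                Thread.sleep(100L); // back off briefly before asking again
            }
        }
    }
}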