Use of org.apache.kafka.streams.kstream.internals.MaterializedInternal in project kafka by apache.
Class TimestampedKeyValueStoreMaterializerTest, method shouldCreateBuilderThatBuildsMeteredStoreWithCachingAndLoggingEnabled.
@Test
public void shouldCreateBuilderThatBuildsMeteredStoreWithCachingAndLoggingEnabled() {
    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materialized =
        new MaterializedInternal<>(Materialized.as("store"), nameProvider, storePrefix);
    final TimestampedKeyValueStoreMaterializer<String, String> materializer =
        new TimestampedKeyValueStoreMaterializer<>(materialized);
    final StoreBuilder<TimestampedKeyValueStore<String, String>> builder = materializer.materialize();
    final TimestampedKeyValueStore<String, String> store = builder.build();
    // Peel off the wrapping layers: the metered store wraps the caching store,
    // which in turn wraps the change-logging store.
    final WrappedStateStore caching = (WrappedStateStore) ((WrappedStateStore) store).wrapped();
    final StateStore logging = caching.wrapped();
    assertThat(store, instanceOf(MeteredTimestampedKeyValueStore.class));
    assertThat(caching, instanceOf(CachingKeyValueStore.class));
    assertThat(logging, instanceOf(ChangeLoggingTimestampedKeyValueBytesStore.class));
}
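The test exercises the defaults; in application code the same layering is requested through the public Materialized API. A minimal sketch, assuming a hypothetical store name "counts-store" (caching and logging are enabled by default; the explicit calls are shown for clarity):

import java.util.Collections;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueStore;

// "counts-store" is a hypothetical name; the empty map keeps the default changelog topic config.
final Materialized<String, Long, KeyValueStore<Bytes, byte[]>> materialized =
    Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store")
        .withCachingEnabled()
        .withLoggingEnabled(Collections.emptyMap());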
Use of org.apache.kafka.streams.kstream.internals.MaterializedInternal in project kafka by apache.
Class TimestampedKeyValueStoreMaterializerTest, method shouldCreateBuilderThatBuildsStoreWithCachingDisabled.
@Test
public void shouldCreateBuilderThatBuildsStoreWithCachingDisabled() {
    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materialized =
        new MaterializedInternal<>(Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store").withCachingDisabled(), nameProvider, storePrefix);
    final TimestampedKeyValueStoreMaterializer<String, String> materializer =
        new TimestampedKeyValueStoreMaterializer<>(materialized);
    final StoreBuilder<TimestampedKeyValueStore<String, String>> builder = materializer.materialize();
    final TimestampedKeyValueStore<String, String> store = builder.build();
    // With caching disabled, the metered store wraps the change-logging store directly.
    final WrappedStateStore logging = (WrappedStateStore) ((WrappedStateStore) store).wrapped();
    assertThat(logging, instanceOf(ChangeLoggingKeyValueBytesStore.class));
}
Use of org.apache.kafka.streams.kstream.internals.MaterializedInternal in project kafka by apache.
Class TimestampedKeyValueStoreMaterializerTest, method shouldCreateKeyValueStoreWithTheProvidedInnerStore.
@Test
public void shouldCreateKeyValueStoreWithTheProvidedInnerStore() {
    final KeyValueBytesStoreSupplier supplier = EasyMock.createNiceMock(KeyValueBytesStoreSupplier.class);
    final InMemoryKeyValueStore store = new InMemoryKeyValueStore("name");
    EasyMock.expect(supplier.name()).andReturn("name").anyTimes();
    EasyMock.expect(supplier.get()).andReturn(store);
    EasyMock.expect(supplier.metricsScope()).andReturn("metricScope");
    EasyMock.replay(supplier);
    final MaterializedInternal<String, Integer, KeyValueStore<Bytes, byte[]>> materialized =
        new MaterializedInternal<>(Materialized.as(supplier), nameProvider, storePrefix);
    final TimestampedKeyValueStoreMaterializer<String, Integer> materializer =
        new TimestampedKeyValueStoreMaterializer<>(materialized);
    final StoreBuilder<TimestampedKeyValueStore<String, Integer>> builder = materializer.materialize();
    final TimestampedKeyValueStore<String, Integer> built = builder.build();
    // The built store must be backed by the inner store the supplier provided.
    assertThat(store.name(), CoreMatchers.equalTo(built.name()));
}
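Here the supplier is an EasyMock mock; in application code the equivalent supplier would normally come from the Stores factory. A minimal sketch, reusing the store name "name" from the test:

import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.Stores;

// Provide a concrete in-memory inner store instead of the default supplier.
final KeyValueBytesStoreSupplier supplier = Stores.inMemoryKeyValueStore("name");
final Materialized<String, Integer, KeyValueStore<Bytes, byte[]>> materialized =
    Materialized.as(supplier);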
Use of org.apache.kafka.streams.kstream.internals.MaterializedInternal in project kafka by apache.
Class TimestampedKeyValueStoreMaterializerTest, method shouldCreateBuilderThatBuildsStoreWithLoggingDisabled.
@Test
public void shouldCreateBuilderThatBuildsStoreWithLoggingDisabled() {
    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materialized =
        new MaterializedInternal<>(Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store").withLoggingDisabled(), nameProvider, storePrefix);
    final TimestampedKeyValueStoreMaterializer<String, String> materializer =
        new TimestampedKeyValueStoreMaterializer<>(materialized);
    final StoreBuilder<TimestampedKeyValueStore<String, String>> builder = materializer.materialize();
    final TimestampedKeyValueStore<String, String> store = builder.build();
    // With logging disabled, the caching store wraps the inner store directly;
    // no change-logging layer is present.
    final WrappedStateStore caching = (WrappedStateStore) ((WrappedStateStore) store).wrapped();
    assertThat(caching, instanceOf(CachingKeyValueStore.class));
    assertThat(caching.wrapped(), not(instanceOf(ChangeLoggingKeyValueBytesStore.class)));
}
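Conversely, when logging is left enabled, the map passed to withLoggingEnabled is applied as configuration for the store's changelog topic. A minimal sketch with a hypothetical retention override (the key is a standard Kafka topic config):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueStore;

// Hypothetical changelog topic override: keep changelog records for one day.
final Map<String, String> changelogConfig = new HashMap<>();
changelogConfig.put("retention.ms", "86400000");
final Materialized<String, String, KeyValueStore<Bytes, byte[]>> materialized =
    Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store")
        .withLoggingEnabled(changelogConfig);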
Use of org.apache.kafka.streams.kstream.internals.MaterializedInternal in project apache-kafka-on-k8s by banzaicloud.
Class StreamThreadTest, method shouldUpdateStandbyTask.
@SuppressWarnings("unchecked")
@Test
public void shouldUpdateStandbyTask() {
    final String storeName1 = "count-one";
    final String storeName2 = "table-two";
    final String changelogName = applicationId + "-" + storeName1 + "-changelog";
    final TopicPartition partition1 = new TopicPartition(changelogName, 1);
    final TopicPartition partition2 = t2p1;
    internalStreamsBuilder.stream(Collections.singleton(topic1), consumed)
        .groupByKey()
        .count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as(storeName1));
    internalStreamsBuilder.table(topic2, new ConsumedInternal(), new MaterializedInternal(Materialized.as(storeName2), internalStreamsBuilder, ""));
    final StreamThread thread = createStreamThread(clientId, config, false);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions(changelogName, Collections.singletonList(new PartitionInfo(changelogName, 1, null, new Node[0], new Node[0])));
    restoreConsumer.assign(Utils.mkSet(partition1, partition2));
    restoreConsumer.updateEndOffsets(Collections.singletonMap(partition1, 10L));
    restoreConsumer.updateBeginningOffsets(Collections.singletonMap(partition1, 0L));
    restoreConsumer.updateEndOffsets(Collections.singletonMap(partition2, 10L));
    restoreConsumer.updateBeginningOffsets(Collections.singletonMap(partition2, 0L));
    // let store1 be restored from 0 to 10; store2 be restored from 0 to the committed offset 5
    clientSupplier.consumer.assign(Utils.mkSet(partition2));
    clientSupplier.consumer.commitSync(Collections.singletonMap(partition2, new OffsetAndMetadata(5L, "")));
    for (long i = 0L; i < 10L; i++) {
        restoreConsumer.addRecord(new ConsumerRecord<>(changelogName, 1, i, ("K" + i).getBytes(), ("V" + i).getBytes()));
        restoreConsumer.addRecord(new ConsumerRecord<>(topic2, 1, i, ("K" + i).getBytes(), ("V" + i).getBytes()));
    }
    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(null);
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    // assign a single partition to each standby task
    standbyTasks.put(task1, Collections.singleton(t1p1));
    standbyTasks.put(task3, Collections.singleton(t2p1));
    thread.taskManager().setAssignmentMetadata(Collections.<TaskId, Set<TopicPartition>>emptyMap(), standbyTasks);
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());
    thread.runOnce(-1);
    final StandbyTask standbyTask1 = thread.taskManager().standbyTask(partition1);
    final StandbyTask standbyTask2 = thread.taskManager().standbyTask(partition2);
    final KeyValueStore<Object, Long> store1 = (KeyValueStore<Object, Long>) standbyTask1.getStore(storeName1);
    final KeyValueStore<Object, Long> store2 = (KeyValueStore<Object, Long>) standbyTask2.getStore(storeName2);
    assertEquals(10L, store1.approximateNumEntries());
    assertEquals(5L, store2.approximateNumEntries());
    assertEquals(Collections.singleton(partition2), restoreConsumer.paused());
    assertEquals(1, thread.standbyRecords().size());
    assertEquals(5, thread.standbyRecords().get(partition2).size());
}
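This test injects standby tasks through the task manager directly; in a real application, standby tasks are created when num.standby.replicas is set in the client configuration. A minimal sketch of that configuration (application id and bootstrap servers are hypothetical):

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");             // hypothetical
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // hypothetical
// One standby replica per active task keeps a warm copy of each state store
// on another instance, which is what StandbyTask maintains.
props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);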