Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder from the Apache Kafka project: class StreamsBuilderTest, method shouldReuseSourceTopicAsChangelogsWithOptimization20.
@Test
public void shouldReuseSourceTopicAsChangelogsWithOptimization20() {
    // With topology optimization enabled, a source KTable should use its input
    // topic as the changelog rather than creating a dedicated changelog topic.
    final String inputTopic = "topic";
    final String storeName = "store";
    builder.table(inputTopic, Materialized.<Long, String, KeyValueStore<Bytes, byte[]>>as(storeName));

    final Properties streamsProps = StreamsTestUtils.getStreamsConfig();
    streamsProps.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

    final Topology builtTopology = builder.build(streamsProps);
    final InternalTopologyBuilder internalBuilder = TopologyWrapper.getInternalTopologyBuilder(builtTopology);
    internalBuilder.rewriteTopology(new StreamsConfig(streamsProps));

    // The store's changelog mapping points straight at the source topic.
    assertThat(internalBuilder.buildTopology().storeToChangelogTopic(), equalTo(Collections.singletonMap(storeName, inputTopic)));
    // Exactly one store exists, and per-store logging is switched off.
    assertThat(internalBuilder.stateStores().keySet(), equalTo(Collections.singleton(storeName)));
    assertThat(internalBuilder.stateStores().get(storeName).loggingEnabled(), equalTo(false));
    // No internally-created (non-source) changelog topics remain in the subtopology.
    assertThat(internalBuilder.subtopologyToTopicsInfo().get(SUBTOPOLOGY_0).nonSourceChangelogTopics().isEmpty(), equalTo(true));
}
Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder from the Apache Kafka project: class StreamsBuilderTest, method shouldNotReuseSourceTopicAsChangelogsByDefault.
@Test
public void shouldNotReuseSourceTopicAsChangelogsByDefault() {
    // Without optimization, a source KTable gets its own changelog topic
    // named "<applicationId>-<storeName>-changelog".
    final String inputTopic = "topic";
    final String storeName = "store";
    builder.table(inputTopic, Materialized.<Long, String, KeyValueStore<Bytes, byte[]>>as(storeName));

    final InternalTopologyBuilder internalBuilder = TopologyWrapper.getInternalTopologyBuilder(builder.build());
    internalBuilder.setApplicationId("appId");

    // The store maps to the app-scoped changelog topic, not the source topic.
    assertThat(internalBuilder.buildTopology().storeToChangelogTopic(), equalTo(Collections.singletonMap(storeName, "appId-store-changelog")));
    // Exactly one store exists, and per-store logging stays enabled.
    assertThat(internalBuilder.stateStores().keySet(), equalTo(Collections.singleton(storeName)));
    assertThat(internalBuilder.stateStores().get(storeName).loggingEnabled(), equalTo(true));
    // The subtopology's changelog-topic set contains only the generated changelog.
    assertThat(internalBuilder.subtopologyToTopicsInfo().get(SUBTOPOLOGY_0).stateChangelogTopics.keySet(), equalTo(Collections.singleton("appId-store-changelog")));
}
Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder from the Apache Kafka project: class KTableMapValuesTest, method doTestValueGetter.
// Drives topic1 through the topology and verifies that the value getters of the
// two derived tables observe consistent values/timestamps: table2 holds the
// parsed integer and table3 its negation, each stamped with the record time.
private void doTestValueGetter(final StreamsBuilder builder, final String topic1, final KTableImpl<String, String, Integer> table2, final KTableImpl<String, String, Integer> table3) {
    final Topology topology = builder.build();
    final KTableValueGetterSupplier<String, Integer> supplier2 = table2.valueGetterSupplier();
    final KTableValueGetterSupplier<String, Integer> supplier3 = table3.valueGetterSupplier();

    // Connect both tables' processors to their backing stores so the getters
    // can be served from the test driver.
    final InternalTopologyBuilder internalBuilder = TopologyWrapper.getInternalTopologyBuilder(topology);
    internalBuilder.connectProcessorAndStateStores(table2.name, supplier2.storeNames());
    internalBuilder.connectProcessorAndStateStores(table3.name, supplier3.storeNames());

    try (final TopologyTestDriverWrapper driver = new TopologyTestDriverWrapper(builder.build(), props)) {
        final TestInputTopic<String, String> input = driver.createInputTopic(topic1, new StringSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final KTableValueGetter<String, Integer> getter2 = supplier2.get();
        final KTableValueGetter<String, Integer> getter3 = supplier3.get();
        getter2.init(driver.setCurrentNodeForProcessorContext(table2.name));
        getter3.init(driver.setCurrentNodeForProcessorContext(table3.name));

        // Initial inserts: "01" parses to 1, negated to -1; timestamps follow the records.
        input.pipeInput("A", "01", 50L);
        input.pipeInput("B", "01", 10L);
        input.pipeInput("C", "01", 30L);
        assertEquals(ValueAndTimestamp.make(1, 50L), getter2.get("A"));
        assertEquals(ValueAndTimestamp.make(1, 10L), getter2.get("B"));
        assertEquals(ValueAndTimestamp.make(1, 30L), getter2.get("C"));
        assertEquals(ValueAndTimestamp.make(-1, 50L), getter3.get("A"));
        assertEquals(ValueAndTimestamp.make(-1, 10L), getter3.get("B"));
        assertEquals(ValueAndTimestamp.make(-1, 30L), getter3.get("C"));

        // Updates for A and B (even with older timestamps) replace prior values; C is untouched.
        input.pipeInput("A", "02", 25L);
        input.pipeInput("B", "02", 20L);
        assertEquals(ValueAndTimestamp.make(2, 25L), getter2.get("A"));
        assertEquals(ValueAndTimestamp.make(2, 20L), getter2.get("B"));
        assertEquals(ValueAndTimestamp.make(1, 30L), getter2.get("C"));
        assertEquals(ValueAndTimestamp.make(-2, 25L), getter3.get("A"));
        assertEquals(ValueAndTimestamp.make(-2, 20L), getter3.get("B"));
        assertEquals(ValueAndTimestamp.make(-1, 30L), getter3.get("C"));

        // A third update for A only.
        input.pipeInput("A", "03", 35L);
        assertEquals(ValueAndTimestamp.make(3, 35L), getter2.get("A"));
        assertEquals(ValueAndTimestamp.make(2, 20L), getter2.get("B"));
        assertEquals(ValueAndTimestamp.make(1, 30L), getter2.get("C"));
        assertEquals(ValueAndTimestamp.make(-3, 35L), getter3.get("A"));
        assertEquals(ValueAndTimestamp.make(-2, 20L), getter3.get("B"));
        assertEquals(ValueAndTimestamp.make(-1, 30L), getter3.get("C"));

        // A tombstone for A deletes it from both derived tables; B and C survive.
        input.pipeInput("A", (String) null, 1L);
        assertNull(getter2.get("A"));
        assertEquals(ValueAndTimestamp.make(2, 20L), getter2.get("B"));
        assertEquals(ValueAndTimestamp.make(1, 30L), getter2.get("C"));
        assertNull(getter3.get("A"));
        assertEquals(ValueAndTimestamp.make(-2, 20L), getter3.get("B"));
        assertEquals(ValueAndTimestamp.make(-1, 30L), getter3.get("C"));
    }
}
Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder from the Apache Kafka project: class KTableSourceTest, method testValueGetter.
@Test
public void testValueGetter() {
    // Verifies that a source KTable's value getter reflects the latest
    // value-and-timestamp per key, including deletions via null values.
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic1 = "topic1";
    @SuppressWarnings("unchecked") final KTableImpl<String, String, String> table1 = (KTableImpl<String, String, String>) builder.table(topic1, stringConsumed, Materialized.as("store"));

    final Topology topology = builder.build();
    final KTableValueGetterSupplier<String, String> supplier1 = table1.valueGetterSupplier();

    // Connect the table's processor to its store so the getter can be served.
    final InternalTopologyBuilder internalBuilder = TopologyWrapper.getInternalTopologyBuilder(topology);
    internalBuilder.connectProcessorAndStateStores(table1.name, supplier1.storeNames());

    try (final TopologyTestDriverWrapper driver = new TopologyTestDriverWrapper(builder.build(), props)) {
        final TestInputTopic<String, String> input = driver.createInputTopic(topic1, new StringSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final KTableValueGetter<String, String> getter1 = supplier1.get();
        getter1.init(driver.setCurrentNodeForProcessorContext(table1.name));

        // Initial inserts carry their record timestamps.
        input.pipeInput("A", "01", 10L);
        input.pipeInput("B", "01", 20L);
        input.pipeInput("C", "01", 15L);
        assertEquals(ValueAndTimestamp.make("01", 10L), getter1.get("A"));
        assertEquals(ValueAndTimestamp.make("01", 20L), getter1.get("B"));
        assertEquals(ValueAndTimestamp.make("01", 15L), getter1.get("C"));

        // Updates replace prior values — even B's out-of-order (older) timestamp.
        input.pipeInput("A", "02", 30L);
        input.pipeInput("B", "02", 5L);
        assertEquals(ValueAndTimestamp.make("02", 30L), getter1.get("A"));
        assertEquals(ValueAndTimestamp.make("02", 5L), getter1.get("B"));
        assertEquals(ValueAndTimestamp.make("01", 15L), getter1.get("C"));

        // A third update for A only.
        input.pipeInput("A", "03", 29L);
        assertEquals(ValueAndTimestamp.make("03", 29L), getter1.get("A"));
        assertEquals(ValueAndTimestamp.make("02", 5L), getter1.get("B"));
        assertEquals(ValueAndTimestamp.make("01", 15L), getter1.get("C"));

        // Tombstones remove A and B; C is unaffected.
        input.pipeInput("A", null, 50L);
        input.pipeInput("B", null, 3L);
        assertNull(getter1.get("A"));
        assertNull(getter1.get("B"));
        assertEquals(ValueAndTimestamp.make("01", 15L), getter1.get("C"));
    }
}
Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder from the apache-kafka-on-k8s project by Banzai Cloud: class KStreamTestDriver, method setUp.
/**
 * Initializes this test driver from the given {@link StreamsBuilder}: builds the
 * main and (optional) global topologies, creates a mock processor context backed
 * by a thread cache, and initializes the state stores of both topologies.
 *
 * @param builder   builder whose topology this driver will execute
 * @param stateDir  directory used for local state by the mock context
 * @param keySerde  default key serde for the mock context
 * @param valSerde  default value serde for the mock context
 * @param cacheSize size passed to the {@link ThreadCache}
 */
public void setUp(final StreamsBuilder builder, final File stateDir, final Serde<?> keySerde, final Serde<?> valSerde, final long cacheSize) {
final InternalTopologyBuilder internalTopologyBuilder = StreamsBuilderTest.internalTopologyBuilder(builder);
internalTopologyBuilder.setApplicationId("TestDriver");
// NOTE(review): build(null) presumably builds the task topology without a topic-group filter — confirm against InternalTopologyBuilder.build.
topology = internalTopologyBuilder.build(null);
globalTopology = internalTopologyBuilder.buildGlobalStateTopology();
final ThreadCache cache = new ThreadCache(logContext, cacheSize, new MockStreamsMetrics(new Metrics()));
context = new InternalMockProcessorContext(stateDir, keySerde, valSerde, new MockRecordCollector(), cache);
// Seed a default record context (topic "topic", zero numeric fields) so the mock context has record metadata before any record is processed.
context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "topic"));
// store map that are required for joins etc.
// Initialize global stores first (when a global topology exists), then the main topology's stores.
if (globalTopology != null) {
initTopology(globalTopology, globalTopology.globalStateStores());
}
initTopology(topology, topology.stateStores());
}
Aggregations