Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder in the Apache Kafka project, taken from the method doTestValueGetter of class KTableFilterTest.
// Drives a topology through topic1 and verifies KTableValueGetter lookups on two
// tables derived from the same input. In the assertions below, getter2 only sees
// records whose value is even (2) and getter3 only records whose value is odd
// (1, 3) — presumably table2 = filter(isEven) and table3 = filterNot(isEven);
// TODO confirm against the caller that constructs table2/table3.
private void doTestValueGetter(final StreamsBuilder builder, final KTableImpl<String, Integer, Integer> table2, final KTableImpl<String, Integer, Integer> table3, final String topic1) {
final Topology topology = builder.build();
final KTableValueGetterSupplier<String, Integer> getterSupplier2 = table2.valueGetterSupplier();
final KTableValueGetterSupplier<String, Integer> getterSupplier3 = table3.valueGetterSupplier();
final InternalTopologyBuilder topologyBuilder = TopologyWrapper.getInternalTopologyBuilder(topology);
// Connect each table's processor to its getter's state stores so that the
// getters can resolve lookups once the driver is running.
topologyBuilder.connectProcessorAndStateStores(table2.name, getterSupplier2.storeNames());
topologyBuilder.connectProcessorAndStateStores(table3.name, getterSupplier3.storeNames());
try (final TopologyTestDriverWrapper driver = new TopologyTestDriverWrapper(topology, props)) {
final TestInputTopic<String, Integer> inputTopic = driver.createInputTopic(topic1, new StringSerializer(), new IntegerSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
final KTableValueGetter<String, Integer> getter2 = getterSupplier2.get();
final KTableValueGetter<String, Integer> getter3 = getterSupplier3.get();
// Position the driver's processor context at each table's node before
// initializing the corresponding getter.
getter2.init(driver.setCurrentNodeForProcessorContext(table2.name));
getter3.init(driver.setCurrentNodeForProcessorContext(table3.name));
// All three keys carry the odd value 1: nothing is visible through getter2,
// while getter3 returns each value with its record timestamp.
inputTopic.pipeInput("A", 1, 5L);
inputTopic.pipeInput("B", 1, 10L);
inputTopic.pipeInput("C", 1, 15L);
assertNull(getter2.get("A"));
assertNull(getter2.get("B"));
assertNull(getter2.get("C"));
assertEquals(ValueAndTimestamp.make(1, 5L), getter3.get("A"));
assertEquals(ValueAndTimestamp.make(1, 10L), getter3.get("B"));
assertEquals(ValueAndTimestamp.make(1, 15L), getter3.get("C"));
// Update A and B to the even value 2: they move from getter3 to getter2,
// each with the timestamp of the updating record; C is unchanged.
inputTopic.pipeInput("A", 2, 10L);
inputTopic.pipeInput("B", 2, 5L);
assertEquals(ValueAndTimestamp.make(2, 10L), getter2.get("A"));
assertEquals(ValueAndTimestamp.make(2, 5L), getter2.get("B"));
assertNull(getter2.get("C"));
assertNull(getter3.get("A"));
assertNull(getter3.get("B"));
assertEquals(ValueAndTimestamp.make(1, 15L), getter3.get("C"));
// Update A back to the odd value 3: it moves from getter2 back to getter3.
inputTopic.pipeInput("A", 3, 15L);
assertNull(getter2.get("A"));
assertEquals(ValueAndTimestamp.make(2, 5L), getter2.get("B"));
assertNull(getter2.get("C"));
assertEquals(ValueAndTimestamp.make(3, 15L), getter3.get("A"));
assertNull(getter3.get("B"));
assertEquals(ValueAndTimestamp.make(1, 15L), getter3.get("C"));
// Tombstones (null values) for A and B delete them from both tables;
// C retains its original value and timestamp in getter3.
inputTopic.pipeInput("A", null, 10L);
inputTopic.pipeInput("B", null, 20L);
assertNull(getter2.get("A"));
assertNull(getter2.get("B"));
assertNull(getter2.get("C"));
assertNull(getter3.get("A"));
assertNull(getter3.get("B"));
assertEquals(ValueAndTimestamp.make(1, 15L), getter3.get("C"));
}
}
Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder in the Apache Kafka project, taken from the method shouldEnableLoggingWithCustomConfigOnStreamJoined of class KStreamKStreamJoinTest.
@Test
public void shouldEnableLoggingWithCustomConfigOnStreamJoined() {
    // Join windows: 100ms time difference with a 50ms grace period.
    final JoinWindows windows = JoinWindows.ofTimeDifferenceAndGrace(ofMillis(100), Duration.ofMillis(50));
    // Store config with logging enabled plus one custom changelog property.
    final StreamJoined<String, Integer, Integer> joinedConfig = StreamJoined
        .with(Serdes.String(), Serdes.Integer(), Serdes.Integer())
        .withStoreName("store")
        .withLoggingEnabled(Collections.singletonMap("test", "property"));

    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final KStream<String, Integer> leftStream = streamsBuilder.stream("left", Consumed.with(Serdes.String(), Serdes.Integer()));
    final KStream<String, Integer> rightStream = streamsBuilder.stream("right", Consumed.with(Serdes.String(), Serdes.Integer()));
    leftStream.join(rightStream, Integer::sum, windows, joinedConfig);

    final InternalTopologyBuilder topologyBuilder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
    topologyBuilder.buildSubtopology(0);

    // Logging stays enabled on both sides of the join's window stores ...
    assertThat(topologyBuilder.stateStores().get("store-this-join-store").loggingEnabled(), equalTo(true));
    assertThat(topologyBuilder.stateStores().get("store-other-join-store").loggingEnabled(), equalTo(true));
    // ... and each of the two changelog topics carries the custom property.
    assertThat(topologyBuilder.subtopologyToTopicsInfo().get(SUBTOPOLOGY_0).stateChangelogTopics.size(), equalTo(2));
    for (final InternalTopicConfig changelogConfig : topologyBuilder.subtopologyToTopicsInfo().get(SUBTOPOLOGY_0).stateChangelogTopics.values()) {
        assertThat(changelogConfig.getProperties(Collections.emptyMap(), 0).get("test"), equalTo("property"));
    }
}
Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder in the Apache Kafka project, taken from the method shouldDisableLoggingOnStreamJoined of class KStreamKStreamJoinTest.
@Test
public void shouldDisableLoggingOnStreamJoined() {
    // Join windows: 100ms time difference with a 50ms grace period.
    final JoinWindows windows = JoinWindows.ofTimeDifferenceAndGrace(ofMillis(100), Duration.ofMillis(50));
    // Store config with changelog logging explicitly disabled.
    final StreamJoined<String, Integer, Integer> joinedConfig = StreamJoined
        .with(Serdes.String(), Serdes.Integer(), Serdes.Integer())
        .withStoreName("store")
        .withLoggingDisabled();

    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final KStream<String, Integer> leftStream = streamsBuilder.stream("left", Consumed.with(Serdes.String(), Serdes.Integer()));
    final KStream<String, Integer> rightStream = streamsBuilder.stream("right", Consumed.with(Serdes.String(), Serdes.Integer()));
    leftStream.join(rightStream, Integer::sum, windows, joinedConfig);

    final InternalTopologyBuilder topologyBuilder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());

    // Logging must be off on both sides of the join's window stores.
    assertThat(topologyBuilder.stateStores().get("store-this-join-store").loggingEnabled(), equalTo(false));
    assertThat(topologyBuilder.stateStores().get("store-other-join-store").loggingEnabled(), equalTo(false));
}
Example usage of org.apache.kafka.streams.processor.internals.InternalTopologyBuilder in the Apache Kafka project, taken from the setup method before of class StreamThreadStateStoreProviderTest.
@Before
public void before() {
    // Minimal topology: one source feeding one processor that owns five
    // different store types (plain/timestamped KV, plain/timestamped window,
    // session), all in-memory.
    final TopologyWrapper topologyWrapper = new TopologyWrapper();
    topologyWrapper.addSource("the-source", topicName);
    topologyWrapper.addProcessor("the-processor", new MockApiProcessorSupplier<>(), "the-source");
    topologyWrapper.addStateStore(Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore("kv-store"), Serdes.String(), Serdes.String()), "the-processor");
    topologyWrapper.addStateStore(Stores.timestampedKeyValueStoreBuilder(Stores.inMemoryKeyValueStore("timestamped-kv-store"), Serdes.String(), Serdes.String()), "the-processor");
    topologyWrapper.addStateStore(Stores.windowStoreBuilder(Stores.inMemoryWindowStore("window-store", Duration.ofMillis(10L), Duration.ofMillis(2L), false), Serdes.String(), Serdes.String()), "the-processor");
    topologyWrapper.addStateStore(Stores.timestampedWindowStoreBuilder(Stores.inMemoryWindowStore("timestamped-window-store", Duration.ofMillis(10L), Duration.ofMillis(2L), false), Serdes.String(), Serdes.String()), "the-processor");
    topologyWrapper.addStateStore(Stores.sessionStoreBuilder(Stores.inMemorySessionStore("session-store", Duration.ofMillis(10L)), Serdes.String(), Serdes.String()), "the-processor");

    // Streams configuration backed by a fresh temporary state directory.
    final String appId = "applicationId";
    final Properties configProps = new Properties();
    configProps.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
    configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    stateDir = TestUtils.tempDirectory();
    configProps.put(StreamsConfig.STATE_DIR_CONFIG, stateDir.getPath());
    final StreamsConfig streamsConfig = new StreamsConfig(configProps);

    final MockClientSupplier clientSupplier = new MockClientSupplier();
    configureClients(clientSupplier, "applicationId-kv-store-changelog");
    configureClients(clientSupplier, "applicationId-window-store-changelog");

    final ProcessorTopology processorTopology = topologyWrapper.getInternalBuilder(appId).buildTopology();

    tasks = new HashMap<>();
    stateDirectory = new StateDirectory(streamsConfig, new MockTime(), true, false);

    // Two tasks over the same topology (partitions 0 and 1), both initialized.
    taskOne = createStreamsTask(streamsConfig, clientSupplier, processorTopology, new TaskId(0, 0));
    taskOne.initializeIfNeeded();
    tasks.put(new TaskId(0, 0), taskOne);

    final StreamTask taskTwo = createStreamsTask(streamsConfig, clientSupplier, processorTopology, new TaskId(0, 1));
    taskTwo.initializeIfNeeded();
    tasks.put(new TaskId(0, 1), taskTwo);

    threadMock = EasyMock.createNiceMock(StreamThread.class);
    provider = new StreamThreadStateStoreProvider(threadMock);
}
Aggregations