Use of org.apache.kafka.streams.KeyValue in project kafka by apache.
Class AdjustStreamThreadCountTest, method shouldResizeCacheAfterThreadReplacement.
@Test
public void shouldResizeCacheAfterThreadReplacement() throws InterruptedException {
    final long totalCacheBytes = 10L;
    final Properties props = new Properties();
    props.putAll(properties);
    props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, totalCacheBytes);

    final AtomicBoolean injectError = new AtomicBoolean(false);

    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream(inputTopic);
    stream.transform(() -> new Transformer<String, String, KeyValue<String, String>>() {
        @Override
        public void init(final ProcessorContext context) {
            context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> {
                if (Thread.currentThread().getName().endsWith("StreamThread-1") && injectError.get()) {
                    injectError.set(false);
                    throw new RuntimeException("BOOM");
                }
            });
        }

        @Override
        public KeyValue<String, String> transform(final String key, final String value) {
            return new KeyValue<>(key, value);
        }

        @Override
        public void close() {
        }
    });

    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) {
        addStreamStateChangeListener(kafkaStreams);
        kafkaStreams.setUncaughtExceptionHandler(e -> StreamThreadExceptionResponse.REPLACE_THREAD);
        startStreamsAndWaitForRunning(kafkaStreams);

        stateTransitionHistory.clear();
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
            injectError.set(true);
            waitForCondition(() -> !injectError.get(), "StreamThread did not hit and reset the injected error");

            waitForTransitionFromRebalancingToRunning();

            for (final String log : appender.getMessages()) {
                // after we replace the thread there should be two remaining threads with 5 bytes each
                if (log.endsWith("Adding StreamThread-3, there will now be 2 live threads and the new cache size per thread is 5")) {
                    return;
                }
            }
        }
    }
    fail();
}
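The asserted log line follows from how Streams splits the total cache evenly across live threads: 10 bytes over two threads is 5 bytes each, and the replacement StreamThread-3 is given the same share. Outside a test harness, the REPLACE_THREAD handler is wired the same way; a minimal sketch, where topology and props are placeholders for your application's own:

    final KafkaStreams app = new KafkaStreams(topology, props);  // assumed topology/config
    // replace a crashed StreamThread instead of shutting the client down;
    // per-thread cache shares are recomputed as threads are removed and added
    app.setUncaughtExceptionHandler(exception -> StreamThreadExceptionResponse.REPLACE_THREAD);
    app.start();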
Use of org.apache.kafka.streams.KeyValue in project kafka by apache.
Class AbstractResetIntegrationTest, method testReprocessingFromScratchAfterResetWithoutIntermediateUserTopic.
@Test
public void testReprocessingFromScratchAfterResetWithoutIntermediateUserTopic() throws Exception {
    final String appID = IntegrationTestUtils.safeUniqueTestName(getClass(), testName);
    streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);

    // RUN
    streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
    streams.start();
    final List<KeyValue<Long, Long>> result = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);

    streams.close();
    waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);

    // RESET
    streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
    streams.cleanUp();
    cleanGlobal(false, null, null, appID);
    waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);

    assertInternalTopicsGotDeleted(null);

    // RE-RUN
    streams.start();
    final List<KeyValue<Long, Long>> resultRerun = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
    streams.close();

    assertThat(resultRerun, equalTo(result));

    waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
    cleanGlobal(false, null, null, appID);
}
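cleanGlobal(...) above is a test helper that drives the application reset; the local-state half of that reset is the plain KafkaStreams#cleanUp() call, which is only valid while the instance is not running. A minimal sketch, with topology and props assumed to come from the application:

    final KafkaStreams streams = new KafkaStreams(topology, props);  // assumed topology/config
    // cleanUp() deletes the application's local state directory so reprocessing
    // starts from scratch; call it only before start() or after close()
    streams.cleanUp();
    streams.start();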
Use of org.apache.kafka.streams.KeyValue in project kafka by apache.
Class StateRestoreCallbackAdapterTest, method shouldConvertToKeyValueBatches.
@Test
public void shouldConvertToKeyValueBatches() {
    final ArrayList<KeyValue<byte[], byte[]>> actual = new ArrayList<>();
    final BatchingStateRestoreCallback callback = new BatchingStateRestoreCallback() {
        @Override
        public void restoreAll(final Collection<KeyValue<byte[], byte[]>> records) {
            actual.addAll(records);
        }

        @Override
        public void restore(final byte[] key, final byte[] value) {
            // unreachable
        }
    };

    final RecordBatchingStateRestoreCallback adapted = adapt(callback);

    final byte[] key1 = { 1 };
    final byte[] value1 = { 2 };
    final byte[] key2 = { 3 };
    final byte[] value2 = { 4 };

    adapted.restoreBatch(asList(
        new ConsumerRecord<>("topic1", 0, 0L, key1, value1),
        new ConsumerRecord<>("topic2", 1, 1L, key2, value2)
    ));

    assertThat(actual, is(asList(new KeyValue<>(key1, value1), new KeyValue<>(key2, value2))));
}
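The assertions imply the conversion the adapter performs: each changelog ConsumerRecord is stripped down to its key and value before the batch reaches restoreAll(). A sketch of that shape as a hypothetical helper (restoreBatchAsKeyValues is not a real Kafka API, only an illustration of what the test expects):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.processor.BatchingStateRestoreCallback;

    final class RestoreSketch {
        // hypothetical helper: repackage changelog records as KeyValue pairs and
        // hand the whole batch to restoreAll(), matching the assertions above
        static void restoreBatchAsKeyValues(final Collection<ConsumerRecord<byte[], byte[]>> records,
                                            final BatchingStateRestoreCallback callback) {
            final List<KeyValue<byte[], byte[]>> keyValues = new ArrayList<>(records.size());
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                keyValues.add(new KeyValue<>(record.key(), record.value()));
            }
            callback.restoreAll(keyValues);
        }
    }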
Use of org.apache.kafka.streams.KeyValue in project kafka by apache.
Class StateRestoreCallbackAdapterTest, method shouldConvertToKeyValue.
@Test
public void shouldConvertToKeyValue() {
    final ArrayList<KeyValue<byte[], byte[]>> actual = new ArrayList<>();
    final StateRestoreCallback callback = (key, value) -> actual.add(new KeyValue<>(key, value));

    final RecordBatchingStateRestoreCallback adapted = adapt(callback);

    final byte[] key1 = { 1 };
    final byte[] value1 = { 2 };
    final byte[] key2 = { 3 };
    final byte[] value2 = { 4 };

    adapted.restoreBatch(asList(
        new ConsumerRecord<>("topic1", 0, 0L, key1, value1),
        new ConsumerRecord<>("topic2", 1, 1L, key2, value2)
    ));

    assertThat(actual, is(asList(new KeyValue<>(key1, value1), new KeyValue<>(key2, value2))));
}
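Both adapter tests build pairs with new KeyValue<>(...); the equivalent static factory KeyValue.pair(...) infers the generic types and reads slightly tighter:

    import org.apache.kafka.streams.KeyValue;

    final KeyValue<byte[], byte[]> pair = KeyValue.pair(new byte[] { 1 }, new byte[] { 2 });
    // identical to: new KeyValue<>(new byte[] { 1 }, new byte[] { 2 })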
Use of org.apache.kafka.streams.KeyValue in project kafka by apache.
Class PageViewUntypedDemo, method main.
public static void main(final String[] args) throws Exception {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pageview-untyped");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, JsonTimestampExtractor.class);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    final StreamsBuilder builder = new StreamsBuilder();

    final Serializer<JsonNode> jsonSerializer = new JsonSerializer();
    final Deserializer<JsonNode> jsonDeserializer = new JsonDeserializer();
    final Serde<JsonNode> jsonSerde = Serdes.serdeFrom(jsonSerializer, jsonDeserializer);
    final Consumed<String, JsonNode> consumed = Consumed.with(Serdes.String(), jsonSerde);

    final KStream<String, JsonNode> views = builder.stream("streams-pageview-input", consumed);
    final KTable<String, JsonNode> users = builder.table("streams-userprofile-input", consumed);
    final KTable<String, String> userRegions = users.mapValues(record -> record.get("region").textValue());

    final Duration duration24Hours = Duration.ofHours(24);

    final KStream<JsonNode, JsonNode> regionCount = views
        .leftJoin(userRegions, (view, region) -> {
            final ObjectNode jNode = JsonNodeFactory.instance.objectNode();
            return (JsonNode) jNode.put("user", view.get("user").textValue())
                .put("page", view.get("page").textValue())
                .put("region", region == null ? "UNKNOWN" : region);
        })
        .map((user, viewRegion) -> new KeyValue<>(viewRegion.get("region").textValue(), viewRegion))
        .groupByKey(Grouped.with(Serdes.String(), jsonSerde))
        .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofDays(7), duration24Hours).advanceBy(Duration.ofSeconds(1)))
        .count()
        .toStream()
        .map((key, value) -> {
            final ObjectNode keyNode = JsonNodeFactory.instance.objectNode();
            keyNode.put("window-start", key.window().start()).put("region", key.key());
            final ObjectNode valueNode = JsonNodeFactory.instance.objectNode();
            valueNode.put("count", value);
            return new KeyValue<>((JsonNode) keyNode, (JsonNode) valueNode);
        });

    // write to the result topic
    regionCount.to("streams-pageviewstats-untyped-output", Produced.with(jsonSerde, jsonSerde));

    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    streams.start();

    // usually the stream application would be running forever,
    // in this example we just let it run for some time and stop since the input data is finite.
    Thread.sleep(5000L);
    streams.close();
}
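Thread.sleep(5000L) followed by close() works here only because the demo's input is finite. A long-running deployment would typically block on a shutdown-hook latch instead; a sketch of that tail, assuming the same streams instance and an import of java.util.concurrent.CountDownLatch:

    final CountDownLatch latch = new CountDownLatch(1);
    // close the Streams client cleanly when the JVM receives SIGTERM / Ctrl-C
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        streams.close();
        latch.countDown();
    }));
    streams.start();
    latch.await();  // block the main thread until shutdown is requested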