
Example 31 with KStream

Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.

The class AdjustStreamThreadCountTest, method shouldResizeCacheAfterThreadReplacement.

@Test
public void shouldResizeCacheAfterThreadReplacement() throws InterruptedException {
    final long totalCacheBytes = 10L;
    final Properties props = new Properties();
    props.putAll(properties);
    props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, totalCacheBytes);
    final AtomicBoolean injectError = new AtomicBoolean(false);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream(inputTopic);
    stream.transform(() -> new Transformer<String, String, KeyValue<String, String>>() {

        @Override
        public void init(final ProcessorContext context) {
            context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> {
                if (Thread.currentThread().getName().endsWith("StreamThread-1") && injectError.get()) {
                    injectError.set(false);
                    throw new RuntimeException("BOOM");
                }
            });
        }

        @Override
        public KeyValue<String, String> transform(final String key, final String value) {
            return new KeyValue<>(key, value);
        }

        @Override
        public void close() {
        }
    });
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) {
        addStreamStateChangeListener(kafkaStreams);
        kafkaStreams.setUncaughtExceptionHandler(e -> StreamThreadExceptionResponse.REPLACE_THREAD);
        startStreamsAndWaitForRunning(kafkaStreams);
        stateTransitionHistory.clear();
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
            injectError.set(true);
            waitForCondition(() -> !injectError.get(), "StreamThread did not hit and reset the injected error");
            waitForTransitionFromRebalancingToRunning();
            for (final String log : appender.getMessages()) {
                // after we replace the thread there should be two remaining threads with 5 bytes each
                if (log.endsWith("Adding StreamThread-3, there will now be 2 live threads and the new cache size per thread is 5")) {
                    return;
                }
            }
        }
    }
    fail();
}
Imports needed by this example:

import java.time.Duration;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
import org.junit.Test;
import static org.apache.kafka.test.TestUtils.waitForCondition;
import static org.junit.Assert.fail;
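Stripped down, the pattern this test exercises is a StreamsUncaughtExceptionHandler that returns REPLACE_THREAD, so a dying thread is swapped for a fresh one and the configured cache is re-divided among the live threads. A minimal standalone sketch, assuming a placeholder topic name, application id, and default serdes:

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;

public class ReplaceThreadSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "replace-thread-sketch");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024L);

        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input").foreach((key, value) -> { });

        final KafkaStreams streams = new KafkaStreams(builder.build(), props);
        // Invoked once per dying thread; REPLACE_THREAD starts a substitute thread,
        // and the total cache is re-divided across however many threads remain.
        streams.setUncaughtExceptionHandler(exception -> StreamThreadExceptionResponse.REPLACE_THREAD);
        streams.start();
    }
}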

Example 32 with KStream

Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.

The class StreamsPartitionAssignorTest, method shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount.

@Test
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount() {
    builder = new CorruptedInternalTopologyBuilder();
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));
    final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder);
    final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>());
    final KTable<String, String> inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store")));
    inputTopic
        .groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
        .aggregate(() -> "", (k, v, a) -> a + k)
        .leftJoin(inputTable, v -> v, (x, y) -> x + y);
    streamsBuilder.buildAndOptimizeTopology();
    configureDefault();
    subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(), equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
Imports needed by this example:

import java.time.Duration;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.internals.ConsumedInternal;
import org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder;
import org.apache.kafka.streams.kstream.internals.MaterializedInternal;
import org.apache.kafka.streams.processor.internals.TopologyMetadata;
import org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo;
import org.apache.kafka.streams.processor.internals.assignment.AssignorError;
import org.junit.Test;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;
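The test goes through the internal builders (InternalStreamsBuilder, ConsumedInternal, MaterializedInternal) only so it can corrupt partition-count resolution; the groupBy plus the foreign-key leftJoin are what force the repartition topics whose partition counts must be resolved. For orientation, a rough public-API sketch of the same topology, assuming string serdes throughout:

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

public class PartitionCountTopologySketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> input =
            builder.stream("topic1", Consumed.with(Serdes.String(), Serdes.String()));
        final KTable<String, String> table =
            builder.table("topic2", Consumed.with(Serdes.String(), Serdes.String()), Materialized.as("store"));
        // groupBy forces a repartition topic; its partition count has to be derived
        // from the upstream source topics, which is the step the test corrupts.
        final KTable<Windowed<String>, String> aggregated = input
            .groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
            .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
            .aggregate(() -> "", (k, v, a) -> a + k);
        // Foreign-key left join against the table, as in the test.
        aggregated.leftJoin(table, v -> v, (x, y) -> x + y);
        builder.build();
    }
}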

Example 33 with KStream

Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.

The class PageViewUntypedDemo, method main.

public static void main(final String[] args) throws Exception {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pageview-untyped");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, JsonTimestampExtractor.class);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final StreamsBuilder builder = new StreamsBuilder();
    final Serializer<JsonNode> jsonSerializer = new JsonSerializer();
    final Deserializer<JsonNode> jsonDeserializer = new JsonDeserializer();
    final Serde<JsonNode> jsonSerde = Serdes.serdeFrom(jsonSerializer, jsonDeserializer);
    final Consumed<String, JsonNode> consumed = Consumed.with(Serdes.String(), jsonSerde);
    final KStream<String, JsonNode> views = builder.stream("streams-pageview-input", consumed);
    final KTable<String, JsonNode> users = builder.table("streams-userprofile-input", consumed);
    final KTable<String, String> userRegions = users.mapValues(record -> record.get("region").textValue());
    final Duration duration24Hours = Duration.ofHours(24);
    final KStream<JsonNode, JsonNode> regionCount = views.leftJoin(userRegions, (view, region) -> {
        final ObjectNode jNode = JsonNodeFactory.instance.objectNode();
        return (JsonNode) jNode.put("user", view.get("user").textValue()).put("page", view.get("page").textValue()).put("region", region == null ? "UNKNOWN" : region);
    })
        .map((user, viewRegion) -> new KeyValue<>(viewRegion.get("region").textValue(), viewRegion))
        .groupByKey(Grouped.with(Serdes.String(), jsonSerde))
        .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofDays(7), duration24Hours).advanceBy(Duration.ofSeconds(1)))
        .count()
        .toStream()
        .map((key, value) -> {
        final ObjectNode keyNode = JsonNodeFactory.instance.objectNode();
        keyNode.put("window-start", key.window().start()).put("region", key.key());
        final ObjectNode valueNode = JsonNodeFactory.instance.objectNode();
        valueNode.put("count", value);
        return new KeyValue<>((JsonNode) keyNode, (JsonNode) valueNode);
    });
    // write to the result topic
    regionCount.to("streams-pageviewstats-untyped-output", Produced.with(jsonSerde, jsonSerde));
    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    streams.start();
    // Usually a streams application would run forever; in this example we let it
    // run briefly and then stop, since the input data is finite.
    Thread.sleep(5000L);
    streams.close();
}
Imports needed by this example:

import java.time.Duration;
import java.util.Properties;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.connect.json.JsonDeserializer;
import org.apache.kafka.connect.json.JsonSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.TimeWindows;
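To feed the demo, its input topics need JSON records. Below is a hedged seeding sketch using the plain producer API, sending JSON as strings; the "user", "page", and "region" fields match what the demo reads, while the top-level "timestamp" field is an assumption about what JsonTimestampExtractor (in the same package as the demo) expects:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class PageViewSeeder {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // A user profile first, so the table side of the join has a region for "alice".
            producer.send(new ProducerRecord<>("streams-userprofile-input", "alice",
                "{\"region\":\"europe\",\"timestamp\":1000}"));
            // Then a page view keyed by the same user.
            producer.send(new ProducerRecord<>("streams-pageview-input", "alice",
                "{\"user\":\"alice\",\"page\":\"index.html\",\"timestamp\":2000}"));
        }
    }
}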

Example 34 with KStream

Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.

The class TemperatureDemo, method main.

public static void main(final String[] args) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-temperature");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    final Duration duration24Hours = Duration.ofHours(24);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> source = builder.stream("iot-temperature");
    final KStream<Windowed<String>, String> max = source
        .selectKey((key, value) -> "temp")
        .groupByKey()
        .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofSeconds(TEMPERATURE_WINDOW_SIZE), duration24Hours))
        .reduce((value1, value2) -> {
        if (Integer.parseInt(value1) > Integer.parseInt(value2)) {
            return value1;
        } else {
            return value2;
        }
    }).toStream().filter((key, value) -> Integer.parseInt(value) > TEMPERATURE_THRESHOLD);
    final Serde<Windowed<String>> windowedSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, TEMPERATURE_WINDOW_SIZE);
    // need to override key serde to Windowed<String> type
    max.to("iot-temperature-max", Produced.with(windowedSerde, Serdes.String()));
    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    final CountDownLatch latch = new CountDownLatch(1);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread("streams-temperature-shutdown-hook") {

        @Override
        public void run() {
            streams.close();
            latch.countDown();
        }
    });
    try {
        streams.start();
        latch.await();
    } catch (final Throwable e) {
        System.exit(1);
    }
    System.exit(0);
}
Imports needed by this example:

import java.time.Duration;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.WindowedSerdes;
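Reading the demo's output outside Kafka Streams requires the same time-windowed serde on the consumer side. A sketch, assuming the local WINDOW_SIZE constant is set to the same value the demo passed to WindowedSerdes as TEMPERATURE_WINDOW_SIZE:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.WindowedSerdes;

public class TemperatureMaxReader {
    // Assumption: must match the TEMPERATURE_WINDOW_SIZE the demo used for its serde.
    private static final long WINDOW_SIZE = 5;

    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "temperature-max-reader");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        final Serde<Windowed<String>> windowedSerde =
            WindowedSerdes.timeWindowedSerdeFrom(String.class, WINDOW_SIZE);
        try (KafkaConsumer<Windowed<String>, String> consumer = new KafkaConsumer<>(
                props, windowedSerde.deserializer(), Serdes.String().deserializer())) {
            consumer.subscribe(Collections.singleton("iot-temperature-max"));
            // Poll until interrupted (Ctrl-C).
            while (true) {
                for (final ConsumerRecord<Windowed<String>, String> record : consumer.poll(Duration.ofSeconds(1))) {
                    System.out.println(record.key().window().start() + " -> " + record.value());
                }
            }
        }
    }
}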

Example 35 with KStream

Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.

The class JoinStoreIntegrationTest, method streamJoinChangelogTopicShouldBeConfiguredWithDeleteOnlyCleanupPolicy.

@Test
public void streamJoinChangelogTopicShouldBeConfiguredWithDeleteOnlyCleanupPolicy() throws Exception {
    STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID + "-changelog-cleanup-policy");
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, Integer> left = builder.stream(INPUT_TOPIC_LEFT, Consumed.with(Serdes.String(), Serdes.Integer()));
    final KStream<String, Integer> right = builder.stream(INPUT_TOPIC_RIGHT, Consumed.with(Serdes.String(), Serdes.Integer()));
    final CountDownLatch latch = new CountDownLatch(1);
    left.join(
        right,
        Integer::sum,
        JoinWindows.of(ofMillis(100)),
        StreamJoined.with(Serdes.String(), Serdes.Integer(), Serdes.Integer()).withStoreName("join-store"));
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), STREAMS_CONFIG);
        final Admin admin = Admin.create(ADMIN_CONFIG)) {
        kafkaStreams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                latch.countDown();
            }
        });
        kafkaStreams.start();
        latch.await();
        final Collection<ConfigResource> changelogTopics = Stream.of(
                "join-store-integration-test-changelog-cleanup-policy-join-store-this-join-store-changelog",
                "join-store-integration-test-changelog-cleanup-policy-join-store-other-join-store-changelog")
            .map(name -> new ConfigResource(Type.TOPIC, name))
            .collect(Collectors.toList());
        final Map<ConfigResource, org.apache.kafka.clients.admin.Config> topicConfig = admin.describeConfigs(changelogTopics).all().get();
        topicConfig.values().forEach(tc -> assertThat(tc.get("cleanup.policy").value(), is("delete")));
    }
}
Imports needed by this example:

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.ConfigResource.Type;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.StreamJoined;
import org.junit.Test;
import static java.time.Duration.ofMillis;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
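STREAMS_CONFIG, ADMIN_CONFIG, APP_ID, and the input topic names are fixtures defined elsewhere in the test class. A hypothetical stand-in for the two config objects, assuming a local broker rather than the embedded cluster the real test starts:

import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;

public class JoinStoreFixturesSketch {
    // Hypothetical stand-ins; the real fixtures point at the test's embedded cluster.
    static final Properties STREAMS_CONFIG = new Properties();
    static final Properties ADMIN_CONFIG = new Properties();

    static {
        STREAMS_CONFIG.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        STREAMS_CONFIG.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        STREAMS_CONFIG.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
        ADMIN_CONFIG.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    }
}

The delete-only policy is asserted here because stream-stream join stores retain duplicate entries per key, which log compaction would otherwise collapse, losing join history.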

Aggregations

Classes most frequently used together with KStream across these examples:

KStream (org.apache.kafka.streams.kstream.KStream): 91
Serdes (org.apache.kafka.common.serialization.Serdes): 83
Properties (java.util.Properties): 82
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 80
Test (org.junit.Test): 69
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 65
KeyValue (org.apache.kafka.streams.KeyValue): 61
Consumed (org.apache.kafka.streams.kstream.Consumed): 55
KTable (org.apache.kafka.streams.kstream.KTable): 54
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 51
Materialized (org.apache.kafka.streams.kstream.Materialized): 45
Duration (java.time.Duration): 44
List (java.util.List): 42
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 40
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 38
Arrays (java.util.Arrays): 37
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 37
Assert.assertEquals (org.junit.Assert.assertEquals): 37
Produced (org.apache.kafka.streams.kstream.Produced): 36
Grouped (org.apache.kafka.streams.kstream.Grouped): 35