Search in sources:

Example 1 with ThreadMetadata

Use of org.apache.kafka.streams.processor.ThreadMetadata in the project apache-kafka-on-k8s by banzaicloud.

From the class KafkaStreams, method localThreadsMetadata:

/**
 * Returns runtime information about the local threads of this {@link KafkaStreams} instance.
 *
 * @return the set of {@link ThreadMetadata}.
 */
public Set<ThreadMetadata> localThreadsMetadata() {
    validateIsRunning();
    final Set<ThreadMetadata> threadMetadata = new HashSet<>();
    for (StreamThread thread : threads) {
        threadMetadata.add(thread.threadMetadata());
    }
    return threadMetadata;
}
Also used: GlobalStreamThread (org.apache.kafka.streams.processor.internals.GlobalStreamThread), StreamThread (org.apache.kafka.streams.processor.internals.StreamThread), ThreadMetadata (org.apache.kafka.streams.processor.ThreadMetadata), HashSet (java.util.HashSet)
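
As a small usage sketch (hypothetical caller code, not part of the project), localThreadsMetadata() can be polled once the instance is running to report per-thread task assignments; because of validateIsRunning(), calling it on an instance that has not been started or has already been closed throws an IllegalStateException:

    // Hypothetical monitoring snippet; assumes `builder` and `streamsProperties` are already configured.
    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsProperties);
    streams.start();
    if (streams.state() == KafkaStreams.State.RUNNING) {
        for (final ThreadMetadata metadata : streams.localThreadsMetadata()) {
            System.out.println(metadata.threadName()
                + " state=" + metadata.threadState()
                + " active=" + metadata.activeTasks().size()
                + " standby=" + metadata.standbyTasks().size());
        }
    }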

Example 2 with ThreadMetadata

Use of org.apache.kafka.streams.processor.ThreadMetadata in the project apache-kafka-on-k8s by banzaicloud.

From the class StreamsStandByReplicaTest, method main:

public static void main(final String[] args) throws IOException {
    System.out.println("StreamsTest instance started");
    final String kafka = args.length > 0 ? args[0] : "localhost:9092";
    final String propFileName = args.length > 1 ? args[1] : null;
    final String additionalConfigs = args.length > 2 ? args[2] : null;
    final Serde<String> stringSerde = Serdes.String();
    final Properties streamsProperties = Utils.loadProps(propFileName);
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-standby-tasks");
    streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    streamsProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100);
    streamsProperties.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    streamsProperties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamsProperties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.producerPrefix(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), true);
    if (additionalConfigs == null) {
        System.err.println("additional configs are not provided");
        System.err.flush();
        System.exit(1);
    }
    final Map<String, String> updated = SystemTestUtil.parseConfigs(additionalConfigs);
    System.out.println("Updating configs with " + updated);
    final String sourceTopic = updated.remove("sourceTopic");
    final String sinkTopic1 = updated.remove("sinkTopic1");
    final String sinkTopic2 = updated.remove("sinkTopic2");
    if (sourceTopic == null || sinkTopic1 == null || sinkTopic2 == null) {
        System.err.println(String.format("one or more required topics null sourceTopic[%s], sinkTopic1[%s], sinkTopic2[%s]", sourceTopic, sinkTopic1, sinkTopic2));
        System.err.flush();
        System.exit(1);
    }
    streamsProperties.putAll(updated);
    if (!confirmCorrectConfigs(streamsProperties)) {
        System.err.println(String.format("ERROR: Did not have all required configs expected  to contain %s, %s,  %s,  %s", StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG)));
        System.exit(1);
    }
    final StreamsBuilder builder = new StreamsBuilder();
    String inMemoryStoreName = "in-memory-store";
    String persistentMemoryStoreName = "persistent-memory-store";
    KeyValueBytesStoreSupplier inMemoryStoreSupplier = Stores.inMemoryKeyValueStore(inMemoryStoreName);
    KeyValueBytesStoreSupplier persistentStoreSupplier = Stores.persistentKeyValueStore(persistentMemoryStoreName);
    KStream<String, String> inputStream = builder.stream(sourceTopic, Consumed.with(stringSerde, stringSerde));
    ValueMapper<Long, String> countMapper = new ValueMapper<Long, String>() {

        @Override
        public String apply(final Long value) {
            return value.toString();
        }
    };
    inputStream.groupByKey().count(Materialized.<String, Long>as(inMemoryStoreSupplier)).toStream().mapValues(countMapper).to(sinkTopic1, Produced.with(stringSerde, stringSerde));
    inputStream.groupByKey().count(Materialized.<String, Long>as(persistentStoreSupplier)).toStream().mapValues(countMapper).to(sinkTopic2, Produced.with(stringSerde, stringSerde));
    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsProperties);
    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {

        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            System.err.println("FATAL: An unexpected exception " + e);
            e.printStackTrace(System.err);
            System.err.flush();
            shutdown(streams);
        }
    });
    streams.setStateListener(new KafkaStreams.StateListener() {

        @Override
        public void onChange(final KafkaStreams.State newState, final KafkaStreams.State oldState) {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                final Set<ThreadMetadata> threadMetadata = streams.localThreadsMetadata();
                for (final ThreadMetadata threadMetadatum : threadMetadata) {
                    System.out.println("ACTIVE_TASKS:" + threadMetadatum.activeTasks().size() + " STANDBY_TASKS:" + threadMetadatum.standbyTasks().size());
                }
            }
        }
    });
    System.out.println("Start Kafka Streams");
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {

        @Override
        public void run() {
            shutdown(streams);
            System.out.println("Shut down streams now");
        }
    }));
}
Also used: KafkaStreams (org.apache.kafka.streams.KafkaStreams), Set (java.util.Set), ValueMapper (org.apache.kafka.streams.kstream.ValueMapper), Properties (java.util.Properties), StreamsBuilder (org.apache.kafka.streams.StreamsBuilder), KeyValueBytesStoreSupplier (org.apache.kafka.streams.state.KeyValueBytesStoreSupplier), ThreadMetadata (org.apache.kafka.streams.processor.ThreadMetadata)
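
The test calls a confirmCorrectConfigs helper that is not shown in this snippet. A minimal sketch consistent with the error message printed above (an assumption, not necessarily the project's actual implementation) simply checks that the prefixed consumer and producer keys are present:

    // Sketch of the unshown helper, reconstructed from the error message; the real code may differ.
    private static boolean confirmCorrectConfigs(final Properties properties) {
        return properties.containsKey(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG))
            && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG))
            && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG))
            && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG));
    }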

Example 3 with ThreadMetadata

Use of org.apache.kafka.streams.processor.ThreadMetadata in the project apache-kafka-on-k8s by banzaicloud.

From the class StreamThreadTest, method shouldReturnStandbyTaskMetadataWhileRunningState:

@Test
public void shouldReturnStandbyTaskMetadataWhileRunningState() {
    internalStreamsBuilder.stream(Collections.singleton(topic1), consumed).groupByKey().count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("count-one"));
    final StreamThread thread = createStreamThread(clientId, config, false);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog", 0, null, new Node[0], new Node[0])));
    final HashMap<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("stream-thread-test-count-one-changelog", 1), 0L);
    restoreConsumer.updateEndOffsets(offsets);
    restoreConsumer.updateBeginningOffsets(offsets);
    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(null);
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    // assign single partition
    standbyTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().setAssignmentMetadata(Collections.<TaskId, Set<TopicPartition>>emptyMap(), standbyTasks);
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());
    thread.runOnce(-1);
    ThreadMetadata threadMetadata = thread.threadMetadata();
    assertEquals(StreamThread.State.RUNNING.name(), threadMetadata.threadState());
    assertTrue(threadMetadata.standbyTasks().contains(new TaskMetadata(task1.toString(), Utils.mkSet(t1p1))));
    assertTrue(threadMetadata.activeTasks().isEmpty());
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Set (java.util.Set), HashSet (java.util.HashSet), HashMap (java.util.HashMap), TaskMetadata (org.apache.kafka.streams.processor.TaskMetadata), Bytes (org.apache.kafka.common.utils.Bytes), TopicPartition (org.apache.kafka.common.TopicPartition), ThreadMetadata (org.apache.kafka.streams.processor.ThreadMetadata), PartitionInfo (org.apache.kafka.common.PartitionInfo), InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest), Test (org.junit.Test)

Example 4 with ThreadMetadata

Use of org.apache.kafka.streams.processor.ThreadMetadata in the project apache-kafka-on-k8s by banzaicloud.

From the class StreamThread, method updateThreadMetadata:

private void updateThreadMetadata(final Map<TaskId, StreamTask> activeTasks, final Map<TaskId, StandbyTask> standbyTasks) {
    final Set<TaskMetadata> activeTasksMetadata = new HashSet<>();
    for (Map.Entry<TaskId, StreamTask> task : activeTasks.entrySet()) {
        activeTasksMetadata.add(new TaskMetadata(task.getKey().toString(), task.getValue().partitions()));
    }
    final Set<TaskMetadata> standbyTasksMetadata = new HashSet<>();
    for (Map.Entry<TaskId, StandbyTask> task : standbyTasks.entrySet()) {
        standbyTasksMetadata.add(new TaskMetadata(task.getKey().toString(), task.getValue().partitions()));
    }
    threadMetadata = new ThreadMetadata(this.getName(), this.state().name(), activeTasksMetadata, standbyTasksMetadata);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), TaskMetadata (org.apache.kafka.streams.processor.TaskMetadata), ThreadMetadata (org.apache.kafka.streams.processor.ThreadMetadata), HashMap (java.util.HashMap), Map (java.util.Map), HashSet (java.util.HashSet)
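
The getters on ThreadMetadata and TaskMetadata mirror the constructor arguments used here. A brief sketch of reading the populated metadata back (assuming a StreamThread reference such as the one used in Example 3):

    // Sketch: inspecting the metadata built by updateThreadMetadata (not project code).
    final ThreadMetadata metadata = thread.threadMetadata();
    System.out.println("thread=" + metadata.threadName() + " state=" + metadata.threadState());
    for (final TaskMetadata task : metadata.activeTasks()) {
        System.out.println("active task " + task.taskId() + " -> " + task.topicPartitions());
    }
    for (final TaskMetadata task : metadata.standbyTasks()) {
        System.out.println("standby task " + task.taskId() + " -> " + task.topicPartitions());
    }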

Example 5 with ThreadMetadata

Use of org.apache.kafka.streams.processor.ThreadMetadata in the project apache-kafka-on-k8s by banzaicloud.

From the class KafkaStreamsTest, method shouldReturnThreadMetadata:

@Test
public void shouldReturnThreadMetadata() {
    streams.start();
    Set<ThreadMetadata> threadMetadata = streams.localThreadsMetadata();
    assertNotNull(threadMetadata);
    assertEquals(2, threadMetadata.size());
    for (ThreadMetadata metadata : threadMetadata) {
        assertTrue("#threadState() was: " + metadata.threadState() + "; expected either RUNNING, PARTITIONS_REVOKED, PARTITIONS_ASSIGNED, or CREATED", Utils.mkList("RUNNING", "PARTITIONS_REVOKED", "PARTITIONS_ASSIGNED", "CREATED").contains(metadata.threadState()));
        assertEquals(0, metadata.standbyTasks().size());
        assertEquals(0, metadata.activeTasks().size());
    }
    streams.close();
}
Also used: ThreadMetadata (org.apache.kafka.streams.processor.ThreadMetadata), IntegrationTest (org.apache.kafka.test.IntegrationTest), Test (org.junit.Test)

Aggregations

ThreadMetadata (org.apache.kafka.streams.processor.ThreadMetadata): 7
HashSet (java.util.HashSet): 4
Test (org.junit.Test): 4
HashMap (java.util.HashMap): 3
Set (java.util.Set): 3
InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest): 3
TaskId (org.apache.kafka.streams.processor.TaskId): 3
TaskMetadata (org.apache.kafka.streams.processor.TaskMetadata): 3
TopicPartition (org.apache.kafka.common.TopicPartition): 2
ArrayList (java.util.ArrayList): 1
Map (java.util.Map): 1
Properties (java.util.Properties): 1
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 1
PartitionInfo (org.apache.kafka.common.PartitionInfo): 1
Bytes (org.apache.kafka.common.utils.Bytes): 1
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 1
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 1
ValueMapper (org.apache.kafka.streams.kstream.ValueMapper): 1
GlobalStreamThread (org.apache.kafka.streams.processor.internals.GlobalStreamThread): 1
StreamThread (org.apache.kafka.streams.processor.internals.StreamThread): 1