Use of org.apache.kafka.streams.processor.ThreadMetadata in project apache-kafka-on-k8s by banzaicloud.
The class KafkaStreams, method localThreadsMetadata.
/**
 * Returns runtime information about the local threads of this {@link KafkaStreams} instance.
 *
 * @return the set of {@link ThreadMetadata}.
 */
public Set<ThreadMetadata> localThreadsMetadata() {
    validateIsRunning();
    final Set<ThreadMetadata> threadMetadata = new HashSet<>();
    for (StreamThread thread : threads) {
        threadMetadata.add(thread.threadMetadata());
    }
    return threadMetadata;
}
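As a rough illustration (not part of the project), a caller might aggregate the returned metadata, for example to count how many standby tasks this instance currently hosts. This is a minimal sketch, assuming streams is a KafkaStreams instance that has already been started, since validateIsRunning() rejects calls when the instance is not in a running state:

// Minimal sketch (assumption, not project code): `streams` is a started KafkaStreams instance.
int standbyTaskCount = 0;
for (final ThreadMetadata metadata : streams.localThreadsMetadata()) {
    standbyTaskCount += metadata.standbyTasks().size();
}
System.out.println("Standby tasks hosted by this instance: " + standbyTaskCount);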
Use of org.apache.kafka.streams.processor.ThreadMetadata in project apache-kafka-on-k8s by banzaicloud.
The class StreamsStandByReplicaTest, method main.
public static void main(final String[] args) throws IOException {
    System.out.println("StreamsTest instance started");

    final String kafka = args.length > 0 ? args[0] : "localhost:9092";
    final String propFileName = args.length > 1 ? args[1] : null;
    final String additionalConfigs = args.length > 2 ? args[2] : null;

    final Serde<String> stringSerde = Serdes.String();

    final Properties streamsProperties = Utils.loadProps(propFileName);
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-standby-tasks");
    streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    streamsProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100);
    streamsProperties.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    streamsProperties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamsProperties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.producerPrefix(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), true);

    if (additionalConfigs == null) {
        System.err.println("additional configs are not provided");
        System.err.flush();
        System.exit(1);
    }

    final Map<String, String> updated = SystemTestUtil.parseConfigs(additionalConfigs);
    System.out.println("Updating configs with " + updated);

    final String sourceTopic = updated.remove("sourceTopic");
    final String sinkTopic1 = updated.remove("sinkTopic1");
    final String sinkTopic2 = updated.remove("sinkTopic2");

    if (sourceTopic == null || sinkTopic1 == null || sinkTopic2 == null) {
        System.err.println(String.format(
            "one or more required topics null sourceTopic[%s], sinkTopic1[%s], sinkTopic2[%s]",
            sourceTopic, sinkTopic1, sinkTopic2));
        System.err.flush();
        System.exit(1);
    }

    streamsProperties.putAll(updated);

    if (!confirmCorrectConfigs(streamsProperties)) {
        System.err.println(String.format(
            "ERROR: Did not have all required configs expected to contain %s, %s, %s, %s",
            StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG),
            StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG),
            StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG),
            StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG)));
        System.exit(1);
    }

    final StreamsBuilder builder = new StreamsBuilder();
    String inMemoryStoreName = "in-memory-store";
    String persistentMemoryStoreName = "persistent-memory-store";
    KeyValueBytesStoreSupplier inMemoryStoreSupplier = Stores.inMemoryKeyValueStore(inMemoryStoreName);
    KeyValueBytesStoreSupplier persistentStoreSupplier = Stores.persistentKeyValueStore(persistentMemoryStoreName);

    KStream<String, String> inputStream = builder.stream(sourceTopic, Consumed.with(stringSerde, stringSerde));

    ValueMapper<Long, String> countMapper = new ValueMapper<Long, String>() {
        @Override
        public String apply(final Long value) {
            return value.toString();
        }
    };

    inputStream.groupByKey()
        .count(Materialized.<String, Long>as(inMemoryStoreSupplier))
        .toStream()
        .mapValues(countMapper)
        .to(sinkTopic1, Produced.with(stringSerde, stringSerde));

    inputStream.groupByKey()
        .count(Materialized.<String, Long>as(persistentStoreSupplier))
        .toStream()
        .mapValues(countMapper)
        .to(sinkTopic2, Produced.with(stringSerde, stringSerde));

    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsProperties);

    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            System.err.println("FATAL: An unexpected exception " + e);
            e.printStackTrace(System.err);
            System.err.flush();
            shutdown(streams);
        }
    });

    streams.setStateListener(new KafkaStreams.StateListener() {
        @Override
        public void onChange(final KafkaStreams.State newState, final KafkaStreams.State oldState) {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                final Set<ThreadMetadata> threadMetadata = streams.localThreadsMetadata();
                for (final ThreadMetadata threadMetadatum : threadMetadata) {
                    System.out.println("ACTIVE_TASKS:" + threadMetadatum.activeTasks().size()
                        + " STANDBY_TASKS:" + threadMetadatum.standbyTasks().size());
                }
            }
        }
    });

    System.out.println("Start Kafka Streams");
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            shutdown(streams);
            System.out.println("Shut down streams now");
        }
    }));
}
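The helpers confirmCorrectConfigs and shutdown are referenced above but not shown in this snippet. A minimal sketch of what they might look like, assuming confirmCorrectConfigs merely checks that the four prefixed keys named in the error message are present and that shutdown closes the instance with a bounded timeout (both are assumptions, not the project's actual code; java.util.concurrent.TimeUnit is assumed to be imported):

private static boolean confirmCorrectConfigs(final Properties properties) {
    // Assumed check: all four prefixed configs named in the error message must be present.
    return properties.containsKey(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG))
        && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG))
        && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG))
        && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG));
}

private static void shutdown(final KafkaStreams streams) {
    // Assumed behavior: close with a timeout so the test harness does not hang on exit.
    streams.close(10, TimeUnit.SECONDS);
}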
Use of org.apache.kafka.streams.processor.ThreadMetadata in project apache-kafka-on-k8s by banzaicloud.
The class StreamThreadTest, method shouldReturnStandbyTaskMetadataWhileRunningState.
@Test
public void shouldReturnStandbyTaskMetadataWhileRunningState() {
    internalStreamsBuilder.stream(Collections.singleton(topic1), consumed)
        .groupByKey()
        .count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("count-one"));

    final StreamThread thread = createStreamThread(clientId, config, false);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions(
        "stream-thread-test-count-one-changelog",
        Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog", 0, null, new Node[0], new Node[0])));

    final HashMap<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("stream-thread-test-count-one-changelog", 1), 0L);
    restoreConsumer.updateEndOffsets(offsets);
    restoreConsumer.updateBeginningOffsets(offsets);

    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(null);

    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    // assign single partition
    standbyTasks.put(task1, Collections.singleton(t1p1));

    thread.taskManager().setAssignmentMetadata(Collections.<TaskId, Set<TopicPartition>>emptyMap(), standbyTasks);
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());
    thread.runOnce(-1);

    ThreadMetadata threadMetadata = thread.threadMetadata();
    assertEquals(StreamThread.State.RUNNING.name(), threadMetadata.threadState());
    assertTrue(threadMetadata.standbyTasks().contains(new TaskMetadata(task1.toString(), Utils.mkSet(t1p1))));
    assertTrue(threadMetadata.activeTasks().isEmpty());
}
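The fixtures topic1, clientId, config, task1, t1p1, internalStreamsBuilder and createStreamThread come from the surrounding test class and are not shown here. For orientation only, a plausible shape for the task and partition fixtures, assumed rather than copied from the project, would be:

// Assumed fixture values for illustration; the real test class defines its own.
final String topic1 = "topic1";
final TopicPartition t1p1 = new TopicPartition(topic1, 1);
final TaskId task1 = new TaskId(0, 1); // topic group 0, partition 1, matching t1p1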
Use of org.apache.kafka.streams.processor.ThreadMetadata in project apache-kafka-on-k8s by banzaicloud.
The class StreamThread, method updateThreadMetadata.
private void updateThreadMetadata(final Map<TaskId, StreamTask> activeTasks, final Map<TaskId, StandbyTask> standbyTasks) {
    final Set<TaskMetadata> activeTasksMetadata = new HashSet<>();
    for (Map.Entry<TaskId, StreamTask> task : activeTasks.entrySet()) {
        activeTasksMetadata.add(new TaskMetadata(task.getKey().toString(), task.getValue().partitions()));
    }
    final Set<TaskMetadata> standbyTasksMetadata = new HashSet<>();
    for (Map.Entry<TaskId, StandbyTask> task : standbyTasks.entrySet()) {
        standbyTasksMetadata.add(new TaskMetadata(task.getKey().toString(), task.getValue().partitions()));
    }
    threadMetadata = new ThreadMetadata(this.getName(), this.state().name(), activeTasksMetadata, standbyTasksMetadata);
}
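Each TaskMetadata built here pairs a task id string with the task's partitions, so consumers of ThreadMetadata can recover both through the public getters. A short sketch (not from the project) of reading that information back, assuming metadata is a ThreadMetadata obtained for example from KafkaStreams#localThreadsMetadata():

// Sketch: list which partitions each active task of a thread owns.
for (final TaskMetadata task : metadata.activeTasks()) {
    System.out.println("task " + task.taskId() + " owns partitions " + task.topicPartitions());
}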
Use of org.apache.kafka.streams.processor.ThreadMetadata in project apache-kafka-on-k8s by banzaicloud.
The class KafkaStreamsTest, method shouldReturnThreadMetadata.
@Test
public void shouldReturnThreadMetadata() {
    streams.start();
    Set<ThreadMetadata> threadMetadata = streams.localThreadsMetadata();
    assertNotNull(threadMetadata);
    assertEquals(2, threadMetadata.size());
    for (ThreadMetadata metadata : threadMetadata) {
        assertTrue(
            "#threadState() was: " + metadata.threadState() + "; expected either RUNNING, PARTITIONS_REVOKED, PARTITIONS_ASSIGNED, or CREATED",
            Utils.mkList("RUNNING", "PARTITIONS_REVOKED", "PARTITIONS_ASSIGNED", "CREATED").contains(metadata.threadState()));
        assertEquals(0, metadata.standbyTasks().size());
        assertEquals(0, metadata.activeTasks().size());
    }
    streams.close();
}