Usage example of org.apache.kafka.streams.ThreadMetadata from the Apache Kafka project: class ThreadMetadataImplTest, method shouldNotBeEqualIfDifferInProducerClientIds.
@Test
public void shouldNotBeEqualIfDifferInProducerClientIds() {
    // Identical to the reference metadata except for the producer client id set.
    final ThreadMetadata differProducerClientIds = new ThreadMetadataImpl(
        THREAD_NAME,
        THREAD_STATE,
        MAIN_CONSUMER_CLIENT_ID,
        RESTORE_CONSUMER_CLIENT_ID,
        mkSet(CLIENT_ID_1),
        ADMIN_CLIENT_ID,
        ACTIVE_TASKS,
        STANDBY_TASKS);
    // A differing producer client id set must make both equals() and hashCode() disagree.
    final int otherHash = differProducerClientIds.hashCode();
    assertThat(threadMetadata, not(equalTo(differProducerClientIds)));
    assertThat(threadMetadata.hashCode(), not(equalTo(otherHash)));
}
Usage example of org.apache.kafka.streams.ThreadMetadata from the Apache Kafka project: class ThreadMetadataImplTest, method shouldNotBeEqualIfDifferInStandByTasks.
@Test
public void shouldNotBeEqualIfDifferInStandByTasks() {
    // Identical to the reference metadata except for the standby task set.
    final ThreadMetadata differStandByTasks = new ThreadMetadataImpl(
        THREAD_NAME,
        THREAD_STATE,
        MAIN_CONSUMER_CLIENT_ID,
        RESTORE_CONSUMER_CLIENT_ID,
        PRODUCER_CLIENT_IDS,
        ADMIN_CLIENT_ID,
        ACTIVE_TASKS,
        mkSet(TM_0));
    // A differing standby task set must make both equals() and hashCode() disagree.
    final int otherHash = differStandByTasks.hashCode();
    assertThat(threadMetadata, not(equalTo(differStandByTasks)));
    assertThat(threadMetadata.hashCode(), not(equalTo(otherHash)));
}
Usage example of org.apache.kafka.streams.ThreadMetadata from the Apache Kafka project: class ThreadMetadataImplTest, method shouldNotBeEqualIfDifferInConsumerClientId.
@Test
public void shouldNotBeEqualIfDifferInConsumerClientId() {
    // Identical to the reference metadata except for the restore consumer client id.
    final ThreadMetadata differRestoreConsumerClientId = new ThreadMetadataImpl(
        THREAD_NAME,
        THREAD_STATE,
        MAIN_CONSUMER_CLIENT_ID,
        "different",
        PRODUCER_CLIENT_IDS,
        ADMIN_CLIENT_ID,
        ACTIVE_TASKS,
        STANDBY_TASKS);
    // A differing restore consumer client id must make both equals() and hashCode() disagree.
    final int otherHash = differRestoreConsumerClientId.hashCode();
    assertThat(threadMetadata, not(equalTo(differRestoreConsumerClientId)));
    assertThat(threadMetadata.hashCode(), not(equalTo(otherHash)));
}
Usage example of org.apache.kafka.streams.ThreadMetadata from the Apache Kafka project: class StreamsStandByReplicaTest, method main.
/**
 * System-test driver that runs a Kafka Streams topology with one standby replica
 * and reports active/standby task counts whenever the instance transitions from
 * REBALANCING to RUNNING.
 *
 * @param args args[0] = path to a properties file with the Streams config;
 *             args[1] = additional configs string (must contain sourceTopic,
 *             sinkTopic1 and sinkTopic2 entries)
 * @throws IOException if the properties file cannot be loaded
 */
public static void main(final String[] args) throws IOException {
    if (args.length < 2) {
        System.err.println("StreamsStandByReplicaTest are expecting two parameters: " + "propFile, additionalConfigs; but only see " + args.length + " parameter");
        Exit.exit(1);
        // Exit may be overridden (e.g. in tests) to not halt the JVM, so return explicitly.
        return;
    }
    System.out.println("StreamsTest instance started");
    final String propFileName = args[0];
    final String additionalConfigs = args[1];
    final Properties streamsProperties = Utils.loadProps(propFileName);
    final String kafka = streamsProperties.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG);
    if (kafka == null) {
        System.err.println("No bootstrap kafka servers specified in " + StreamsConfig.BOOTSTRAP_SERVERS_CONFIG);
        Exit.exit(1);
        return;
    }
    // Fixed test configuration: one standby replica, no caching, fast commits.
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-standby-tasks");
    streamsProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    streamsProperties.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    streamsProperties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamsProperties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.producerPrefix(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), true);
    if (additionalConfigs == null) {
        System.err.println("additional configs are not provided");
        System.err.flush();
        Exit.exit(1);
        return;
    }
    final Map<String, String> updated = SystemTestUtil.parseConfigs(additionalConfigs);
    System.out.println("Updating configs with " + updated);
    // Topic names are carried in the additional configs and removed before the
    // remaining entries are merged into the Streams properties.
    final String sourceTopic = updated.remove("sourceTopic");
    final String sinkTopic1 = updated.remove("sinkTopic1");
    final String sinkTopic2 = updated.remove("sinkTopic2");
    if (sourceTopic == null || sinkTopic1 == null || sinkTopic2 == null) {
        System.err.println(String.format("one or more required topics null sourceTopic[%s], sinkTopic1[%s], sinkTopic2[%s]", sourceTopic, sinkTopic1, sinkTopic2));
        System.err.flush();
        Exit.exit(1);
        return;
    }
    streamsProperties.putAll(updated);
    if (!confirmCorrectConfigs(streamsProperties)) {
        System.err.println(String.format("ERROR: Did not have all required configs expected to contain %s, %s, %s, %s", StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG)));
        Exit.exit(1);
        return;
    }
    // Topology: count records per key into one in-memory and one persistent store,
    // emitting the counts (as strings) to the two sink topics.
    final StreamsBuilder builder = new StreamsBuilder();
    final String inMemoryStoreName = "in-memory-store";
    final String persistentMemoryStoreName = "persistent-memory-store";
    final KeyValueBytesStoreSupplier inMemoryStoreSupplier = Stores.inMemoryKeyValueStore(inMemoryStoreName);
    final KeyValueBytesStoreSupplier persistentStoreSupplier = Stores.persistentKeyValueStore(persistentMemoryStoreName);
    final Serde<String> stringSerde = Serdes.String();
    final ValueMapper<Long, String> countMapper = Object::toString;
    final KStream<String, String> inputStream = builder.stream(sourceTopic, Consumed.with(stringSerde, stringSerde));
    inputStream.groupByKey().count(Materialized.as(inMemoryStoreSupplier)).toStream().mapValues(countMapper).to(sinkTopic1, Produced.with(stringSerde, stringSerde));
    inputStream.groupByKey().count(Materialized.as(persistentStoreSupplier)).toStream().mapValues(countMapper).to(sinkTopic2, Produced.with(stringSerde, stringSerde));
    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsProperties);
    // Any unexpected exception is fatal for the test: log it and shut the client down.
    streams.setUncaughtExceptionHandler(e -> {
        System.err.println("FATAL: An unexpected exception " + e);
        e.printStackTrace(System.err);
        System.err.flush();
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
    });
    // The external test harness parses these ACTIVE_TASKS/STANDBY_TASKS lines,
    // printed once the instance settles from REBALANCING into RUNNING.
    streams.setStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            final Set<ThreadMetadata> threadMetadata = streams.metadataForLocalThreads();
            for (final ThreadMetadata threadMetadatum : threadMetadata) {
                System.out.println("ACTIVE_TASKS:" + threadMetadatum.activeTasks().size() + " STANDBY_TASKS:" + threadMetadatum.standbyTasks().size());
            }
        }
    });
    System.out.println("Start Kafka Streams");
    streams.start();
    Exit.addShutdownHook("streams-shutdown-hook", () -> {
        shutdown(streams);
        System.out.println("Shut down streams now");
    });
}
Usage example of org.apache.kafka.streams.ThreadMetadata from the Apache Kafka project: class AdjustStreamThreadCountTest, method shouldAddStreamThread.
@Test
public void shouldAddStreamThread() throws Exception {
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), properties)) {
        addStreamStateChangeListener(kafkaStreams);
        startStreamsAndWaitForRunning(kafkaStreams);

        // Two threads exist initially; their names end in "-StreamThread-<n>".
        final int oldThreadCount = kafkaStreams.metadataForLocalThreads().size();
        final Object[] initialSuffixes = kafkaStreams.metadataForLocalThreads()
            .stream()
            .map(meta -> meta.threadName().split("-StreamThread-")[1])
            .sorted()
            .toArray();
        assertThat(initialSuffixes, equalTo(new String[] { "1", "2" }));

        stateTransitionHistory.clear();

        // Adding a thread should return its name and eventually show up in the metadata.
        final Optional<String> name = kafkaStreams.addStreamThread();
        assertThat(name, not(Optional.empty()));
        TestUtils.waitForCondition(
            () -> kafkaStreams.metadataForLocalThreads()
                .stream()
                .sequential()
                .map(ThreadMetadata::threadName)
                .anyMatch(threadName -> threadName.equals(name.orElse(""))),
            "Wait for the thread to be added");

        assertThat(kafkaStreams.metadataForLocalThreads().size(), equalTo(oldThreadCount + 1));
        final Object[] suffixesAfterAdd = kafkaStreams.metadataForLocalThreads()
            .stream()
            .map(meta -> meta.threadName().split("-StreamThread-")[1])
            .sorted()
            .toArray();
        assertThat(suffixesAfterAdd, equalTo(new String[] { "1", "2", "3" }));

        waitForTransitionFromRebalancingToRunning();
    }
}
End of aggregated usage examples.