
Example 1 with ThreadMetadata

Use of org.apache.kafka.streams.ThreadMetadata in project kafka by apache.

From the class AdjustStreamThreadCountTest, method shouldAddAndRemoveStreamThreadsWhileKeepingNamesCorrect.

@Test
public void shouldAddAndRemoveStreamThreadsWhileKeepingNamesCorrect() throws Exception {
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), properties)) {
        addStreamStateChangeListener(kafkaStreams);
        startStreamsAndWaitForRunning(kafkaStreams);
        int oldThreadCount = kafkaStreams.metadataForLocalThreads().size();
        stateTransitionHistory.clear();
        assertThat(kafkaStreams.metadataForLocalThreads().stream().map(t -> t.threadName().split("-StreamThread-")[1]).sorted().toArray(), equalTo(new String[] { "1", "2" }));
        final Optional<String> name = kafkaStreams.addStreamThread();
        assertThat("New thread has index 3", "3".equals(name.get().split("-StreamThread-")[1]));
        TestUtils.waitForCondition(() -> kafkaStreams.metadataForLocalThreads().stream().sequential().map(ThreadMetadata::threadName).anyMatch(t -> t.equals(name.get())), "Stream thread has not been added");
        assertThat(kafkaStreams.metadataForLocalThreads().size(), equalTo(oldThreadCount + 1));
        assertThat(kafkaStreams.metadataForLocalThreads().stream().map(t -> t.threadName().split("-StreamThread-")[1]).sorted().toArray(), equalTo(new String[] { "1", "2", "3" }));
        waitForTransitionFromRebalancingToRunning();
        oldThreadCount = kafkaStreams.metadataForLocalThreads().size();
        stateTransitionHistory.clear();
        final Optional<String> removedThread = kafkaStreams.removeStreamThread();
        assertThat(removedThread, not(Optional.empty()));
        assertThat(kafkaStreams.metadataForLocalThreads().size(), equalTo(oldThreadCount - 1));
        waitForTransitionFromRebalancingToRunning();
        stateTransitionHistory.clear();
        final Optional<String> name2 = kafkaStreams.addStreamThread();
        assertThat(name2, not(Optional.empty()));
        TestUtils.waitForCondition(() -> kafkaStreams.metadataForLocalThreads().stream().sequential().map(ThreadMetadata::threadName).anyMatch(t -> t.equals(name2.orElse(""))), "Wait for the thread to be added");
        assertThat(kafkaStreams.metadataForLocalThreads().size(), equalTo(oldThreadCount));
        assertThat(kafkaStreams.metadataForLocalThreads().stream().map(t -> t.threadName().split("-StreamThread-")[1]).sorted().toArray(), equalTo(new String[] { "1", "2", "3" }));
        assertThat("the new thread should have received the old threads name", name2.equals(removedThread));
        waitForTransitionFromRebalancingToRunning();
    }
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) Assert.fail(org.junit.Assert.fail) AfterClass(org.junit.AfterClass) ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkObjectProperties(org.apache.kafka.common.utils.Utils.mkObjectProperties) KeyValue(org.apache.kafka.streams.KeyValue) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) Category(org.junit.experimental.categories.Category) Executors(java.util.concurrent.Executors) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) CountDownLatch(java.util.concurrent.CountDownLatch) IntegrationTestUtils.purgeLocalStreamsState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.purgeLocalStreamsState) List(java.util.List) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) Optional(java.util.Optional) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) Assert.assertThrows(org.junit.Assert.assertThrows) CoreMatchers.not(org.hamcrest.CoreMatchers.not) IntegrationTest(org.apache.kafka.test.IntegrationTest) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) KStream(org.apache.kafka.streams.kstream.KStream) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) PunctuationType(org.apache.kafka.streams.processor.PunctuationType) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) ExecutorService(java.util.concurrent.ExecutorService) Before(org.junit.Before) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Properties(java.util.Properties) Transformer(org.apache.kafka.streams.kstream.Transformer) TestUtils.waitForCondition(org.apache.kafka.test.TestUtils.waitForCondition) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) TimeUnit(java.util.concurrent.TimeUnit) Rule(org.junit.Rule) Assert.assertNull(org.junit.Assert.assertNull) StreamThreadExceptionResponse(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Assert.assertEquals(org.junit.Assert.assertEquals) KafkaStreams(org.apache.kafka.streams.KafkaStreams) ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
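
The test above exercises KafkaStreams#addStreamThread() and KafkaStreams#removeStreamThread() and verifies the resulting thread names through metadataForLocalThreads(). The same public API can be used to scale threads at runtime outside of a test; below is a minimal sketch (the class name, topic names, application id, and bootstrap servers are placeholders, not taken from the example above).

import java.util.Optional;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.ThreadMetadata;

public class ThreadScalingSketch {

    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        // "input-topic" and "output-topic" are placeholder topic names
        builder.<String, String>stream("input-topic").to("output-topic");

        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "thread-scaling-sketch"); // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");     // placeholder
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        try (final KafkaStreams streams = new KafkaStreams(builder.build(), props)) {
            streams.start();

            // Add one more processing thread; the Optional holds the new thread's name on success.
            final Optional<String> added = streams.addStreamThread();
            added.ifPresent(name -> System.out.println("added " + name));

            // Inspect every live thread through its ThreadMetadata.
            for (final ThreadMetadata thread : streams.metadataForLocalThreads()) {
                System.out.println(thread.threadName() + " is " + thread.threadState());
            }

            // Remove a thread again; the removed thread's name is returned if one could be stopped.
            streams.removeStreamThread().ifPresent(name -> System.out.println("removed " + name));
        }
    }
}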

Example 2 with ThreadMetadata

Use of org.apache.kafka.streams.ThreadMetadata in project kafka by apache.

From the class StreamThreadTest, method shouldReturnActiveTaskMetadataWhileRunningState.

@Test
public void shouldReturnActiveTaskMetadataWhileRunningState() {
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    clientSupplier.setCluster(createCluster());
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, APPLICATION_ID, config.getString(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG), mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = StreamThread.create(topologyMetadata, config, clientSupplier, clientSupplier.getAdmin(config.getAdminConfigs(CLIENT_ID)), PROCESS_ID, CLIENT_ID, streamsMetrics, mockTime, streamsMetadataState, 0, stateDirectory, new MockStateRestoreListener(), threadIdx, null, HANDLER);
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    final ThreadMetadata metadata = thread.threadMetadata();
    assertEquals(StreamThread.State.RUNNING.name(), metadata.threadState());
    assertTrue(metadata.activeTasks().contains(new TaskMetadataImpl(task1, Utils.mkSet(t1p1), new HashMap<>(), new HashMap<>(), Optional.empty())));
    assertTrue(metadata.standbyTasks().isEmpty());
    assertTrue("#threadState() was: " + metadata.threadState() + "; expected either RUNNING, STARTING, PARTITIONS_REVOKED, PARTITIONS_ASSIGNED, or CREATED", Arrays.asList("RUNNING", "STARTING", "PARTITIONS_REVOKED", "PARTITIONS_ASSIGNED", "CREATED").contains(metadata.threadState()));
    final String threadName = metadata.threadName();
    assertThat(threadName, startsWith(CLIENT_ID + "-StreamThread-" + threadIdx));
    assertEquals(threadName + "-consumer", metadata.consumerClientId());
    assertEquals(threadName + "-restore-consumer", metadata.restoreConsumerClientId());
    assertEquals(Collections.singleton(threadName + "-producer"), metadata.producerClientIds());
    assertEquals(CLIENT_ID + "-admin", metadata.adminClientId());
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MockStateRestoreListener(org.apache.kafka.test.MockStateRestoreListener) TopicPartition(org.apache.kafka.common.TopicPartition) ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Test(org.junit.Test)
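
This test checks the naming convention of the per-thread clients (main consumer, restore consumer, producers, admin) exposed by ThreadMetadata. The same fields can be read by an application through the public interface; a minimal sketch, assuming a running KafkaStreams instance (the helper class name is illustrative).

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.TaskMetadata;
import org.apache.kafka.streams.ThreadMetadata;

public final class ThreadMetadataLogger {

    private ThreadMetadataLogger() { }

    // Print the client ids and active task assignments of every local stream thread.
    public static void logThreads(final KafkaStreams streams) {
        for (final ThreadMetadata thread : streams.metadataForLocalThreads()) {
            System.out.println(thread.threadName() + " (" + thread.threadState() + ")");
            System.out.println("  main consumer:    " + thread.consumerClientId());
            System.out.println("  restore consumer: " + thread.restoreConsumerClientId());
            System.out.println("  producers:        " + thread.producerClientIds());
            System.out.println("  admin client:     " + thread.adminClientId());
            for (final TaskMetadata task : thread.activeTasks()) {
                System.out.println("  active task " + task.taskId() + " -> " + task.topicPartitions());
            }
        }
    }
}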

Example 3 with ThreadMetadata

Use of org.apache.kafka.streams.ThreadMetadata in project kafka by apache.

From the class StreamThreadTest, method shouldReturnStandbyTaskMetadataWhileRunningState.

@Test
public void shouldReturnStandbyTaskMetadataWhileRunningState() {
    internalStreamsBuilder.stream(Collections.singleton(topic1), consumed).groupByKey().count(Materialized.as("count-one"));
    internalStreamsBuilder.buildAndOptimizeTopology();
    final StreamThread thread = createStreamThread(CLIENT_ID, config, false);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog", 0, null, new Node[0], new Node[0])));
    final HashMap<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("stream-thread-test-count-one-changelog", 1), 0L);
    restoreConsumer.updateEndOffsets(offsets);
    restoreConsumer.updateBeginningOffsets(offsets);
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    // assign single partition
    standbyTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(emptyMap(), standbyTasks);
    thread.rebalanceListener().onPartitionsAssigned(Collections.emptyList());
    thread.runOnce();
    final ThreadMetadata threadMetadata = thread.threadMetadata();
    assertEquals(StreamThread.State.RUNNING.name(), threadMetadata.threadState());
    assertTrue(threadMetadata.standbyTasks().contains(new TaskMetadataImpl(task1, Utils.mkSet(t1p1), new HashMap<>(), new HashMap<>(), Optional.empty())));
    assertTrue(threadMetadata.activeTasks().isEmpty());
    thread.taskManager().shutdown(true);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) PartitionInfo(org.apache.kafka.common.PartitionInfo) Test(org.junit.Test)
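
Here the assigned standby task appears in ThreadMetadata#standbyTasks() while activeTasks() stays empty. A short sketch that tallies both kinds of tasks across all local threads (class and method names are illustrative).

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.ThreadMetadata;

public final class TaskCounts {

    private TaskCounts() { }

    // Count how many active and standby tasks the local threads currently own.
    public static String summarize(final KafkaStreams streams) {
        int active = 0;
        int standby = 0;
        for (final ThreadMetadata thread : streams.metadataForLocalThreads()) {
            active += thread.activeTasks().size();
            standby += thread.standbyTasks().size();
        }
        return "active=" + active + ", standby=" + standby;
    }
}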

Example 4 with ThreadMetadata

Use of org.apache.kafka.streams.ThreadMetadata in project kafka by apache.

From the class ThreadMetadataImplTest, method shouldBeEqualIfSameObject.

@Test
public void shouldBeEqualIfSameObject() {
    final ThreadMetadata same = new ThreadMetadataImpl(THREAD_NAME, THREAD_STATE, MAIN_CONSUMER_CLIENT_ID, RESTORE_CONSUMER_CLIENT_ID, PRODUCER_CLIENT_IDS, ADMIN_CLIENT_ID, ACTIVE_TASKS, STANDBY_TASKS);
    assertThat(threadMetadata, equalTo(same));
    assertThat(threadMetadata.hashCode(), equalTo(same.hashCode()));
}
Also used : ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) Test(org.junit.Test)

Example 5 with ThreadMetadata

Use of org.apache.kafka.streams.ThreadMetadata in project kafka by apache.

From the class ThreadMetadataImplTest, method shouldNotBeEqualIfDifferInThreadName.

@Test
public void shouldNotBeEqualIfDifferInThreadName() {
    final ThreadMetadata differThreadName = new ThreadMetadataImpl("different", THREAD_STATE, MAIN_CONSUMER_CLIENT_ID, RESTORE_CONSUMER_CLIENT_ID, PRODUCER_CLIENT_IDS, ADMIN_CLIENT_ID, ACTIVE_TASKS, STANDBY_TASKS);
    assertThat(threadMetadata, not(equalTo(differThreadName)));
    assertThat(threadMetadata.hashCode(), not(equalTo(differThreadName.hashCode())));
}
Also used : ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) Test(org.junit.Test)
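
Examples 4 and 5 pin down the equals()/hashCode() contract of ThreadMetadataImpl: every field participates. One practical consequence, shown as a hedged sketch below (the class name is hypothetical), is that successive metadata snapshots can be compared directly to detect a changed assignment.

import java.util.Collections;
import java.util.Set;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.ThreadMetadata;

public final class AssignmentChangeDetector {

    private Set<ThreadMetadata> lastSnapshot = Collections.emptySet();

    // Relies on the equals()/hashCode() contract verified above: two snapshots are equal
    // only if every thread's state, client ids, and task sets are unchanged.
    public boolean changedSince(final KafkaStreams streams) {
        final Set<ThreadMetadata> current = streams.metadataForLocalThreads();
        final boolean changed = !current.equals(lastSnapshot);
        lastSnapshot = current;
        return changed;
    }
}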

Aggregations

ThreadMetadata (org.apache.kafka.streams.ThreadMetadata): 15 usages
Test (org.junit.Test): 14 usages
ArrayList (java.util.ArrayList): 3 usages
Properties (java.util.Properties): 3 usages
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 3 usages
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 3 usages
IOException (java.io.IOException): 2 usages
Duration (java.time.Duration): 2 usages
Collections.emptySet (java.util.Collections.emptySet): 2 usages
HashMap (java.util.HashMap): 2 usages
HashSet (java.util.HashSet): 2 usages
List (java.util.List): 2 usages
Optional (java.util.Optional): 2 usages
Set (java.util.Set): 2 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 2 usages
ExecutorService (java.util.concurrent.ExecutorService): 2 usages
Executors (java.util.concurrent.Executors): 2 usages
TimeUnit (java.util.concurrent.TimeUnit): 2 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 2 usages