
Example 6 with StreamsMetricsImpl

Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.

From the class StreamThreadTest, method shouldReturnActiveTaskMetadataWhileRunningState:

@Test
public void shouldReturnActiveTaskMetadataWhileRunningState() {
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    clientSupplier.setCluster(createCluster());
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, APPLICATION_ID, config.getString(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG), mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = StreamThread.create(topologyMetadata, config, clientSupplier, clientSupplier.getAdmin(config.getAdminConfigs(CLIENT_ID)), PROCESS_ID, CLIENT_ID, streamsMetrics, mockTime, streamsMetadataState, 0, stateDirectory, new MockStateRestoreListener(), threadIdx, null, HANDLER);
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    final ThreadMetadata metadata = thread.threadMetadata();
    assertEquals(StreamThread.State.RUNNING.name(), metadata.threadState());
    assertTrue(metadata.activeTasks().contains(new TaskMetadataImpl(task1, Utils.mkSet(t1p1), new HashMap<>(), new HashMap<>(), Optional.empty())));
    assertTrue(metadata.standbyTasks().isEmpty());
    assertTrue("#threadState() was: " + metadata.threadState() + "; expected either RUNNING, STARTING, PARTITIONS_REVOKED, PARTITIONS_ASSIGNED, or CREATED", Arrays.asList("RUNNING", "STARTING", "PARTITIONS_REVOKED", "PARTITIONS_ASSIGNED", "CREATED").contains(metadata.threadState()));
    final String threadName = metadata.threadName();
    assertThat(threadName, startsWith(CLIENT_ID + "-StreamThread-" + threadIdx));
    assertEquals(threadName + "-consumer", metadata.consumerClientId());
    assertEquals(threadName + "-restore-consumer", metadata.restoreConsumerClientId());
    assertEquals(Collections.singleton(threadName + "-producer"), metadata.producerClientIds());
    assertEquals(CLIENT_ID + "-admin", metadata.adminClientId());
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MockStateRestoreListener(org.apache.kafka.test.MockStateRestoreListener) TopicPartition(org.apache.kafka.common.TopicPartition) ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Test(org.junit.Test)
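
All of these examples build a StreamsMetricsImpl the same way: a Metrics registry, a client or application id, a built-in metrics version string, and a Time source. Below is a minimal standalone sketch of that constructor pattern, assuming a plain test context; the "example-client" id and the no-argument Metrics and MockTime constructors are stand-ins for the fixtures used in the tests above.

import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl;

public class StreamsMetricsImplConstructionSketch {
    public static void main(final String[] args) {
        // Metrics registry and mock clock stand in for the test-class fields used above.
        final Metrics metrics = new Metrics();
        final MockTime mockTime = new MockTime();

        // Same four-argument constructor as in the examples: registry, client id,
        // built-in metrics version (METRICS_LATEST in Examples 7-10), and time source.
        final StreamsMetricsImpl streamsMetrics =
            new StreamsMetricsImpl(metrics, "example-client", StreamsConfig.METRICS_LATEST, mockTime);

        // The instance is then passed to StreamThread.create(...) or the StreamThread constructor.
        System.out.println("created " + streamsMetrics);
        metrics.close();
    }
}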

Example 7 with StreamsMetricsImpl

Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.

From the class StreamThreadTest, method shouldCatchHandleCorruptionOnTaskCorruptedExceptionPath:

@Test
@SuppressWarnings("unchecked")
public void shouldCatchHandleCorruptionOnTaskCorruptedExceptionPath() {
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    consumer.subscribe((Collection<String>) anyObject(), anyObject());
    EasyMock.expectLastCall().anyTimes();
    consumer.unsubscribe();
    EasyMock.expectLastCall().anyTimes();
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumerGroupMetadata);
    final Task task1 = mock(Task.class);
    final Task task2 = mock(Task.class);
    final TaskId taskId1 = new TaskId(0, 0);
    final TaskId taskId2 = new TaskId(0, 2);
    final Set<TaskId> corruptedTasks = singleton(taskId1);
    expect(task1.state()).andReturn(Task.State.RUNNING).anyTimes();
    expect(task1.id()).andReturn(taskId1).anyTimes();
    expect(task2.state()).andReturn(Task.State.RUNNING).anyTimes();
    expect(task2.id()).andReturn(taskId2).anyTimes();
    expect(taskManager.handleCorruption(corruptedTasks)).andReturn(true);
    EasyMock.replay(task1, task2, taskManager, consumer);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = new StreamThread(mockTime, config, null, consumer, consumer, null, null, taskManager, streamsMetrics, topologyMetadata, CLIENT_ID, new LogContext(""), new AtomicInteger(), new AtomicLong(Long.MAX_VALUE), new LinkedList<>(), null, HANDLER, null) {

        @Override
        void runOnce() {
            setState(State.PENDING_SHUTDOWN);
            throw new TaskCorruptedException(corruptedTasks);
        }
    }.updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    thread.run();
    verify(taskManager);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) LogContext(org.apache.kafka.common.utils.LogContext) LinkedList(java.util.LinkedList) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) TaskCorruptedException(org.apache.kafka.streams.errors.TaskCorruptedException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) Test(org.junit.Test)
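
The overridden runOnce() above injects a TaskCorruptedException so the test can check that the thread's run loop routes the corrupted task ids to TaskManager.handleCorruption. Below is a minimal sketch of just that exception, constructed from a Set<TaskId> exactly as in the test; the surrounding main method is illustration-only scaffolding.

import java.util.Collections;
import java.util.Set;
import org.apache.kafka.streams.errors.TaskCorruptedException;
import org.apache.kafka.streams.processor.TaskId;

public class TaskCorruptedExceptionSketch {
    public static void main(final String[] args) {
        // Same single-argument construction as the overridden runOnce() above.
        final Set<TaskId> corruptedTasks = Collections.singleton(new TaskId(0, 0));
        try {
            throw new TaskCorruptedException(corruptedTasks);
        } catch (final TaskCorruptedException e) {
            // TaskCorruptedException is unchecked (a StreamsException), so the stream thread's
            // run loop can catch it and trigger taskManager.handleCorruption(corruptedTasks),
            // which is what verify(taskManager) asserts in the test above.
            System.out.println("caught: " + e.getMessage());
        }
    }
}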

Example 8 with StreamsMetricsImpl

Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.

From the class StreamThreadTest, method shouldCatchTaskMigratedExceptionOnOnTaskCorruptedExceptionPath:

@Test
@SuppressWarnings("unchecked")
public void shouldCatchTaskMigratedExceptionOnOnTaskCorruptedExceptionPath() {
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    consumer.subscribe((Collection<String>) anyObject(), anyObject());
    EasyMock.expectLastCall().anyTimes();
    consumer.unsubscribe();
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(consumerGroupMetadata);
    final Task task1 = mock(Task.class);
    final Task task2 = mock(Task.class);
    final TaskId taskId1 = new TaskId(0, 0);
    final TaskId taskId2 = new TaskId(0, 2);
    final Set<TaskId> corruptedTasks = singleton(taskId1);
    expect(task1.state()).andReturn(Task.State.RUNNING).anyTimes();
    expect(task1.id()).andReturn(taskId1).anyTimes();
    expect(task2.state()).andReturn(Task.State.RUNNING).anyTimes();
    expect(task2.id()).andReturn(taskId2).anyTimes();
    taskManager.handleCorruption(corruptedTasks);
    expectLastCall().andThrow(new TaskMigratedException("Task migrated", new RuntimeException("non-corrupted task migrated")));
    taskManager.handleLostAll();
    expectLastCall();
    EasyMock.replay(task1, task2, taskManager, consumer);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = new StreamThread(mockTime, config, null, consumer, consumer, null, null, taskManager, streamsMetrics, topologyMetadata, CLIENT_ID, new LogContext(""), new AtomicInteger(), new AtomicLong(Long.MAX_VALUE), new LinkedList<>(), null, HANDLER, null) {

        @Override
        void runOnce() {
            setState(State.PENDING_SHUTDOWN);
            throw new TaskCorruptedException(corruptedTasks);
        }
    }.updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    thread.setState(StreamThread.State.STARTING);
    thread.runLoop();
    verify(taskManager);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) LogContext(org.apache.kafka.common.utils.LogContext) LinkedList(java.util.LinkedList) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) TaskCorruptedException(org.apache.kafka.streams.errors.TaskCorruptedException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) Test(org.junit.Test)
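
This example exercises the nested failure path: handleCorruption itself throws a TaskMigratedException, and the thread is then expected to fall back to handleLostAll. Below is a hypothetical sketch of that control flow, with a Recovery interface standing in for the TaskManager calls; it is not the actual StreamThread internals.

import java.util.Set;
import org.apache.kafka.streams.errors.TaskCorruptedException;
import org.apache.kafka.streams.errors.TaskMigratedException;
import org.apache.kafka.streams.processor.TaskId;

public class CorruptionThenMigrationSketch {

    interface Recovery {
        // stand-ins for TaskManager.handleCorruption(...) and TaskManager.handleLostAll()
        void handleCorruption(Set<TaskId> corruptedTasks);
        void handleLostAll();
    }

    static void process(final Runnable runOnce, final Set<TaskId> corruptedTasks, final Recovery recovery) {
        try {
            runOnce.run();
        } catch (final TaskCorruptedException corrupted) {
            try {
                recovery.handleCorruption(corruptedTasks);
            } catch (final TaskMigratedException migrated) {
                // the mock above expects exactly this fallback when handleCorruption throws
                recovery.handleLostAll();
            }
        }
    }
}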

Example 9 with StreamsMetricsImpl

Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.

From the class StreamThreadTest, method shouldNotReturnDataAfterTaskMigrated:

@Test
public void shouldNotReturnDataAfterTaskMigrated() {
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final InternalTopologyBuilder internalTopologyBuilder = EasyMock.createNiceMock(InternalTopologyBuilder.class);
    expect(internalTopologyBuilder.fullSourceTopicNames()).andReturn(Collections.singletonList(topic1)).times(2);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
    consumer.subscribe(Collections.singletonList(topic1), new MockRebalanceListener());
    consumer.rebalance(Collections.singletonList(t1p1));
    consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L));
    consumer.seekToEnd(Collections.singletonList(t1p1));
    final ChangelogReader changelogReader = new MockChangelogReader() {

        @Override
        public void restore(final Map<TaskId, Task> tasks) {
            consumer.addRecord(new ConsumerRecord<>(topic1, 1, 11, new byte[0], new byte[0]));
            consumer.addRecord(new ConsumerRecord<>(topic1, 1, 12, new byte[1], new byte[0]));
            throw new TaskMigratedException("Changelog restore found task migrated", new RuntimeException("restore task migrated"));
        }
    };
    taskManager.handleLostAll();
    EasyMock.replay(taskManager, internalTopologyBuilder);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final StreamThread thread = new StreamThread(mockTime, config, null, consumer, consumer, changelogReader, null, taskManager, streamsMetrics, new TopologyMetadata(internalTopologyBuilder, config), CLIENT_ID, new LogContext(""), new AtomicInteger(), new AtomicLong(Long.MAX_VALUE), new LinkedList<>(), null, HANDLER, null).updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    final StreamsException thrown = assertThrows(StreamsException.class, thread::run);
    verify(taskManager);
    assertThat(thrown.getCause(), isA(IllegalStateException.class));
    // The Mock consumer shall throw as the assignment has been wiped out, but records are assigned.
    assertEquals("No current assignment for partition topic1-1", thrown.getCause().getMessage());
    assertFalse(consumer.shouldRebalance());
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) LogContext(org.apache.kafka.common.utils.LogContext) LinkedList(java.util.LinkedList) MockRebalanceListener(org.apache.kafka.clients.consumer.internals.MockRebalanceListener) AtomicLong(java.util.concurrent.atomic.AtomicLong) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) Test(org.junit.Test)
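
The MockConsumer and MockChangelogReader here simulate the broker and the restore path entirely in memory: addRecord feeds records that the next poll returns for an assigned partition. Below is a brief standalone sketch of the MockConsumer calls this test leans on, using a placeholder topic1/partition 1 pair that mirrors t1p1.

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {
    public static void main(final String[] args) {
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        final TopicPartition tp = new TopicPartition("topic1", 1); // placeholder, mirrors t1p1 above

        // assign the partition and seed its beginning offset, as the tests do before polling
        consumer.assign(Collections.singleton(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        // records added to the mock are returned by the next poll for assigned partitions
        consumer.addRecord(new ConsumerRecord<>("topic1", 1, 0L, new byte[0], new byte[0]));
        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(0));
        System.out.println("polled " + records.count() + " record(s)");
    }
}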

Example 10 with StreamsMetricsImpl

Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.

From the class StreamThreadTest, method shouldThrowTaskMigratedExceptionHandlingRevocation:

@Test
public void shouldThrowTaskMigratedExceptionHandlingRevocation() {
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
    consumer.assign(assignedPartitions);
    consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L));
    taskManager.handleRevocation(assignedPartitions);
    EasyMock.expectLastCall().andThrow(new TaskMigratedException("Revocation non fatal exception", new RuntimeException()));
    EasyMock.replay(taskManager);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata).updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    consumer.schedulePollTask(() -> {
        thread.setState(StreamThread.State.PARTITIONS_REVOKED);
        thread.rebalanceListener().onPartitionsRevoked(assignedPartitions);
    });
    thread.setState(StreamThread.State.STARTING);
    assertThrows(TaskMigratedException.class, thread::runOnce);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) Test(org.junit.Test)
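
consumer.schedulePollTask(...) is what lets this test flip the thread into PARTITIONS_REVOKED and fire the rebalance listener at exactly the point where runOnce() polls. Below is a small sketch of that MockConsumer feature in isolation; the topic and partition are placeholders, and EARLIEST with seeded beginning offsets is used only to keep the sketch minimal (the test above uses LATEST with end offsets).

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class SchedulePollTaskSketch {
    public static void main(final String[] args) {
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        final TopicPartition tp = new TopicPartition("topic1", 1); // placeholder partition
        consumer.assign(Collections.singleton(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        // the scheduled Runnable executes during the next poll() call, before records are returned,
        // which is how the test injects the revocation callback mid-poll
        consumer.schedulePollTask(() -> System.out.println("running inside poll()"));
        consumer.poll(Duration.ofMillis(0));
    }
}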

Aggregations

StreamsMetricsImpl (org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl): 54
Test (org.junit.Test): 31
Metrics (org.apache.kafka.common.metrics.Metrics): 22
MockTime (org.apache.kafka.common.utils.MockTime): 22
LogContext (org.apache.kafka.common.utils.LogContext): 18
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 18
TaskId (org.apache.kafka.streams.processor.TaskId): 16
ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata): 14
Properties (java.util.Properties): 12
Before (org.junit.Before): 11
File (java.io.File): 9
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 9
AtomicLong (java.util.concurrent.atomic.AtomicLong): 9
LinkedList (java.util.LinkedList): 8
Set (java.util.Set): 8
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 8
TopicPartition (org.apache.kafka.common.TopicPartition): 8
MockStateRestoreListener (org.apache.kafka.test.MockStateRestoreListener): 8
HashMap (java.util.HashMap): 7
KeyValue (org.apache.kafka.streams.KeyValue): 7