Example 46 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in the Apache Kafka project.

From class StreamThreadTest, method shouldReinitializeRevivedTasksInAnyState:

@Test
public void shouldReinitializeRevivedTasksInAnyState() {
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(false)), false);
    final String storeName = "store";
    final String storeChangelog = "stream-thread-test-store-changelog";
    final TopicPartition storeChangelogTopicPartition = new TopicPartition(storeChangelog, 1);
    internalTopologyBuilder.addSource(null, "name", null, null, null, topic1);
    final AtomicBoolean shouldThrow = new AtomicBoolean(false);
    final AtomicBoolean processed = new AtomicBoolean(false);
    internalTopologyBuilder.addProcessor("proc", () -> record -> {
        if (shouldThrow.get()) {
            throw new TaskCorruptedException(singleton(task1));
        } else {
            processed.set(true);
        }
    }, "name");
    internalTopologyBuilder.addStateStore(Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.String(), Serdes.String()), "proc");
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(mkMap(mkEntry(t1p1, 0L)));
    final MockConsumer<byte[], byte[]> restoreConsumer = (MockConsumer<byte[], byte[]>) thread.restoreConsumer();
    restoreConsumer.updateBeginningOffsets(mkMap(mkEntry(storeChangelogTopicPartition, 0L)));
    final MockAdminClient admin = (MockAdminClient) thread.adminClient();
    admin.updateEndOffsets(singletonMap(storeChangelogTopicPartition, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    // the first iteration completes the restoration
    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));
    // the second transitions to RUNNING and resumes the input partitions
    thread.runOnce();
    // the third actually polls, processes the record, and throws the corruption exception
    addRecord(mockConsumer, 0L);
    shouldThrow.set(true);
    final TaskCorruptedException taskCorruptedException = assertThrows(TaskCorruptedException.class, thread::runOnce);
    // Now, we can handle the corruption
    thread.taskManager().handleCorruption(taskCorruptedException.corruptedTasks());
    // again, complete the restoration
    thread.runOnce();
    // transition to RUNNING and resume the input partitions
    thread.runOnce();
    // process the record
    addRecord(mockConsumer, 0L);
    shouldThrow.set(false);
    assertThat(processed.get(), is(false));
    thread.runOnce();
    assertThat(processed.get(), is(true));
    thread.taskManager().shutdown(true);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet), Set (java.util.Set), HashSet (java.util.HashSet), Collections.emptySet (java.util.Collections.emptySet), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), TaskCorruptedException (org.apache.kafka.streams.errors.TaskCorruptedException), TopicPartition (org.apache.kafka.common.TopicPartition), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
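
The pattern above relies on createStreamThread wiring MockConsumer instances in as the thread's main and restore consumers, which is why the test can cast thread.mainConsumer() and inject records with no broker involved. For reference, a minimal standalone sketch of the same assign / updateBeginningOffsets / addRecord / poll cycle; the topic name and payload here are hypothetical, not taken from the test:

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerPollSketch {
    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("input-topic", 0);
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        // a partition must be assigned and have a known beginning offset before poll() can return data
        consumer.assign(Collections.singleton(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        // inject a record; the next poll() returns it
        consumer.addRecord(new ConsumerRecord<>("input-topic", 0, 0L, new byte[0], "value".getBytes()));
        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ZERO);
        System.out.println(records.count()); // 1
    }
}

This is also why every example in this series calls updateBeginningOffsets() before the first runOnce(): without a valid position, MockConsumer cannot hand records back.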

Example 47 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in the Apache Kafka project.

From class StreamThreadTest, method shouldThrowTaskMigratedExceptionHandlingTaskLost:

@Test
public void shouldThrowTaskMigratedExceptionHandlingTaskLost() {
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
    consumer.assign(assignedPartitions);
    consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L));
    taskManager.handleLostAll();
    EasyMock.expectLastCall().andThrow(new TaskMigratedException("Task lost exception", new RuntimeException()));
    EasyMock.replay(taskManager);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata).updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    consumer.schedulePollTask(() -> {
        thread.setState(StreamThread.State.PARTITIONS_REVOKED);
        thread.rebalanceListener().onPartitionsLost(assignedPartitions);
    });
    thread.setState(StreamThread.State.STARTING);
    assertThrows(TaskMigratedException.class, thread::runOnce);
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), StreamsMetricsImpl (org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException), Test (org.junit.Test)
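
The pivotal call here is schedulePollTask(): MockConsumer executes the scheduled Runnable on the polling thread at the start of the next poll(), which is how the test fires onPartitionsLost() from inside runOnce(). A minimal sketch of the mechanism in isolation, with a hypothetical topic:

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class SchedulePollTaskSketch {
    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("input-topic", 0);
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
        consumer.assign(Collections.singleton(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        consumer.updateEndOffsets(Collections.singletonMap(tp, 10L));
        // the Runnable runs inside the next poll(), before any records are returned
        consumer.schedulePollTask(() -> System.out.println("executed on the polling thread"));
        consumer.poll(Duration.ZERO);
    }
}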

Example 48 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in the Apache Kafka project.

From class StreamThreadTest, method shouldRespectNumIterationsInMainLoop:

@Test
public void shouldRespectNumIterationsInMainLoop() {
    final List<MockApiProcessor<byte[], byte[], Object, Object>> mockProcessors = new LinkedList<>();
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    internalTopologyBuilder.addProcessor("processor1", (ProcessorSupplier<byte[], byte[], ?, ?>) () -> {
        final MockApiProcessor<byte[], byte[], Object, Object> processor = new MockApiProcessor<>(PunctuationType.WALL_CLOCK_TIME, 10L);
        mockProcessors.add(processor);
        return processor;
    }, "source1");
    internalTopologyBuilder.addProcessor("processor2", (ProcessorSupplier<byte[], byte[], ?, ?>) () -> new MockApiProcessor<>(PunctuationType.STREAM_TIME, 10L), "source1");
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig(APPLICATION_ID, "localhost:2171", Serdes.ByteArraySerde.class.getName(), Serdes.ByteArraySerde.class.getName(), properties));
    final StreamThread thread = createStreamThread(CLIENT_ID, config, false);
    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);
    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    // processed one record, punctuated after the first record, and hence num.iterations is still 1
    long offset = -1;
    addRecord(mockConsumer, ++offset, 0L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed one more record without punctuation, bumping num.iterations to 2
    addRecord(mockConsumer, ++offset, 1L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(2));
    // processed zero records; early exit, and num.iterations stays at 2
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(2));
    // system-time-based punctuation without processing any record; num.iterations stays at 2
    mockTime.sleep(11L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(2));
    // system-time-based punctuation after processing a record halves num.iterations to 1
    mockTime.sleep(11L);
    addRecord(mockConsumer, ++offset, 5L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed two records, bumping up iterations to 3 (1 + 2)
    addRecord(mockConsumer, ++offset, 5L);
    addRecord(mockConsumer, ++offset, 6L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    // stream-time-based punctuation halves num.iterations to 1
    addRecord(mockConsumer, ++offset, 11L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed three records, bumping up iterations to 3 (1 + 2)
    addRecord(mockConsumer, ++offset, 12L);
    addRecord(mockConsumer, ++offset, 13L);
    addRecord(mockConsumer, ++offset, 14L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    mockProcessors.forEach(MockApiProcessor::requestCommit);
    addRecord(mockConsumer, ++offset, 15L);
    thread.runOnce();
    // a user-requested commit should halve num.iterations to 1
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed three records, bumping up iterations to 3 (1 + 2)
    addRecord(mockConsumer, ++offset, 15L);
    addRecord(mockConsumer, ++offset, 16L);
    addRecord(mockConsumer, ++offset, 17L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    // time-based commit without processing should keep num.iterations at 3
    mockTime.sleep(90L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    // time-based commit after processing a record should halve num.iterations to 1
    mockTime.sleep(90L);
    addRecord(mockConsumer, ++offset, 18L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), MockApiProcessor (org.apache.kafka.test.MockApiProcessor), Utils.mkProperties (org.apache.kafka.common.utils.Utils.mkProperties), Properties (java.util.Properties), LinkedList (java.util.LinkedList), Serdes (org.apache.kafka.common.serialization.Serdes), TopicPartition (org.apache.kafka.common.TopicPartition), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
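
The addRecord(...) calls in this test use a private helper that is not part of this excerpt. Based on the ConsumerRecord constructor visible in Example 49, a plausible reconstruction is sketched below; the topic and partition constants are assumptions matching t1p1 in these tests, and the two-argument overload used in Example 46 is likewise a guess:

import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class AddRecordHelperSketch {
    private static final String TOPIC = "topic1"; // assumed topic of t1p1
    private static final int PARTITION = 1;       // assumed partition of t1p1

    // hypothetical reconstruction of the test's private addRecord(consumer, offset, timestamp)
    static void addRecord(final MockConsumer<byte[], byte[]> consumer,
                          final long offset,
                          final long timestamp) {
        consumer.addRecord(new ConsumerRecord<>(
            TOPIC,
            PARTITION,
            offset,
            timestamp,
            TimestampType.CREATE_TIME,
            -1,                  // serialized key size, unknown
            -1,                  // serialized value size, unknown
            new byte[0],         // key
            new byte[0],         // value
            new RecordHeaders(),
            Optional.empty()));  // leader epoch
    }

    // hypothetical two-argument overload, as called in Example 46; presumably defaults the timestamp
    static void addRecord(final MockConsumer<byte[], byte[]> consumer, final long offset) {
        addRecord(consumer, offset, -1L);
    }
}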

Example 49 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in the Apache Kafka project.

From class StreamThreadTest, method shouldLogAndRecordSkippedMetricForDeserializationException:

@Test
public void shouldLogAndRecordSkippedMetricForDeserializationException() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final Properties config = configProps(false);
    config.setProperty(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndContinueExceptionHandler.class.getName());
    config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(config), false);
    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);
    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    long offset = -1;
    mockConsumer.addRecord(new ConsumerRecord<>(t1p1.topic(), t1p1.partition(), ++offset, -1, TimestampType.CREATE_TIME, -1, -1, new byte[0], "I am not an integer.".getBytes(), new RecordHeaders(), Optional.empty()));
    mockConsumer.addRecord(new ConsumerRecord<>(t1p1.topic(), t1p1.partition(), ++offset, -1, TimestampType.CREATE_TIME, -1, -1, new byte[0], "I am not an integer.".getBytes(), new RecordHeaders(), Optional.empty()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordDeserializer.class)) {
        thread.runOnce();
        final List<String> strings = appender.getMessages();
        assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" + " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[0]"));
        assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" + " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[1]"));
    }
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Utils.mkProperties (org.apache.kafka.common.utils.Utils.mkProperties), Properties (java.util.Properties), LogAndContinueExceptionHandler (org.apache.kafka.streams.errors.LogAndContinueExceptionHandler), RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders), TopicPartition (org.apache.kafka.common.TopicPartition), LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
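
LogAndContinueExceptionHandler is what turns the two undecodable payloads into skipped-record log lines rather than a thread failure. Outside of tests the handler is enabled the same way; a minimal config sketch, where the application id and bootstrap servers are placeholder values:

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;

public class DeserializationHandlerConfigSketch {
    public static Properties config() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");            // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // log and skip records that fail deserialization instead of killing the stream thread
        props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                  LogAndContinueExceptionHandler.class.getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,
                  Serdes.Integer().getClass().getName());
        return props;
    }
}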

Example 50 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in the Apache Kafka project.

From class StreamThreadTest, method shouldInjectProducerPerTaskUsingClientSupplierOnCreateIfEosAlphaEnabled:

@SuppressWarnings("deprecation")
@Test
public void shouldInjectProducerPerTaskUsingClientSupplierOnCreateIfEosAlphaEnabled() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final Properties props = configProps(true);
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(props), true);
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptyList());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    assignedPartitions.add(t1p2);
    activeTasks.put(task1, Collections.singleton(t1p1));
    activeTasks.put(task2, Collections.singleton(t1p2));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    final Map<TopicPartition, Long> beginOffsets = new HashMap<>();
    beginOffsets.put(t1p1, 0L);
    beginOffsets.put(t1p2, 0L);
    mockConsumer.updateBeginningOffsets(beginOffsets);
    thread.rebalanceListener().onPartitionsAssigned(new HashSet<>(assignedPartitions));
    thread.runOnce();
    assertEquals(thread.activeTasks().size(), clientSupplier.producers.size());
    assertSame(clientSupplier.consumer, thread.mainConsumer());
    assertSame(clientSupplier.restoreConsumer, thread.restoreConsumer());
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet), Set (java.util.Set), HashSet (java.util.HashSet), Collections.emptySet (java.util.Collections.emptySet), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), Utils.mkProperties (org.apache.kafka.common.utils.Utils.mkProperties), Properties (java.util.Properties), TopicPartition (org.apache.kafka.common.TopicPartition), AtomicLong (java.util.concurrent.atomic.AtomicLong), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
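
The assertion that thread.activeTasks().size() equals clientSupplier.producers.size() captures the defining trait of eos-alpha: one producer per task. A minimal sketch of selecting that guarantee in application config; StreamsConfig.EXACTLY_ONCE is deprecated in favor of StreamsConfig.EXACTLY_ONCE_V2 (one producer per thread), which is why the test above carries @SuppressWarnings("deprecation"). Application id and bootstrap servers are placeholders:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

@SuppressWarnings("deprecation")
public class EosAlphaConfigSketch {
    public static Properties config() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-eos-app");        // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // eos-alpha: one transactional producer per task (deprecated; prefer EXACTLY_ONCE_V2)
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
        return props;
    }
}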

Aggregations

MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 55
Test (org.junit.Test): 46
TopicPartition (org.apache.kafka.common.TopicPartition): 43
TaskId (org.apache.kafka.streams.processor.TaskId): 27
HashMap (java.util.HashMap): 26
Set (java.util.Set): 24
ArrayList (java.util.ArrayList): 20
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 20
PartitionInfo (org.apache.kafka.common.PartitionInfo): 18
HashSet (java.util.HashSet): 17
Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet): 15
Map (java.util.Map): 10
Properties (java.util.Properties): 10
StreamsException (org.apache.kafka.streams.errors.StreamsException): 10
Collections.emptySet (java.util.Collections.emptySet): 9
InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest): 9
KafkaException (org.apache.kafka.common.KafkaException): 8
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 8
List (java.util.List): 7
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 7