
Example 41 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

From the class AbstractTaskTest, method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException.

@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException() {
    final Consumer consumer = mockConsumer(new AuthorizationException("blah"));
    final AbstractTask task = createTask(consumer, Collections.<StateStore, String>emptyMap());
    task.updateOffsetLimits();
}
Also used : MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Consumer(org.apache.kafka.clients.consumer.Consumer) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) Test(org.junit.Test)
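The mockConsumer(...) helper is not shown in the snippet above. A minimal sketch of what it plausibly looks like, assuming updateOffsetLimits() reads committed offsets and using the pre-2.4 committed(TopicPartition) overload; the helper name and override point are reconstructions, not confirmed by this snippet:

private Consumer<byte[], byte[]> mockConsumer(final RuntimeException toThrow) {
    // Hypothetical reconstruction: any committed-offset lookup fails with the given
    // exception, which the task is expected to wrap in a ProcessorStateException.
    return new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}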

Example 42 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.

From the class GlobalStreamThreadTest, method shouldThrowStreamsExceptionOnStartupIfExceptionOccurred.

@Test
public void shouldThrowStreamsExceptionOnStartupIfExceptionOccurred() throws Exception {
    final MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StateStore globalStore = builder.globalStateStores().get(GLOBAL_STORE_NAME);
    globalStreamThread = new GlobalStreamThread(builder.buildGlobalStateTopology(), config, mockConsumer, new StateDirectory(config, time, true, false), 0, new StreamsMetricsImpl(new Metrics(), "test-client", StreamsConfig.METRICS_LATEST, time), time, "clientId", stateRestoreListener, e -> {
    });
    try {
        globalStreamThread.start();
        fail("Should have thrown StreamsException if start up failed");
    } catch (final StreamsException e) {
        assertThat(e.getCause(), instanceOf(RuntimeException.class));
        assertThat(e.getCause().getMessage(), equalTo("KABOOM!"));
    }
    globalStreamThread.join();
    assertThat(globalStore.isOpen(), is(false));
    assertFalse(globalStreamThread.stillRunning());
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) StreamsConfig(org.apache.kafka.streams.StreamsConfig) MockTime(org.apache.kafka.common.utils.MockTime) InternalNameProvider(org.apache.kafka.streams.kstream.internals.InternalNameProvider) IsInstanceOf.instanceOf(org.hamcrest.core.IsInstanceOf.instanceOf) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) HashMap(java.util.HashMap) StreamsException(org.apache.kafka.streams.errors.StreamsException) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) DEAD(org.apache.kafka.streams.processor.internals.GlobalStreamThread.State.DEAD) ContextualProcessor(org.apache.kafka.streams.processor.api.ContextualProcessor) ProcessorSupplier(org.apache.kafka.streams.processor.api.ProcessorSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Serdes(org.apache.kafka.common.serialization.Serdes) Record(org.apache.kafka.streams.processor.api.Record) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) Assert.fail(org.junit.Assert.fail) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Before(org.junit.Before) TopicPartition(org.apache.kafka.common.TopicPartition) RUNNING(org.apache.kafka.streams.processor.internals.GlobalStreamThread.State.RUNNING) MockStateRestoreListener(org.apache.kafka.test.MockStateRestoreListener) TestUtils(org.apache.kafka.test.TestUtils) Set(java.util.Set) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) PartitionInfo(org.apache.kafka.common.PartitionInfo) MaterializedInternal(org.apache.kafka.streams.kstream.internals.MaterializedInternal) File(java.io.File) Bytes(org.apache.kafka.common.utils.Bytes) ConsumerRecordUtil.record(org.apache.kafka.streams.processor.internals.testutil.ConsumerRecordUtil.record) List(java.util.List) Metrics(org.apache.kafka.common.metrics.Metrics) TimestampedKeyValueStoreMaterializer(org.apache.kafka.streams.kstream.internals.TimestampedKeyValueStoreMaterializer) StateStore(org.apache.kafka.streams.processor.StateStore) Assert.assertFalse(org.junit.Assert.assertFalse) Materialized(org.apache.kafka.streams.kstream.Materialized) InvalidOffsetException(org.apache.kafka.clients.consumer.InvalidOffsetException) Node(org.apache.kafka.common.Node) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)
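The failure here is injected by overriding partitionsFor in an anonymous MockConsumer subclass. For contrast, a minimal sketch of the happy-path setup, which seeds partition metadata with updatePartitions instead of throwing; topic name and partition layout are illustrative:

final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
// Seed metadata so partitionsFor returns normally instead of throwing.
consumer.updatePartitions("global-topic", Collections.singletonList(
    new PartitionInfo("global-topic", 0, Node.noNode(), new Node[0], new Node[0])));
final List<PartitionInfo> partitions = consumer.partitionsFor("global-topic"); // one PartitionInfo, no exception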

Example 43 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.

From the class StreamThreadTest, method shouldReinitializeRevivedTasksInAnyState.

@Test
public void shouldReinitializeRevivedTasksInAnyState() {
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(false)), false);
    final String storeName = "store";
    final String storeChangelog = "stream-thread-test-store-changelog";
    final TopicPartition storeChangelogTopicPartition = new TopicPartition(storeChangelog, 1);
    internalTopologyBuilder.addSource(null, "name", null, null, null, topic1);
    final AtomicBoolean shouldThrow = new AtomicBoolean(false);
    final AtomicBoolean processed = new AtomicBoolean(false);
    internalTopologyBuilder.addProcessor("proc", () -> record -> {
        if (shouldThrow.get()) {
            throw new TaskCorruptedException(singleton(task1));
        } else {
            processed.set(true);
        }
    }, "name");
    internalTopologyBuilder.addStateStore(Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.String(), Serdes.String()), "proc");
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(mkMap(mkEntry(t1p1, 0L)));
    final MockConsumer<byte[], byte[]> restoreConsumer = (MockConsumer<byte[], byte[]>) thread.restoreConsumer();
    restoreConsumer.updateBeginningOffsets(mkMap(mkEntry(storeChangelogTopicPartition, 0L)));
    final MockAdminClient admin = (MockAdminClient) thread.adminClient();
    admin.updateEndOffsets(singletonMap(storeChangelogTopicPartition, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    // the first iteration completes the restoration
    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));
    // the second iteration transitions to running and unpauses the input
    thread.runOnce();
    // the third actually polls, processes the record, and throws the corruption exception
    addRecord(mockConsumer, 0L);
    shouldThrow.set(true);
    final TaskCorruptedException taskCorruptedException = assertThrows(TaskCorruptedException.class, thread::runOnce);
    // Now, we can handle the corruption
    thread.taskManager().handleCorruption(taskCorruptedException.corruptedTasks());
    // again, complete the restoration
    thread.runOnce();
    // transition to running and unpause
    thread.runOnce();
    // process the record
    addRecord(mockConsumer, 0L);
    shouldThrow.set(false);
    assertThat(processed.get(), is(false));
    thread.runOnce();
    assertThat(processed.get(), is(true));
    thread.taskManager().shutdown(true);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MockAdminClient(org.apache.kafka.clients.admin.MockAdminClient) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TaskCorruptedException(org.apache.kafka.streams.errors.TaskCorruptedException) TopicPartition(org.apache.kafka.common.TopicPartition) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
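The addRecord(mockConsumer, offset) calls above use a test helper, not a MockConsumer method. A plausible sketch of such a helper built on MockConsumer.addRecord, assuming byte-array keys and values and the test class's topic1/t1p1 fields (the three-argument variant used in later examples would additionally carry a record timestamp):

// Hypothetical helper: queues one record to be returned by the consumer's next poll.
private void addRecord(final MockConsumer<byte[], byte[]> consumer, final long offset) {
    consumer.addRecord(new ConsumerRecord<>(topic1, t1p1.partition(), offset, new byte[0], new byte[0]));
}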

Example 44 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.

From the class StreamThreadTest, method shouldThrowTaskMigratedExceptionHandlingTaskLost.

@Test
public void shouldThrowTaskMigratedExceptionHandlingTaskLost() {
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
    consumer.assign(assignedPartitions);
    consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L));
    taskManager.handleLostAll();
    EasyMock.expectLastCall().andThrow(new TaskMigratedException("Task lost exception", new RuntimeException()));
    EasyMock.replay(taskManager);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata).updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    consumer.schedulePollTask(() -> {
        thread.setState(StreamThread.State.PARTITIONS_REVOKED);
        thread.rebalanceListener().onPartitionsLost(assignedPartitions);
    });
    thread.setState(StreamThread.State.STARTING);
    assertThrows(TaskMigratedException.class, thread::runOnce);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) Test(org.junit.Test)
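The deterministic trigger in this test is MockConsumer.schedulePollTask: the Runnable executes inside the consumer's next poll, so the rebalance callback fires at an exact point in the thread's loop rather than asynchronously. A minimal standalone sketch of the mechanism; topic and partition names are illustrative:

final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
final TopicPartition tp = new TopicPartition("input", 0);
consumer.assign(Collections.singleton(tp));
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
// The task runs synchronously inside the next poll(), before any records are returned.
consumer.schedulePollTask(() -> System.out.println("running inside poll()"));
consumer.poll(Duration.ofMillis(0));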

Example 45 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.

From the class StreamThreadTest, method shouldRespectNumIterationsInMainLoop.

@Test
public void shouldRespectNumIterationsInMainLoop() {
    final List<MockApiProcessor<byte[], byte[], Object, Object>> mockProcessors = new LinkedList<>();
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    internalTopologyBuilder.addProcessor("processor1", (ProcessorSupplier<byte[], byte[], ?, ?>) () -> {
        final MockApiProcessor<byte[], byte[], Object, Object> processor = new MockApiProcessor<>(PunctuationType.WALL_CLOCK_TIME, 10L);
        mockProcessors.add(processor);
        return processor;
    }, "source1");
    internalTopologyBuilder.addProcessor("processor2", (ProcessorSupplier<byte[], byte[], ?, ?>) () -> new MockApiProcessor<>(PunctuationType.STREAM_TIME, 10L), "source1");
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig(APPLICATION_ID, "localhost:2171", Serdes.ByteArraySerde.class.getName(), Serdes.ByteArraySerde.class.getName(), properties));
    final StreamThread thread = createStreamThread(CLIENT_ID, config, false);
    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);
    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    // processed one record, punctuated after the first record, and hence num.iterations is still 1
    long offset = -1;
    addRecord(mockConsumer, ++offset, 0L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed one more record without punctuation, bumping num.iterations to 2
    addRecord(mockConsumer, ++offset, 1L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(2));
    // processed zero records; early exit, and num.iterations stays at 2
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(2));
    // system-time-based punctuation without processing any record; num.iterations stays at 2
    mockTime.sleep(11L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(2));
    // system-time-based punctuation after processing a record halves num.iterations to 1
    mockTime.sleep(11L);
    addRecord(mockConsumer, ++offset, 5L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed two records, bumping up iterations to 3 (1 + 2)
    addRecord(mockConsumer, ++offset, 5L);
    addRecord(mockConsumer, ++offset, 6L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    // stream-time-based punctuation halves num.iterations to 1
    addRecord(mockConsumer, ++offset, 11L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed three records, bumping up iterations to 3 (1 + 2)
    addRecord(mockConsumer, ++offset, 12L);
    addRecord(mockConsumer, ++offset, 13L);
    addRecord(mockConsumer, ++offset, 14L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    mockProcessors.forEach(MockApiProcessor::requestCommit);
    addRecord(mockConsumer, ++offset, 15L);
    thread.runOnce();
    // a user-requested commit halves num.iterations to 1
    assertThat(thread.currentNumIterations(), equalTo(1));
    // processed three records, bumping up iterations to 3 (1 + 2)
    addRecord(mockConsumer, ++offset, 15L);
    addRecord(mockConsumer, ++offset, 16L);
    addRecord(mockConsumer, ++offset, 17L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    // time-based commit without processing keeps num.iterations at 3
    mockTime.sleep(90L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(3));
    // time-based commit, this time after processing a record, halves num.iterations to 1
    mockTime.sleep(90L);
    addRecord(mockConsumer, ++offset, 18L);
    thread.runOnce();
    assertThat(thread.currentNumIterations(), equalTo(1));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) MockApiProcessor(org.apache.kafka.test.MockApiProcessor) Utils.mkProperties(org.apache.kafka.common.utils.Utils.mkProperties) Properties(java.util.Properties) LinkedList(java.util.LinkedList) Serdes(org.apache.kafka.common.serialization.Serdes) TopicPartition(org.apache.kafka.common.TopicPartition) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
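Every Streams example above repeats the same MockConsumer bootstrap: assign partitions, seed beginning offsets, feed records, then poll. A minimal end-to-end sketch of that recipe outside Kafka Streams; topic, key, and value names are illustrative:

final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
final TopicPartition tp = new TopicPartition("orders", 0);
consumer.assign(Collections.singleton(tp));
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L)); // required before the first poll
consumer.addRecord(new ConsumerRecord<>("orders", 0, 0L, "key-1", "value-1"));
final ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(0));
assertEquals(1, records.count());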

Aggregations

MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 56
Test (org.junit.Test): 46
TopicPartition (org.apache.kafka.common.TopicPartition): 44
HashMap (java.util.HashMap): 27
TaskId (org.apache.kafka.streams.processor.TaskId): 27
Set (java.util.Set): 24
ArrayList (java.util.ArrayList): 20
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 20
PartitionInfo (org.apache.kafka.common.PartitionInfo): 18
HashSet (java.util.HashSet): 17
Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet): 15
Map (java.util.Map): 10
Properties (java.util.Properties): 10
StreamsException (org.apache.kafka.streams.errors.StreamsException): 10
Collections.emptySet (java.util.Collections.emptySet): 9
InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest): 9
KafkaException (org.apache.kafka.common.KafkaException): 8
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 8
List (java.util.List): 7
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 7