Search in sources :

Example 36 with MockConsumer

use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

The example below is taken from the class StreamThreadTest, method shouldCloseAllTaskProducersOnCloseIfEosEnabled.

@Test
public void shouldCloseAllTaskProducersOnCloseIfEosEnabled() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final StreamThread thread = createStreamThread(clientId, new StreamsConfig(configProps(true)), true);
    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());

    // Two active tasks, one input partition each.
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(task1, Collections.singleton(t1p1));
    activeTasks.put(task2, Collections.singleton(t1p2));
    thread.taskManager().setAssignmentMetadata(activeTasks, Collections.<TaskId, Set<TopicPartition>>emptyMap());

    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    assignedPartitions.add(t1p1);
    assignedPartitions.add(t1p2);

    final MockConsumer<byte[], byte[]> consumer = (MockConsumer<byte[], byte[]>) thread.consumer;
    consumer.assign(assignedPartitions);
    final Map<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(t1p1, 0L);
    startOffsets.put(t1p2, 0L);
    consumer.updateBeginningOffsets(startOffsets);
    thread.rebalanceListener.onPartitionsAssigned(assignedPartitions);

    thread.shutdown();
    thread.run();

    // With EOS each task has its own producer (fetched via the task's record
    // collector below); closing the thread must close every one of them.
    for (final Task task : thread.tasks().values()) {
        final MockProducer taskProducer = (MockProducer) ((RecordCollectorImpl) ((StreamTask) task).recordCollector()).producer();
        assertTrue(taskProducer.closed());
    }
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TopicPartition(org.apache.kafka.common.TopicPartition) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)

Example 37 with MockConsumer

use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

The example below is taken from the class StreamThreadTest, method shouldRecoverFromInvalidOffsetExceptionOnRestoreAndFinishRestore.

@Test
public void shouldRecoverFromInvalidOffsetExceptionOnRestoreAndFinishRestore() throws Exception {
    // Topology with a count() aggregation whose store is backed by the
    // "stream-thread-test-count-changelog" topic, so the thread must restore
    // from that changelog before it can finish initialization.
    internalStreamsBuilder.stream(Collections.singleton("topic"), consumed).groupByKey().count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("count"));
    // NOTE(review): "cliendId" typo kept as-is; the value is only used as the client id.
    final StreamThread thread = createStreamThread("cliendId", config, false);
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.consumer;
    final MockConsumer<byte[], byte[]> mockRestoreConsumer = (MockConsumer<byte[], byte[]>) thread.restoreConsumer;
    final TopicPartition topicPartition = new TopicPartition("topic", 0);
    final Set<TopicPartition> topicPartitionSet = Collections.singleton(topicPartition);
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(new TaskId(0, 0), topicPartitionSet);
    thread.taskManager().setAssignmentMetadata(activeTasks, Collections.<TaskId, Set<TopicPartition>>emptyMap());
    mockConsumer.updatePartitions("topic", new ArrayList<PartitionInfo>() {

        {
            add(new PartitionInfo("topic", 0, null, new Node[0], new Node[0]));
        }
    });
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(topicPartition, 0L));
    mockRestoreConsumer.updatePartitions("stream-thread-test-count-changelog", new ArrayList<PartitionInfo>() {

        {
            add(new PartitionInfo("stream-thread-test-count-changelog", 0, null, new Node[0], new Node[0]));
        }
    });
    final TopicPartition changelogPartition = new TopicPartition("stream-thread-test-count-changelog", 0);
    final Set<TopicPartition> changelogPartitionSet = Collections.singleton(changelogPartition);
    mockRestoreConsumer.updateBeginningOffsets(Collections.singletonMap(changelogPartition, 0L));
    // End offset 2 means two records (offsets 0 and 1) must be restored.
    mockRestoreConsumer.updateEndOffsets(Collections.singletonMap(changelogPartition, 2L));
    // Trigger the partition assignment from inside the consumer's poll loop
    // once the thread is actually running.
    mockConsumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            thread.setState(StreamThread.State.PARTITIONS_REVOKED);
            thread.rebalanceListener.onPartitionsAssigned(topicPartitionSet);
        }
    });
    try {
        thread.start();
        TestUtils.waitForCondition(new TestCondition() {

            @Override
            public boolean conditionMet() {
                return mockRestoreConsumer.assignment().size() == 1;
            }
        }, "Restore consumer was never assigned the changelog partition");
        mockRestoreConsumer.addRecord(new ConsumerRecord<>("stream-thread-test-count-changelog", 0, 0L, "K1".getBytes(), "V1".getBytes()));
        TestUtils.waitForCondition(new TestCondition() {

            @Override
            public boolean conditionMet() {
                return mockRestoreConsumer.position(changelogPartition) == 1L;
            }
        }, "Never restored the first record");
        // Inject an InvalidOffsetException on the next restore poll; the thread
        // is expected to recover and re-restore from the beginning.
        mockRestoreConsumer.setException(new InvalidOffsetException("Try Again!") {

            @Override
            public Set<TopicPartition> partitions() {
                return changelogPartitionSet;
            }
        });
        mockRestoreConsumer.addRecord(new ConsumerRecord<>("stream-thread-test-count-changelog", 0, 0L, "K1".getBytes(), "V1".getBytes()));
        mockRestoreConsumer.addRecord(new ConsumerRecord<>("stream-thread-test-count-changelog", 0, 1L, "K2".getBytes(), "V2".getBytes()));
        TestUtils.waitForCondition(new TestCondition() {

            @Override
            public boolean conditionMet() {
                // NOTE(review): re-assigning on every check — presumably to restore
                // the mock's assignment after the injected exception; confirm.
                mockRestoreConsumer.assign(changelogPartitionSet);
                return mockRestoreConsumer.position(changelogPartition) == 2L;
            }
        }, "Never finished restore");
    } finally {
        thread.shutdown();
        thread.join(10000);
    }
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) InvalidOffsetException(org.apache.kafka.clients.consumer.InvalidOffsetException) Bytes(org.apache.kafka.common.utils.Bytes) TopicPartition(org.apache.kafka.common.TopicPartition) TestCondition(org.apache.kafka.test.TestCondition) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)

Example 38 with MockConsumer

use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

The example below is taken from the class StreamThreadTest, method shouldReturnActiveTaskMetadataWhileRunningState.

@Test
public void shouldReturnActiveTaskMetadataWhileRunningState() {
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    final StreamThread thread = createStreamThread(clientId, config, false);
    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(null);

    // Single active task over a single partition.
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().setAssignmentMetadata(activeTasks, Collections.<TaskId, Set<TopicPartition>>emptyMap());

    final List<TopicPartition> assignment = Collections.singletonList(t1p1);
    final MockConsumer<byte[], byte[]> consumer = (MockConsumer<byte[], byte[]>) thread.consumer;
    consumer.assign(assignment);
    consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener.onPartitionsAssigned(assignment);
    thread.runOnce(-1);

    // The thread metadata must report RUNNING with the single active task and no standbys.
    final ThreadMetadata metadata = thread.threadMetadata();
    assertEquals(StreamThread.State.RUNNING.name(), metadata.threadState());
    assertTrue(metadata.activeTasks().contains(new TaskMetadata(task1.toString(), Utils.mkSet(t1p1))));
    assertTrue(metadata.standbyTasks().isEmpty());
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) TaskMetadata(org.apache.kafka.streams.processor.TaskMetadata) ThreadMetadata(org.apache.kafka.streams.processor.ThreadMetadata) ArrayList(java.util.ArrayList) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)

Example 39 with MockConsumer

use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

The example below is taken from the class StreamThreadTest, method shouldCloseTaskAsZombieAndRemoveFromActiveTasksIfProducerWasFencedWhileProcessing.

@Test
public void shouldCloseTaskAsZombieAndRemoveFromActiveTasksIfProducerWasFencedWhileProcessing() throws Exception {
    // Simple pass-through topology (source -> sink) with EOS enabled.
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    internalTopologyBuilder.addSink("sink", "dummyTopic", null, null, null, "source");
    final StreamThread thread = createStreamThread(clientId, new StreamsConfig(configProps(true)), true);

    final MockConsumer<byte[], byte[]> consumer = clientSupplier.consumer;
    consumer.updatePartitions(topic1, Collections.singletonList(new PartitionInfo(topic1, 1, null, null, null)));

    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(null);

    // One active task over a single partition.
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().setAssignmentMetadata(activeTasks, Collections.<TaskId, Set<TopicPartition>>emptyMap());

    final List<TopicPartition> assignment = new ArrayList<>();
    assignment.add(t1p1);
    final MockConsumer<byte[], byte[]> threadConsumer = (MockConsumer<byte[], byte[]>) thread.consumer;
    threadConsumer.assign(assignment);
    threadConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener.onPartitionsAssigned(assignment);
    thread.runOnce(-1);
    assertThat(thread.tasks().size(), equalTo(1));

    final MockProducer producer = clientSupplier.producers.get(0);

    // Switch the consumer's subscription from "pattern" to "manual" so that
    // records can be injected via addRecord().
    consumer.updateBeginningOffsets(Collections.singletonMap(assignment.iterator().next(), 0L));
    consumer.unsubscribe();
    consumer.assign(new HashSet<>(assignment));

    // Process one record: it must appear in the producer history, with the
    // transaction still open.
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 0, new byte[0], new byte[0]));
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1);
    thread.runOnce(-1);
    assertThat(producer.history().size(), equalTo(1));
    assertFalse(producer.transactionCommitted());

    // After the commit interval elapses the transaction gets committed exactly once.
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    TestUtils.waitForCondition(new TestCondition() {

        @Override
        public boolean conditionMet() {
            return producer.commitCount() == 1;
        }
    }, "StreamsThread did not commit transaction.");

    // Fence the producer: the next processing attempt must surface a
    // TaskMigratedException and drop the task as a zombie.
    producer.fenceProducer();
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 0, new byte[0], new byte[0]));
    try {
        thread.runOnce(-1);
        fail("Should have thrown TaskMigratedException");
    } catch (final TaskMigratedException expected) {
        // expected path
    }
    TestUtils.waitForCondition(new TestCondition() {

        @Override
        public boolean conditionMet() {
            return thread.tasks().isEmpty();
        }
    }, "StreamsThread did not remove fenced zombie task.");
    // No further commit may have happened after fencing.
    assertThat(producer.commitCount(), equalTo(1L));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) MockProducer(org.apache.kafka.clients.producer.MockProducer) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TopicPartition(org.apache.kafka.common.TopicPartition) TestCondition(org.apache.kafka.test.TestCondition) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)

Example 40 with MockConsumer

use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

The example below is taken from the class StreamThreadTest, method testPartitionAssignmentChangeForSingleGroup.

@Test
public void testPartitionAssignmentChangeForSingleGroup() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final StreamThread thread = createStreamThread(clientId, config, false);

    // Observe state transitions through a listener stub.
    final StateListenerStub stateListener = new StateListenerStub();
    thread.setStateListener(stateListener);
    assertEquals(StreamThread.State.CREATED, thread.state());

    final ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener;
    thread.setState(StreamThread.State.RUNNING);

    // Revoking nothing still moves the thread to PARTITIONS_REVOKED.
    rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    assertEquals(StreamThread.State.PARTITIONS_REVOKED, thread.state());

    // Assign a single partition and run one iteration.
    final List<TopicPartition> assignedPartitions = Collections.singletonList(t1p1);
    thread.taskManager().setAssignmentMetadata(Collections.<TaskId, Set<TopicPartition>>emptyMap(), Collections.<TaskId, Set<TopicPartition>>emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.consumer;
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    thread.runOnce(-1);
    assertEquals(StreamThread.State.RUNNING, thread.state());
    assertEquals(4, stateListener.numChanges);
    assertEquals(StreamThread.State.PARTITIONS_ASSIGNED, stateListener.oldState);

    thread.shutdown();
    assertEquals(StreamThread.State.PENDING_SHUTDOWN, thread.state());
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)

Aggregations

MockConsumer (org.apache.kafka.clients.consumer.MockConsumer)56 Test (org.junit.Test)46 TopicPartition (org.apache.kafka.common.TopicPartition)44 HashMap (java.util.HashMap)27 TaskId (org.apache.kafka.streams.processor.TaskId)27 Set (java.util.Set)24 ArrayList (java.util.ArrayList)20 StreamsConfig (org.apache.kafka.streams.StreamsConfig)20 PartitionInfo (org.apache.kafka.common.PartitionInfo)18 HashSet (java.util.HashSet)17 Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet)15 Map (java.util.Map)10 Properties (java.util.Properties)10 StreamsException (org.apache.kafka.streams.errors.StreamsException)10 Collections.emptySet (java.util.Collections.emptySet)9 InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest)9 KafkaException (org.apache.kafka.common.KafkaException)8 TimeoutException (org.apache.kafka.common.errors.TimeoutException)8 List (java.util.List)7 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)7