
Example 31 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.

From the class AbstractTaskTest, method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException:

@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException() throws Exception {
    final Consumer consumer = mockConsumer(new AuthorizationException("blah"));
    final AbstractTask task = createTask(consumer);
    task.initializeOffsetLimits();
}
Also used : Consumer(org.apache.kafka.clients.consumer.Consumer) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) Test(org.junit.Test)
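
Note: the mockConsumer(...) helper is not shown in the snippet. A minimal sketch of what such a helper might look like, assuming initializeOffsetLimits() reads committed offsets from the consumer (the choice of committed(...) as the failing call is an assumption, not taken from the original test):

private Consumer<byte[], byte[]> mockConsumer(final RuntimeException toThrow) {
    // Hypothetical helper: every committed-offset lookup fails with the supplied exception,
    // so the task's offset initialization surfaces it to the caller.
    return new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public synchronized OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}
Also assumed : OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata) OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy) TopicPartition (org.apache.kafka.common.TopicPartition)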

Example 32 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.

From the class ProcessorTopologyTestDriver, method createRestoreConsumer:

/**
     * Utility method that creates the {@link MockConsumer} used for restoring state. State restoration is not
     * performed by this driver unless this method is overridden to return a functional consumer.
     *
     * @param id the ID of the stream task
     * @param storeToChangelogTopic the map of the names of the stores to the changelog topics
     * @return the mock consumer; never null
     */
protected MockConsumer<byte[], byte[]> createRestoreConsumer(TaskId id, Map<String, String> storeToChangelogTopic) {
    MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.LATEST) {

        @Override
        public synchronized void seekToEnd(Collection<TopicPartition> partitions) {
            // do nothing ...
        }

        @Override
        public synchronized void seekToBeginning(Collection<TopicPartition> partitions) {
            // do nothing ...
        }

        @Override
        public synchronized long position(TopicPartition partition) {
            // always report position 0 ...
            return 0L;
        }
    };
    // For each store ...
    for (Map.Entry<String, String> storeAndTopic : storeToChangelogTopic.entrySet()) {
        String topicName = storeAndTopic.getValue();
        // Set up the restore-state topic ...
        // consumer.subscribe(new TopicPartition(topicName, 1));
        // Set up the partition that matches the ID (which is what ProcessorStateManager expects) ...
        List<PartitionInfo> partitionInfos = new ArrayList<>();
        partitionInfos.add(new PartitionInfo(topicName, PARTITION_ID, null, null, null));
        consumer.updatePartitions(topicName, partitionInfos);
        consumer.updateEndOffsets(Collections.singletonMap(new TopicPartition(topicName, PARTITION_ID), 0L));
    }
    return consumer;
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) Collection(java.util.Collection) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) HashMap(java.util.HashMap) Map(java.util.Map)
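
For reference, a test can prime a MockConsumer in a similar way outside the driver and then feed it changelog records directly. This is a minimal sketch, not code from the project; the topic name and payloads are made up, it uses EARLIEST so the record added below is actually returned, and it assumes the pre-2.0 poll(long) overload used by this generation of the client:

MockConsumer<byte[], byte[]> restoreConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
TopicPartition changelog = new TopicPartition("store-changelog", 0);
// Advertise the partition and its offsets, mirroring createRestoreConsumer(...) above.
restoreConsumer.updatePartitions("store-changelog",
    Collections.singletonList(new PartitionInfo("store-changelog", 0, null, null, null)));
restoreConsumer.assign(Collections.singletonList(changelog));
restoreConsumer.updateBeginningOffsets(Collections.singletonMap(changelog, 0L));
restoreConsumer.updateEndOffsets(Collections.singletonMap(changelog, 1L));
// Records can only be added for partitions that are currently assigned.
restoreConsumer.addRecord(new ConsumerRecord<>("store-changelog", 0, 0L, "key".getBytes(), "value".getBytes()));
ConsumerRecords<byte[], byte[]> records = restoreConsumer.poll(100L);
Also assumed : ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords) Collections (java.util.Collections) OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy)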

Example 33 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.

From the class StoreChangelogReaderTest, method shouldThrowStreamsExceptionWhenTimeoutExceptionThrown:

@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionWhenTimeoutExceptionThrown() throws Exception {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<String, List<PartitionInfo>> listTopics() {
            throw new TimeoutException("KABOOM!");
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 0);
    try {
        changelogReader.validatePartitionExists(topicPartition, "store");
        fail("Should have thrown streams exception");
    } catch (final StreamsException e) {
        // pass
    }
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) List(java.util.List) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) MockTime(org.apache.kafka.common.utils.MockTime) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Test(org.junit.Test)
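
The same override-and-throw technique works for any Consumer method, not just listTopics(). A generic sketch, independent of StoreChangelogReader (the topic name is made up), that simulates a broker timeout from partitionsFor(...):

final MockConsumer<byte[], byte[]> failingConsumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

    @Override
    public synchronized List<PartitionInfo> partitionsFor(final String topic) {
        throw new TimeoutException("simulated broker timeout");
    }
};
try {
    failingConsumer.partitionsFor("some-topic");
    fail("Should have thrown TimeoutException");
} catch (final TimeoutException expected) {
    // pass: this is the client-level exception that callers such as StoreChangelogReader translate into a StreamsException
}
Also assumed : PartitionInfo (org.apache.kafka.common.PartitionInfo) OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy) Assert.fail (org.junit.Assert.fail)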

Example 34 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

From the class TopologyTestDriver, method createRestoreConsumer:

private MockConsumer<byte[], byte[]> createRestoreConsumer(final Map<String, String> storeToChangelogTopic) {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.LATEST) {

        @Override
        public synchronized void seekToEnd(final Collection<TopicPartition> partitions) {
            // intentionally a no-op
        }

        @Override
        public synchronized void seekToBeginning(final Collection<TopicPartition> partitions) {
            // intentionally a no-op
        }

        @Override
        public synchronized long position(final TopicPartition partition) {
            // always report position 0
            return 0L;
        }
    };
    // for each store
    for (final Map.Entry<String, String> storeAndTopic : storeToChangelogTopic.entrySet()) {
        final String topicName = storeAndTopic.getValue();
        // Set up the restore-state topic ...
        // consumer.subscribe(new TopicPartition(topicName, 0));
        // Set up the partition that matches the ID (which is what ProcessorStateManager expects) ...
        final List<PartitionInfo> partitionInfos = new ArrayList<>();
        partitionInfos.add(new PartitionInfo(topicName, PARTITION_ID, null, null, null));
        consumer.updatePartitions(topicName, partitionInfos);
        consumer.updateEndOffsets(Collections.singletonMap(new TopicPartition(topicName, PARTITION_ID), 0L));
    }
    return consumer;
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) Collection(java.util.Collection) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Map(java.util.Map) HashMap(java.util.HashMap)
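
Subclassing is not the only way to adjust a MockConsumer: work can also be scheduled to run during the next poll(). This is a generic sketch of MockConsumer.schedulePollTask, not something TopologyTestDriver itself does; the topic name and payloads are made up:

final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
final TopicPartition tp = new TopicPartition("store-changelog", 0);
consumer.assign(Collections.singletonList(tp));
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
// The scheduled task runs inside the next poll() call, e.g. to feed a record "just in time".
consumer.schedulePollTask(new Runnable() {
    @Override
    public void run() {
        consumer.addRecord(new ConsumerRecord<>("store-changelog", 0, 0L, new byte[0], new byte[0]));
    }
});
consumer.poll(100L);
Also assumed : ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) Collections (java.util.Collections) OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy)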

Example 35 with MockConsumer

Use of org.apache.kafka.clients.consumer.MockConsumer in project apache-kafka-on-k8s by banzaicloud.

From the class StreamThreadTest, method shouldInjectProducerPerTaskUsingClientSupplierOnCreateIfEosEnable:

@Test
public void shouldInjectProducerPerTaskUsingClientSupplierOnCreateIfEosEnable() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final StreamThread thread = createStreamThread(clientId, new StreamsConfig(configProps(true)), true);
    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign one partition per task
    assignedPartitions.add(t1p1);
    assignedPartitions.add(t1p2);
    activeTasks.put(task1, Collections.singleton(t1p1));
    activeTasks.put(task2, Collections.singleton(t1p2));
    thread.taskManager().setAssignmentMetadata(activeTasks, Collections.<TaskId, Set<TopicPartition>>emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.consumer;
    mockConsumer.assign(assignedPartitions);
    Map<TopicPartition, Long> beginOffsets = new HashMap<>();
    beginOffsets.put(t1p1, 0L);
    beginOffsets.put(t1p2, 0L);
    mockConsumer.updateBeginningOffsets(beginOffsets);
    thread.rebalanceListener.onPartitionsAssigned(new HashSet<>(assignedPartitions));
    thread.runOnce(-1);
    assertEquals(thread.tasks().size(), clientSupplier.producers.size());
    assertSame(clientSupplier.consumer, thread.consumer);
    assertSame(clientSupplier.restoreConsumer, thread.restoreConsumer);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TopicPartition(org.apache.kafka.common.TopicPartition) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)
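
Besides injecting assignments and offsets, a MockConsumer also records what the code under test did with it, which is what makes the cast of thread.consumer above useful. A generic sketch, independent of StreamThreadTest, of asserting against a MockConsumer afterwards (commitSync is called inline only for brevity; normally the code under test would do it):

final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
final TopicPartition t1p1 = new TopicPartition("topic1", 1);
consumer.assign(Collections.singletonList(t1p1));
consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
consumer.commitSync(Collections.singletonMap(t1p1, new OffsetAndMetadata(5L)));
// The mock exposes both the current assignment and the offsets that were committed to it.
assertEquals(Collections.singleton(t1p1), consumer.assignment());
assertEquals(5L, consumer.committed(t1p1).offset());
Also assumed : OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata) Collections (java.util.Collections) OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy) Assert.assertEquals (org.junit.Assert.assertEquals)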

Aggregations

MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 56
Test (org.junit.Test): 46
TopicPartition (org.apache.kafka.common.TopicPartition): 44
HashMap (java.util.HashMap): 27
TaskId (org.apache.kafka.streams.processor.TaskId): 27
Set (java.util.Set): 24
ArrayList (java.util.ArrayList): 20
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 20
PartitionInfo (org.apache.kafka.common.PartitionInfo): 18
HashSet (java.util.HashSet): 17
Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet): 15
Map (java.util.Map): 10
Properties (java.util.Properties): 10
StreamsException (org.apache.kafka.streams.errors.StreamsException): 10
Collections.emptySet (java.util.Collections.emptySet): 9
InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest): 9
KafkaException (org.apache.kafka.common.KafkaException): 8
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 8
List (java.util.List): 7
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 7