Example 6 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project flink by apache.

The class Kafka010FetcherTest, method testCancellationWhenEmitBlocks.

@Test
public void testCancellationWhenEmitBlocks() throws Exception {
    // ----- some test data -----
    final String topic = "test-topic";
    final int partition = 3;
    final byte[] payload = new byte[] { 1, 2, 3, 4 };
    final List<ConsumerRecord<byte[], byte[]>> records = Arrays.asList(
            new ConsumerRecord<byte[], byte[]>(topic, partition, 15, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 16, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 17, payload, payload));
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> data = new HashMap<>();
    data.put(new TopicPartition(topic, partition), records);
    final ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(data);
    // ----- the test consumer -----
    final KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) {
            return consumerRecords;
        }
    });
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- build a fetcher -----
    BlockingSourceContext<String> sourceContext = new BlockingSourceContext<>();
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition(topic, partition), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka010Fetcher<String> fetcher = new Kafka010Fetcher<>(
            sourceContext, partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(), 10, /* watermark interval */
            this.getClass().getClassLoader(), "task_name", new UnregisteredMetricsGroup(),
            schema, new Properties(), 0L, false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // wait until the thread started to emit records to the source context
    sourceContext.waitTillHasBlocker();
    // now we try to cancel the fetcher, including the interruption usually done on the task thread
    // once it has finished, there must be no more thread blocked on the source context
    fetcher.cancel();
    fetcherRunner.interrupt();
    fetcherRunner.join();
    assertFalse("fetcher threads did not properly finish", sourceContext.isStillBlocking());
}
Also used : UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup), HashMap (java.util.HashMap), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), Properties (java.util.Properties), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), KeyedDeserializationSchemaWrapper (org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper), List (java.util.List), AtomicReference (java.util.concurrent.atomic.AtomicReference), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), KafkaConsumerThread (org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread), TopicPartition (org.apache.kafka.common.TopicPartition), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Mockito.anyLong (org.mockito.Mockito.anyLong), SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema), TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService), Kafka010Fetcher (org.apache.flink.streaming.connectors.kafka.internal.Kafka010Fetcher), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
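
This test stubs poll() with a Mockito Answer and injects the mock through PowerMock's whenNew(...). As a lighter-weight alternative sketch, not what the Flink test itself does, recent kafka-clients versions ship org.apache.kafka.clients.consumer.MockConsumer, which can serve the same canned records without bytecode instrumentation (assuming a client version where MockConsumer takes an OffsetResetStrategy):

import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {

    public static void main(String[] args) {
        // Kafka's built-in test double: no Mockito or PowerMock required.
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        TopicPartition tp = new TopicPartition("test-topic", 3);
        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 15L));

        // queue the same three-record fixture the test above builds by hand
        byte[] payload = new byte[] { 1, 2, 3, 4 };
        for (long offset = 15; offset <= 17; offset++) {
            consumer.addRecord(new ConsumerRecord<>("test-topic", 3, offset, payload, payload));
        }

        ConsumerRecords<byte[], byte[]> polled = consumer.poll(100);
        System.out.println(polled.count()); // 3
    }
}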

Example 7 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project flink by apache.

The class Kafka010FetcherTest, method ensureOffsetsGetCommitted.

@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);
    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);
    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);
    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();
    // ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls ----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked")
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));
    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked") SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka010Fetcher<String> fetcher = new Kafka010Fetcher<>(
            sourceContext, partitionsWithInitialOffsets,
            null, /* periodic assigner */
            null, /* punctuated assigner */
            new TestProcessingTimeService(), 10, /* watermark interval */
            getClass().getClassLoader(), "taskname-with-subtask", new UnregisteredMetricsGroup(),
            schema, new Properties(), 0L, false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // ----- trigger the first offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(18L, entry.getValue().offset());
        }
    }
    // ----- trigger the second offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(28L, entry.getValue().offset());
        }
    }
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();
    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
Also used : UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup), HashMap (java.util.HashMap), MultiShotLatch (org.apache.flink.core.testutils.MultiShotLatch), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue), Properties (java.util.Properties), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), KeyedDeserializationSchemaWrapper (org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), Handover (org.apache.flink.streaming.connectors.kafka.internal.Handover), AtomicReference (java.util.concurrent.atomic.AtomicReference), KafkaConsumerThread (org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread), InvocationOnMock (org.mockito.invocation.InvocationOnMock), TopicPartition (org.apache.kafka.common.TopicPartition), Mockito.anyLong (org.mockito.Mockito.anyLong), SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema), TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService), Map (java.util.Map), OffsetCommitCallback (org.apache.kafka.clients.consumer.OffsetCommitCallback), Kafka010Fetcher (org.apache.flink.streaming.connectors.kafka.internal.Kafka010Fetcher), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
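
One subtlety worth calling out: the test commits offsets 11 and 19 for partition "test" but asserts 12 and 20. Kafka's committed offset denotes the next record to read, so the fetcher adds one before committing. A minimal sketch of that convention (the class and method names here are mine, for illustration):

import org.apache.kafka.clients.consumer.OffsetAndMetadata;

class CommitOffsetSketch {
    // Kafka's committed offset names the NEXT record to consume, so the fetcher
    // commits checkpointOffset + 1: a checkpointed 11 is committed as 12 and a
    // checkpointed 19 as 20, which is what the assertions above expect.
    static OffsetAndMetadata toKafkaCommit(long lastProcessedOffset) {
        return new OffsetAndMetadata(lastProcessedOffset + 1);
    }
}

For example, toKafkaCommit(11L).offset() yields 12L, matching the first assertion in the test.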

Example 8 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project flink by apache.

The class Kafka09FetcherTest, method testCommitDoesNotBlock.

@Test
public void testCommitDoesNotBlock() throws Exception {
    // test data
    final KafkaTopicPartition testPartition = new KafkaTopicPartition("test", 42);
    final Map<KafkaTopicPartition, Long> testCommitData = new HashMap<>();
    testCommitData.put(testPartition, 11L);
    // to synchronize when the consumer is in its blocking method
    final OneShotLatch sync = new OneShotLatch();
    // ----- the mock consumer with blocking poll calls ----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            sync.trigger();
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked") SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext, partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(), 10, /* watermark interval */
            this.getClass().getClassLoader(), "task_name", new UnregisteredMetricsGroup(),
            schema, new Properties(), 0L, false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // wait until the fetcher has reached the method of interest
    sync.await();
    // ----- trigger the offset commit -----
    final AtomicReference<Throwable> commitError = new AtomicReference<>();
    final Thread committer = new Thread("committer runner") {

        @Override
        public void run() {
            try {
                fetcher.commitInternalOffsetsToKafka(testCommitData);
            } catch (Throwable t) {
                commitError.set(t);
            }
        }
    };
    committer.start();
    // ----- ensure that the committer finishes in time  -----
    committer.join(30000);
    assertFalse("The committer did not finish in time", committer.isAlive());
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();
    // check that there were no errors in the fetcher
    final Throwable fetcherError = error.get();
    if (fetcherError != null && !(fetcherError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", fetcherError);
    }
    final Throwable committerError = commitError.get();
    if (committerError != null) {
        throw new Exception("Exception in the committer", committerError);
    }
}
Also used : UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup), HashMap (java.util.HashMap), MultiShotLatch (org.apache.flink.core.testutils.MultiShotLatch), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), Properties (java.util.Properties), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), KeyedDeserializationSchemaWrapper (org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper), Handover (org.apache.flink.streaming.connectors.kafka.internal.Handover), OneShotLatch (org.apache.flink.core.testutils.OneShotLatch), AtomicReference (java.util.concurrent.atomic.AtomicReference), KafkaConsumerThread (org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread), Kafka09Fetcher (org.apache.flink.streaming.connectors.kafka.internal.Kafka09Fetcher), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Mockito.anyLong (org.mockito.Mockito.anyLong), SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema), TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
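
The mocked poll()/wakeup() pair reproduces the real client contract: KafkaConsumer.wakeup() is the one method that may safely be called from another thread, and it causes a poll() blocked on the polling thread to throw WakeupException. A minimal sketch of a loop honoring that contract (structure assumed for illustration, not taken from the Flink fetcher):

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class PollLoopSketch {

    // Runs on the dedicated polling thread; another thread calls
    // consumer.wakeup() to break the blocking poll() during shutdown.
    static void runPollLoop(KafkaConsumer<byte[], byte[]> consumer) {
        try {
            while (true) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Long.MAX_VALUE);
                // hand the records over for emission, as the Handover does in the fetcher
            }
        } catch (WakeupException e) {
            // expected: another thread called consumer.wakeup() to stop us
        } finally {
            consumer.close();
        }
    }
}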

Example 9 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project flink by apache.

The class Kafka09FetcherTest, method testCancellationWhenEmitBlocks.

@Test
public void testCancellationWhenEmitBlocks() throws Exception {
    // ----- some test data -----
    final String topic = "test-topic";
    final int partition = 3;
    final byte[] payload = new byte[] { 1, 2, 3, 4 };
    final List<ConsumerRecord<byte[], byte[]>> records = Arrays.asList(
            new ConsumerRecord<byte[], byte[]>(topic, partition, 15, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 16, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 17, payload, payload));
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> data = new HashMap<>();
    data.put(new TopicPartition(topic, partition), records);
    final ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(data);
    // ----- the test consumer -----
    final KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) {
            return consumerRecords;
        }
    });
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- build a fetcher -----
    BlockingSourceContext<String> sourceContext = new BlockingSourceContext<>();
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition(topic, partition), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext, partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(), 10, /* watermark interval */
            this.getClass().getClassLoader(), "task_name", new UnregisteredMetricsGroup(),
            schema, new Properties(), 0L, false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // wait until the thread started to emit records to the source context
    sourceContext.waitTillHasBlocker();
    // now we try to cancel the fetcher, including the interruption usually done on the task thread
    // once it has finished, there must be no more thread blocked on the source context
    fetcher.cancel();
    fetcherRunner.interrupt();
    fetcherRunner.join();
    assertFalse("fetcher threads did not properly finish", sourceContext.isStillBlocking());
}
Also used : UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup), HashMap (java.util.HashMap), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), Properties (java.util.Properties), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), KeyedDeserializationSchemaWrapper (org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper), List (java.util.List), AtomicReference (java.util.concurrent.atomic.AtomicReference), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), KafkaConsumerThread (org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread), Kafka09Fetcher (org.apache.flink.streaming.connectors.kafka.internal.Kafka09Fetcher), TopicPartition (org.apache.kafka.common.TopicPartition), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Mockito.anyLong (org.mockito.Mockito.anyLong), SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema), TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
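
Both cancellation tests feed the fetcher through the public ConsumerRecords(Map) constructor. For completeness, here is a sketch of the read side of the same class against a one-record fixture (constructor and accessors as in the 0.9/0.10-era clients these tests target; the main method and prints are illustrative):

import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class ConsumerRecordsReadSketch {

    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("test-topic", 3);
        byte[] payload = new byte[] { 1, 2, 3, 4 };
        ConsumerRecords<byte[], byte[]> fixture = new ConsumerRecords<>(
                Collections.singletonMap(tp, Collections.singletonList(
                        new ConsumerRecord<byte[], byte[]>("test-topic", 3, 15, payload, payload))));

        // per-partition access, which is how the fetchers walk the batch internally
        List<ConsumerRecord<byte[], byte[]>> perPartition = fixture.records(tp);
        System.out.println(fixture.partitions());         // [test-topic-3]
        System.out.println(fixture.count());              // 1
        System.out.println(perPartition.get(0).offset()); // 15
    }
}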

Example 10 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project samza by apache.

The class TestStreamProcessor, method verifyNumMessages.

/**
   * Consumes data from the topic until there are no new messages for a while
   * and asserts that the number of consumed messages is as expected.
   */
private void verifyNumMessages(String topic, int expectedNumMessages) {
    KafkaConsumer consumer = getKafkaConsumer();
    consumer.subscribe(Collections.singletonList(topic));
    int count = 0;
    int emptyPollCount = 0;
    try {
        while (count < expectedNumMessages && emptyPollCount < 5) {
            ConsumerRecords records = consumer.poll(5000);
            if (!records.isEmpty()) {
                Iterator<ConsumerRecord> iterator = records.iterator();
                while (iterator.hasNext()) {
                    ConsumerRecord record = iterator.next();
                    Assert.assertEquals(String.valueOf(count), new String((byte[]) record.value()));
                    count++;
                }
            } else {
                emptyPollCount++;
            }
        }
    } finally {
        // release the consumer's network resources even if an assertion fails
        consumer.close();
    }
    Assert.assertEquals(expectedNumMessages, count);
}
Also used : KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
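
The loop above stops after five consecutive empty polls. An alternative sketch of the same idea using a wall-clock deadline and the Duration-based poll() overload that newer kafka-clients versions provide (the helper name and timeout values are my own, not Samza's):

import java.time.Duration;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

class DrainUntilDeadlineSketch {

    // Count every message seen until the deadline passes, polling in 1s slices.
    static int countMessages(KafkaConsumer<byte[], byte[]> consumer, long deadlineMillis) {
        int count = 0;
        while (System.currentTimeMillis() < deadlineMillis) {
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<byte[], byte[]> record : records) {
                count++;
            }
        }
        return count;
    }
}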

Aggregations

ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 15
HashMap (java.util.HashMap): 11
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 10
Properties (java.util.Properties): 8
Test (org.junit.Test): 7
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 7
AtomicReference (java.util.concurrent.atomic.AtomicReference): 6
UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup): 6
KafkaConsumerThread (org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread): 6
KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition): 6
TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService): 6
KeyedDeserializationSchemaWrapper (org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper): 6
SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema): 6
Mockito.anyLong (org.mockito.Mockito.anyLong): 6
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 6
List (java.util.List): 4
MultiShotLatch (org.apache.flink.core.testutils.MultiShotLatch): 4
Handover (org.apache.flink.streaming.connectors.kafka.internal.Handover): 4
Collection (java.util.Collection): 3