Example 11 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project samza by apache.

The class TestZkStreamProcessorBase, method verifyNumMessages.

/**
   * Consumes data from the topic until there are no new messages for a while
   * and asserts that the number of consumed messages is as expected.
   */
protected void verifyNumMessages(String topic, final Map<Integer, Boolean> expectedValues, int expectedNumMessages) {
    KafkaConsumer consumer = getKafkaConsumer();
    consumer.subscribe(Collections.singletonList(topic));
    Map<Integer, Boolean> map = new HashMap<>(expectedValues);
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                String val = new String((byte[]) record.value());
                LOG.info("Got value " + val + "; count = " + count + "; out of " + expectedNumMessages);
                Integer valI = Integer.valueOf(val);
                if (valI < BAD_MESSAGE_KEY) {
                    map.put(valI, true);
                    count++;
                }
            }
        } else {
            emptyPollCount++;
            LOG.warn("empty polls " + emptyPollCount);
        }
    }
    // count the expected values that were never received
    long numFalse = map.values().stream().filter(v -> !v).count();
    Assert.assertEquals("some expected values were never received", 0, numFalse);
    Assert.assertEquals(expectedNumMessages, count);
}
Also used: AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) JobCoordinator(org.apache.samza.coordinator.JobCoordinator) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) TestZkUtils(org.apache.samza.zk.TestZkUtils) InitableTask(org.apache.samza.task.InitableTask) MessageCollector(org.apache.samza.task.MessageCollector) SystemStream(org.apache.samza.system.SystemStream) Map(java.util.Map) StreamTask(org.apache.samza.task.StreamTask) ApplicationConfig(org.apache.samza.config.ApplicationConfig) StandaloneTestUtils(org.apache.samza.test.StandaloneTestUtils) MapConfig(org.apache.samza.config.MapConfig) Before(org.junit.Before) TaskContext(org.apache.samza.task.TaskContext) Util(org.apache.samza.util.Util) Properties(java.util.Properties) IncomingMessageEnvelope(org.apache.samza.system.IncomingMessageEnvelope) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) StreamTaskFactory(org.apache.samza.task.StreamTaskFactory) ZkConfig(org.apache.samza.config.ZkConfig) TaskCoordinator(org.apache.samza.task.TaskCoordinator) ExecutionException(java.util.concurrent.ExecutionException) CountDownLatch(java.util.concurrent.CountDownLatch) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TestUtils(kafka.utils.TestUtils) OutgoingMessageEnvelope(org.apache.samza.system.OutgoingMessageEnvelope) JobCoordinatorConfig(org.apache.samza.config.JobCoordinatorConfig) Config(org.apache.samza.config.Config) StandaloneIntegrationTestHarness(org.apache.samza.test.StandaloneIntegrationTestHarness) Assert(org.junit.Assert) Collections(java.util.Collections) JobCoordinatorFactory(org.apache.samza.coordinator.JobCoordinatorFactory) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
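
For comparison, here is the same poll-until-quiet idea as a minimal, self-contained sketch outside the Samza test harness. It is only a sketch: the group id and deserializer settings are illustrative, and it assumes a kafka-clients version with the Duration-based poll (2.0 or later), unlike the poll(5000) call above.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class PollUntilQuiet {

    // Counts messages on a topic, stopping after five empty polls.
    // Like the test above, the empty-poll counter is cumulative and
    // is never reset by a successful poll.
    public static int countMessages(String bootstrapServers, String topic) {
        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        props.put("group.id", "verify-group"); // placeholder group id
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        int count = 0;
        int emptyPollCount = 0;
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList(topic));
            while (emptyPollCount < 5) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
                if (records.isEmpty()) {
                    emptyPollCount++;
                } else {
                    count += records.count();
                }
            }
        }
        return count;
    }
}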

Example 12 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project flink by apache.

The class Kafka09FetcherTest, method ensureOffsetsGetCommitted.

@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);
    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);
    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);
    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();
    // ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls -----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked") Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));
    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked") SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
        sourceContext,
        partitionsWithInitialOffsets,
        null, /* periodic watermark extractor */
        null, /* punctuated watermark extractor */
        new TestProcessingTimeService(),
        10, /* watermark interval */
        this.getClass().getClassLoader(),
        "task_name",
        new UnregisteredMetricsGroup(),
        schema,
        new Properties(),
        0L,
        false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // ----- trigger the first offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(17L, entry.getValue().offset());
        }
    }
    // ----- trigger the second offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(27L, entry.getValue().offset());
        }
    }
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();
    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
Also used: UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) HashMap(java.util.HashMap) MultiShotLatch(org.apache.flink.core.testutils.MultiShotLatch) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Properties(java.util.Properties) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) KeyedDeserializationSchemaWrapper(org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Handover(org.apache.flink.streaming.connectors.kafka.internal.Handover) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaConsumerThread(org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread) Kafka09Fetcher(org.apache.flink.streaming.connectors.kafka.internal.Kafka09Fetcher) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TopicPartition(org.apache.kafka.common.TopicPartition) Mockito.anyLong(org.mockito.Mockito.anyLong) SimpleStringSchema(org.apache.flink.streaming.util.serialization.SimpleStringSchema) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) Map(java.util.Map) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
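
The key trick in this test is a stubbed poll() that blocks until wakeup() is called, which is how the fetcher thread is parked and released. A stripped-down sketch of just that pattern, assuming Mockito and a kafka-clients version that still has poll(long) as in the test above, might look like the following; a plain CountDownLatch stands in for Flink's MultiShotLatch, so it releases only once.

import java.util.concurrent.CountDownLatch;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class BlockingPollMock {

    // poll() parks the calling thread on the latch; wakeup() releases it,
    // after which poll() returns an empty batch, just like the stub above.
    @SuppressWarnings("unchecked")
    public static KafkaConsumer<byte[], byte[]> create(final CountDownLatch blocker) {
        KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class);
        when(consumer.poll(anyLong())).thenAnswer(invocation -> {
            blocker.await();
            return ConsumerRecords.<byte[], byte[]>empty();
        });
        doAnswer(invocation -> {
            blocker.countDown();
            return null;
        }).when(consumer).wakeup();
        return consumer;
    }
}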

Example 13 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.

The class WorkerSinkTaskThreadedTest, method expectPolls.

// Note that this can only be called once per test currently
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
                Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                        TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {

        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
Also used: SinkRecord(org.apache.kafka.connect.sink.SinkRecord) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) IAnswer(org.easymock.IAnswer) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection)
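
The stubbed answer above builds a ConsumerRecords batch by hand. A small helper for that could look like the sketch below; the names RecordsFixture and singleRecord are illustrative, not from the Connect test. It relies on the map-based ConsumerRecords constructor and the short (topic, partition, offset, key, value) ConsumerRecord constructor, both present in the client versions these examples target.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class RecordsFixture {

    // Wraps a single record in the per-partition map that the
    // ConsumerRecords constructor expects.
    public static ConsumerRecords<byte[], byte[]> singleRecord(
            String topic, int partition, long offset, byte[] key, byte[] value) {
        List<ConsumerRecord<byte[], byte[]>> records =
            Collections.singletonList(new ConsumerRecord<>(topic, partition, offset, key, value));
        Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> byPartition =
            Collections.singletonMap(new TopicPartition(topic, partition), records);
        return new ConsumerRecords<>(byPartition);
    }
}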

Example 14 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project storm by apache.

The class KafkaSpoutRebalanceTest, method emitOneMessagePerPartitionThenRevokeOnePartition.

//Returns messageIds in order of emission
private List<KafkaSpoutMessageId> emitOneMessagePerPartitionThenRevokeOnePartition(KafkaSpout<String, String> spout, TopicPartition partitionThatWillBeRevoked, TopicPartition assignedPartition) {
    //Setup spout with mock consumer so we can get at the rebalance listener
    spout.open(conf, contextMock, collectorMock);
    spout.activate();
    ArgumentCaptor<ConsumerRebalanceListener> rebalanceListenerCapture = ArgumentCaptor.forClass(ConsumerRebalanceListener.class);
    verify(consumerMock).subscribe(anyCollection(), rebalanceListenerCapture.capture());
    //Assign partitions to the spout
    ConsumerRebalanceListener consumerRebalanceListener = rebalanceListenerCapture.getValue();
    List<TopicPartition> assignedPartitions = new ArrayList<>();
    assignedPartitions.add(partitionThatWillBeRevoked);
    assignedPartitions.add(assignedPartition);
    consumerRebalanceListener.onPartitionsAssigned(assignedPartitions);
    //Make the consumer return a single message for each partition
    Map<TopicPartition, List<ConsumerRecord<String, String>>> firstPartitionRecords = new HashMap<>();
    firstPartitionRecords.put(partitionThatWillBeRevoked,
        Collections.singletonList(new ConsumerRecord<>(partitionThatWillBeRevoked.topic(), partitionThatWillBeRevoked.partition(), 0L, "key", "value")));
    Map<TopicPartition, List<ConsumerRecord<String, String>>> secondPartitionRecords = new HashMap<>();
    secondPartitionRecords.put(assignedPartition,
        Collections.singletonList(new ConsumerRecord<>(assignedPartition.topic(), assignedPartition.partition(), 0L, "key", "value")));
    when(consumerMock.poll(anyLong()))
        .thenReturn(new ConsumerRecords<>(firstPartitionRecords))
        .thenReturn(new ConsumerRecords<>(secondPartitionRecords))
        .thenReturn(new ConsumerRecords<>(Collections.emptyMap()));
    //Emit the messages
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForRevokedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyObject(), anyObject(), messageIdForRevokedPartition.capture());
    reset(collectorMock);
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForAssignedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyObject(), anyObject(), messageIdForAssignedPartition.capture());
    //Now rebalance
    consumerRebalanceListener.onPartitionsRevoked(assignedPartitions);
    consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(assignedPartition));
    List<KafkaSpoutMessageId> emittedMessageIds = new ArrayList<>();
    emittedMessageIds.add(messageIdForRevokedPartition.getValue());
    emittedMessageIds.add(messageIdForAssignedPartition.getValue());
    return emittedMessageIds;
}
Also used: HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List)
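
Capturing the rebalance listener that the spout registers through subscribe() is the step that lets the test drive partition assignment by hand. A minimal sketch of just that capture, assuming Mockito and with illustrative class and method names, follows.

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.mockito.ArgumentCaptor;
import static org.mockito.Mockito.anyCollection;
import static org.mockito.Mockito.verify;

public class RebalanceListenerCapture {

    // Pulls out the ConsumerRebalanceListener that the code under test
    // registered via subscribe(topics, listener), so the test can invoke
    // onPartitionsAssigned/onPartitionsRevoked directly.
    public static ConsumerRebalanceListener capture(KafkaConsumer<String, String> consumerMock) {
        ArgumentCaptor<ConsumerRebalanceListener> captor =
            ArgumentCaptor.forClass(ConsumerRebalanceListener.class);
        verify(consumerMock).subscribe(anyCollection(), captor.capture());
        return captor.getValue();
    }
}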

Example 15 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project ignite by apache.

The class IgniteSourceConnectorTest, method checkDataDelivered.

/**
     * Checks if events were delivered to Kafka server.
     *
     * @param expectedEventsCnt Expected events count.
     * @throws Exception If failed.
     */
private void checkDataDelivered(final int expectedEventsCnt) throws Exception {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBroker.getBrokerAddress());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-grp");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
    props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 10000);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.ignite.stream.kafka.connect.serialization.CacheEventDeserializer");
    final KafkaConsumer<String, CacheEvent> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(TOPICS));
    final AtomicInteger evtCnt = new AtomicInteger();
    try {
        // Wait for expected events count.
        GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
                for (ConsumerRecord<String, CacheEvent> record : records) {
                    info("Record: " + record);
                    evtCnt.getAndIncrement();
                }
                return evtCnt.get() >= expectedEventsCnt;
            }
        }, 20_000);
        info("Waiting for unexpected records for 5 secs.");
        assertFalse(GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
                for (ConsumerRecord<String, CacheEvent> record : records) {
                    error("Unexpected record: " + record);
                    evtCnt.getAndIncrement();
                }
                return evtCnt.get() > expectedEventsCnt;
            }
        }, 5_000));
    } catch (WakeupException ignored) {
    // ignore for shutdown.
    } finally {
        consumer.close();
        assertEquals(expectedEventsCnt, evtCnt.get());
    }
}
Also used: GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Properties(java.util.Properties) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) WakeupException(org.apache.kafka.common.errors.WakeupException) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) CacheEvent(org.apache.ignite.events.CacheEvent)
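
GridTestUtils.waitForCondition is Ignite-specific; the same wait-for-count idea can be sketched with only the Kafka client, assuming a version with the Duration-based poll (unlike the poll(10) call above). The class and method names here are illustrative.

import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class AwaitRecords {

    // Polls until either `expected` records have been seen or the deadline
    // passes, then returns the number actually seen.
    public static int pollUntil(KafkaConsumer<String, String> consumer, int expected, long timeoutMs) {
        long deadline = System.currentTimeMillis() + timeoutMs;
        int seen = 0;
        while (seen < expected && System.currentTimeMillis() < deadline) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            seen += records.count();
        }
        return seen;
    }
}

A test would then assert that pollUntil(consumer, expectedEventsCnt, 20_000) equals expectedEventsCnt, mirroring the first waitForCondition call above.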

Aggregations

ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords) - 15
HashMap (java.util.HashMap) - 11
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) - 10
TopicPartition (org.apache.kafka.common.TopicPartition) - 10
Properties (java.util.Properties) - 8
Test (org.junit.Test) - 7
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest) - 7
AtomicReference (java.util.concurrent.atomic.AtomicReference) - 6
UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup) - 6
KafkaConsumerThread (org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread) - 6
KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) - 6
TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) - 6
KeyedDeserializationSchemaWrapper (org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper) - 6
SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema) - 6
Mockito.anyLong (org.mockito.Mockito.anyLong) - 6
InvocationOnMock (org.mockito.invocation.InvocationOnMock) - 6
List (java.util.List) - 4
MultiShotLatch (org.apache.flink.core.testutils.MultiShotLatch) - 4
Handover (org.apache.flink.streaming.connectors.kafka.internal.Handover) - 4
Collection (java.util.Collection) - 3