
Example 66 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project druid by druid-io.

the class KafkaIndexTask method sendResetRequestAndWait.

private void sendResetRequestAndWait(Map<TopicPartition, Long> outOfRangePartitions, TaskToolbox taskToolbox) throws IOException {
    Map<Integer, Long> partitionOffsetMap = Maps.newHashMap();
    for (Map.Entry<TopicPartition, Long> outOfRangePartition : outOfRangePartitions.entrySet()) {
        partitionOffsetMap.put(outOfRangePartition.getKey().partition(), outOfRangePartition.getValue());
    }
    boolean result = taskToolbox.getTaskActionClient().submit(
        new ResetDataSourceMetadataAction(
            getDataSource(),
            new KafkaDataSourceMetadata(
                new KafkaPartitions(ioConfig.getStartPartitions().getTopic(), partitionOffsetMap))));
    if (result) {
        log.makeAlert("Resetting Kafka offsets for datasource [%s]", getDataSource()).addData("partitions", partitionOffsetMap.keySet()).emit();
        // wait to be killed by the supervisor
        try {
            Thread.sleep(Long.MAX_VALUE);
        } catch (InterruptedException e) {
            // restore the interrupt status before surfacing the failure
            Thread.currentThread().interrupt();
            throw new RuntimeException("Got interrupted while waiting to be killed");
        }
    } else {
        log.makeAlert("Failed to send reset request for partitions [%s]", partitionOffsetMap.keySet()).emit();
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ResetDataSourceMetadataAction(io.druid.indexing.common.actions.ResetDataSourceMetadataAction)
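For context on the input shape: the method receives whole TopicPartition objects mapped to their out-of-range offsets, and only the integer partition ids survive into the reset action (the topic comes from the task's ioConfig). A minimal sketch of building such a map, using the TopicPartition and HashMap types listed above; the topic name and offsets are invented for illustration:

Map<TopicPartition, Long> outOfRangePartitions = new HashMap<>();
outOfRangePartitions.put(new TopicPartition("metrics", 0), 1042L);
outOfRangePartitions.put(new TopicPartition("metrics", 1), 998L);
// sendResetRequestAndWait() then collapses this to a partition-id -> offset map, e.g. {0=1042, 1=998}.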

Example 67 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project beam by apache.

the class KafkaIOTest method mkMockConsumer.

// Update the mock consumer with records distributed among the given topics, each with the
// given number of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(List<String> topics, int partitionsPerTopic, int numElements, OffsetResetStrategy offsetResetStrategy) {
    final List<TopicPartition> partitions = new ArrayList<>();
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
    Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
    for (String topic : topics) {
        List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
        for (int i = 0; i < partitionsPerTopic; i++) {
            TopicPartition tp = new TopicPartition(topic, i);
            partitions.add(tp);
            partIds.add(new PartitionInfo(topic, i, null, null, null));
            records.put(tp, new ArrayList<ConsumerRecord<byte[], byte[]>>());
        }
        partitionMap.put(topic, partIds);
    }
    int numPartitions = partitions.size();
    final long[] offsets = new long[numPartitions];
    for (int i = 0; i < numElements; i++) {
        int pIdx = i % numPartitions;
        TopicPartition tp = partitions.get(pIdx);
        records.get(tp).add(new ConsumerRecord<>(
            tp.topic(),
            tp.partition(),
            offsets[pIdx]++,
            ByteBuffer.wrap(new byte[4]).putInt(i).array(),    // key is a 4-byte record id
            ByteBuffer.wrap(new byte[8]).putLong(i).array())); // value is an 8-byte record id
    }
    // This is updated when reader assigns partitions.
    final AtomicReference<List<TopicPartition>> assignedPartitions = new AtomicReference<>(Collections.<TopicPartition>emptyList());
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(offsetResetStrategy) {

        // Override assign() in order to set offset limits and to save the assigned partitions.
        // '@Override' is intentionally omitted so this compiles against both Kafka client 0.9
        // and 0.10: the superclass method takes a List in 0.9 and a Collection in 0.10, and
        // since List extends Collection, super.assign(assigned) resolves against either one.
        public void assign(final List<TopicPartition> assigned) {
            super.assign(assigned);
            assignedPartitions.set(ImmutableList.copyOf(assigned));
            for (TopicPartition tp : assigned) {
                updateBeginningOffsets(ImmutableMap.of(tp, 0L));
                updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
            }
        }

        // Override offsetsForTimes() in order to look up offsets by timestamp. '@Override' is
        // omitted because Kafka client versions before 0.10.1.0 do not have this method. It
        // should return Map<TopicPartition, OffsetAndTimestamp>, but versions before 0.10.1.0
        // also lack the OffsetAndTimestamp class, so return a raw Map and construct the value
        // via reflection.
        @SuppressWarnings("unchecked")
        public Map offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
            HashMap<TopicPartition, Object> result = new HashMap<>();
            try {
                Class<?> cls = Class.forName("org.apache.kafka.clients.consumer.OffsetAndTimestamp");
                // OffsetAndTimestamp(long offset, long timestamp)
                Constructor<?> constructor = cls.getDeclaredConstructor(long.class, long.class);
                // In test scope, timestamp == offset. A timestamp at or beyond the partition's
                // max offset has no matching record; per the Kafka contract such an entry maps
                // to null (passing null through the reflective long constructor would throw).
                for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
                    long maxOffset = offsets[partitions.indexOf(entry.getKey())];
                    Long offset = entry.getValue();
                    if (offset >= maxOffset) {
                        result.put(entry.getKey(), null);
                    } else {
                        result.put(entry.getKey(), constructor.newInstance(offset, offset));
                    }
                }
                }
                return result;
            } catch (ClassNotFoundException | IllegalAccessException | InstantiationException | NoSuchMethodException | InvocationTargetException e) {
                throw new RuntimeException(e);
            }
        }
    };
    for (String topic : topics) {
        consumer.updatePartitions(topic, partitionMap.get(topic));
    }
    // MockConsumer does not maintain any relationship between the partition seek position and
    // the records added. E.g. if we add 10 records to a partition and then seek to the end of
    // the partition, MockConsumer will still return the 10 records on the next poll. It is our
    // responsibility to keep the currently enqueued records in sync with the partition offsets.
    // The following task runs inside each invocation of MockConsumer.poll(); it enqueues only
    // the records with an offset >= the partition's current position.
    Runnable recordEnqueueTask = new Runnable() {

        @Override
        public void run() {
            // add all the records with offset >= current partition position.
            for (TopicPartition tp : assignedPartitions.get()) {
                long curPos = consumer.position(tp);
                for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
                    if (r.offset() >= curPos) {
                        consumer.addRecord(r);
                    }
                }
            }
            consumer.schedulePollTask(this);
        }
    };
    consumer.schedulePollTask(recordEnqueueTask);
    return consumer;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) PCollectionList(org.apache.beam.sdk.values.PCollectionList) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Constructor(java.lang.reflect.Constructor) AtomicReference(java.util.concurrent.atomic.AtomicReference) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) InvocationTargetException(java.lang.reflect.InvocationTargetException) TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
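A hedged usage sketch (not part of the Beam test itself): a caller builds the mock, assigns partitions through the overridden assign(), and polls; the scheduled enqueue task then feeds back exactly the records at or after each partition's position. The topic name and counts below are assumptions for illustration:

MockConsumer<byte[], byte[]> consumer =
    mkMockConsumer(ImmutableList.of("topic_a"), 2, 10, OffsetResetStrategy.EARLIEST);
// The overridden assign() records the partitions and sets begin/end offset limits.
consumer.assign(ImmutableList.of(
    new TopicPartition("topic_a", 0), new TopicPartition("topic_a", 1)));
// poll() first runs the scheduled enqueue task, so the 10 records (5 per partition,
// assigned round-robin) come back starting from each partition's current position.
ConsumerRecords<byte[], byte[]> records = consumer.poll(10);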

Example 68 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project flink by apache.

the class Kafka010FetcherTest method testCancellationWhenEmitBlocks.

@Test
public void testCancellationWhenEmitBlocks() throws Exception {
    // ----- some test data -----
    final String topic = "test-topic";
    final int partition = 3;
    final byte[] payload = new byte[] { 1, 2, 3, 4 };
    final List<ConsumerRecord<byte[], byte[]>> records = Arrays.asList(
        new ConsumerRecord<byte[], byte[]>(topic, partition, 15, payload, payload),
        new ConsumerRecord<byte[], byte[]>(topic, partition, 16, payload, payload),
        new ConsumerRecord<byte[], byte[]>(topic, partition, 17, payload, payload));
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> data = new HashMap<>();
    data.put(new TopicPartition(topic, partition), records);
    final ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(data);
    // ----- the test consumer -----
    final KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) {
            return consumerRecords;
        }
    });
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- build a fetcher -----
    BlockingSourceContext<String> sourceContext = new BlockingSourceContext<>();
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition(topic, partition), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka010Fetcher<String> fetcher = new Kafka010Fetcher<>(
        sourceContext,
        partitionsWithInitialOffsets,
        null, /* periodic watermark extractor */
        null, /* punctuated watermark extractor */
        new TestProcessingTimeService(),
        10, /* watermark interval */
        this.getClass().getClassLoader(),
        "task_name",
        new UnregisteredMetricsGroup(),
        schema,
        new Properties(),
        0L,
        false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // wait until the thread has started emitting records to the source context
    sourceContext.waitTillHasBlocker();
    // now cancel the fetcher, including the interrupt that the task thread would normally issue;
    // once it has finished, no thread may remain blocked on the source context
    fetcher.cancel();
    fetcherRunner.interrupt();
    fetcherRunner.join();
    assertFalse("fetcher threads did not properly finish", sourceContext.isStillBlocking());
}
Also used : UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) HashMap(java.util.HashMap) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) Properties(java.util.Properties) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) KeyedDeserializationSchemaWrapper(org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper) List(java.util.List) AtomicReference(java.util.concurrent.atomic.AtomicReference) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) KafkaConsumerThread(org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Mockito.anyLong(org.mockito.Mockito.anyLong) SimpleStringSchema(org.apache.flink.streaming.util.serialization.SimpleStringSchema) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) Kafka010Fetcher(org.apache.flink.streaming.connectors.kafka.internal.Kafka010Fetcher) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
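The BlockingSourceContext used above is internal to Flink's tests, but the pattern being verified can be sketched independently: a worker blocks inside an emit, and cancellation must release it via interrupt so join() returns promptly. All names below are invented for illustration (this sketch assumes an enclosing test method that declares throws InterruptedException):

final SynchronousQueue<String> handoff = new SynchronousQueue<>();   // emit blocks until consumed
Thread emitter = new Thread("emitter") {

    @Override
    public void run() {
        try {
            handoff.put("record");   // blocks: nobody is taking
        } catch (InterruptedException e) {
            // cancellation path: the interrupt unblocks the pending emit
        }
    }
};
emitter.start();
emitter.interrupt();   // the fetcher.cancel() + fetcherRunner.interrupt() step, condensed
emitter.join();        // must return promptly, mirroring the test's final assertion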

Example 69 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project flink by apache.

the class Kafka010FetcherTest method ensureOffsetsGetCommitted.

@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);
    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);
    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);
    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();
    // ----- the mock consumer with poll(), wakeup(), and commitAsync() calls -----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked") Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));
    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked") SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka010Fetcher<String> fetcher = new Kafka010Fetcher<>(
        sourceContext,
        partitionsWithInitialOffsets,
        null, /* periodic assigner */
        null, /* punctuated assigner */
        new TestProcessingTimeService(),
        10,
        getClass().getClassLoader(),
        "taskname-with-subtask",
        new UnregisteredMetricsGroup(),
        schema,
        new Properties(),
        0L,
        false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // ----- trigger the first offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(18L, entry.getValue().offset());
        }
    }
    // ----- trigger the second offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(28L, entry.getValue().offset());
        }
    }
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();
    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
Also used : UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) HashMap(java.util.HashMap) MultiShotLatch(org.apache.flink.core.testutils.MultiShotLatch) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Properties(java.util.Properties) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) KeyedDeserializationSchemaWrapper(org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Handover(org.apache.flink.streaming.connectors.kafka.internal.Handover) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaConsumerThread(org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) Mockito.anyLong(org.mockito.Mockito.anyLong) SimpleStringSchema(org.apache.flink.streaming.util.serialization.SimpleStringSchema) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) HashMap(java.util.HashMap) Map(java.util.Map) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) Kafka010Fetcher(org.apache.flink.streaming.connectors.kafka.internal.Kafka010Fetcher) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
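A note on the asserted values: the test commits internal offset 11 for partition "test"-42 but asserts a committed Kafka offset of 12. Flink records the offset of the last message it processed, while a Kafka committed offset denotes the next message to read, so the fetcher adds one before calling commitAsync(). A one-line illustration using the constants from the test:

long lastProcessed = 11L;   // Flink's internal bookkeeping (testCommitData1)
OffsetAndMetadata committed = new OffsetAndMetadata(lastProcessed + 1);
// committed.offset() == 12L, matching assertEquals(12L, entry.getValue().offset())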

Example 70 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project flink by apache.

the class KafkaConsumerThread method run.

// ------------------------------------------------------------------------
@Override
public void run() {
    // early exit check
    if (!running) {
        return;
    }
    // this is the means to talk to FlinkKafkaConsumer's main thread
    final Handover handover = this.handover;
    // This method initializes the KafkaConsumer and guarantees it is torn down properly.
    // This is important, because the consumer has multi-threading issues,
    // including concurrent 'close()' calls.
    final KafkaConsumer<byte[], byte[]> consumer;
    try {
        consumer = new KafkaConsumer<>(kafkaProperties);
    } catch (Throwable t) {
        handover.reportError(t);
        return;
    }
    // from here on, the consumer is guaranteed to be closed properly
    try {
        // The callback invoked by Kafka once an offset commit is complete
        final OffsetCommitCallback offsetCommitCallback = new CommitCallback();
        // tell the consumer which partitions to work with
        consumerCallBridge.assignPartitions(consumer, convertKafkaPartitions(subscribedPartitionStates));
        // register Kafka's very own metrics in Flink's metric reporters
        if (useMetrics) {
            // register Kafka metrics to Flink
            Map<MetricName, ? extends Metric> metrics = consumer.metrics();
            if (metrics == null) {
                // MapR's Kafka implementation returns null here.
                log.info("Consumer implementation does not support metrics");
            } else {
                // we have Kafka metrics, register them
                for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
                    kafkaMetricGroup.gauge(metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
                }
            }
        }
        // early exit check
        if (!running) {
            return;
        }
        // the partition states may not hold real offsets yet, only sentinel placeholder
        // values; replace those with actual offsets, according to what each sentinel value
        // represents.
        for (KafkaTopicPartitionState<TopicPartition> partition : subscribedPartitionStates) {
            if (partition.getOffset() == KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET) {
                consumerCallBridge.seekPartitionToBeginning(consumer, partition.getKafkaPartitionHandle());
                partition.setOffset(consumer.position(partition.getKafkaPartitionHandle()) - 1);
            } else if (partition.getOffset() == KafkaTopicPartitionStateSentinel.LATEST_OFFSET) {
                consumerCallBridge.seekPartitionToEnd(consumer, partition.getKafkaPartitionHandle());
                partition.setOffset(consumer.position(partition.getKafkaPartitionHandle()) - 1);
            } else if (partition.getOffset() == KafkaTopicPartitionStateSentinel.GROUP_OFFSET) {
                // the KafkaConsumer by default will automatically seek the consumer position
                // to the committed group offset, so we do not need to do it.
                partition.setOffset(consumer.position(partition.getKafkaPartitionHandle()) - 1);
            } else {
                consumer.seek(partition.getKafkaPartitionHandle(), partition.getOffset() + 1);
            }
        }
        // from now on, external operations may call the consumer
        this.consumer = consumer;
        // the latest bulk of records; may carry across loop iterations if the thread is woken
        // up from blocking on the handover
        ConsumerRecords<byte[], byte[]> records = null;
        // main fetch loop
        while (running) {
            // check if there is something to commit
            if (!commitInProgress) {
                // get and reset the work-to-be committed, so we don't repeatedly commit the same
                final Map<TopicPartition, OffsetAndMetadata> toCommit = nextOffsetsToCommit.getAndSet(null);
                if (toCommit != null) {
                    log.debug("Sending async offset commit request to Kafka broker");
                    // also record that a commit is already in progress
                    // the order here matters! first set the flag, then send the commit command.
                    commitInProgress = true;
                    consumer.commitAsync(toCommit, offsetCommitCallback);
                }
            }
            // get the next batch of records, unless we did not manage to hand the old batch over
            if (records == null) {
                try {
                    records = consumer.poll(pollTimeout);
                } catch (WakeupException we) {
                    continue;
                }
            }
            try {
                handover.produce(records);
                records = null;
            } catch (Handover.WakeupException e) {
                // fall through the loop
            }
        }
    // end main fetch loop
    } catch (Throwable t) {
        // let the main thread know and exit
        // it may be that this exception comes because the main thread closed the handover, in
        // which case the below reporting is irrelevant, but does not hurt either
        handover.reportError(t);
    } finally {
        // make sure the handover is closed if it is not already closed or has an error
        handover.close();
        // make sure the KafkaConsumer is closed
        try {
            consumer.close();
        } catch (Throwable t) {
            log.warn("Error while closing Kafka consumer", t);
        }
    }
}
Also used : OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) WakeupException(org.apache.kafka.common.errors.WakeupException) MetricName(org.apache.kafka.common.MetricName) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) KafkaMetricWrapper(org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaMetricWrapper) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) Map(java.util.Map)
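The getAndSet(null) drain in the fetch loop implies a producer side that publishes offset batches into nextOffsetsToCommit; in Flink this happens when a checkpoint completes. A hedged sketch of that counterpart, with the field and values assumed for illustration:

// Shared with the consumer thread; the fetch loop drains it with getAndSet(null),
// so each published batch is committed at most once.
AtomicReference<Map<TopicPartition, OffsetAndMetadata>> nextOffsetsToCommit =
        new AtomicReference<>();

Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
toCommit.put(new TopicPartition("test", 42), new OffsetAndMetadata(12L));
nextOffsetsToCommit.set(toCommit);   // picked up on the next pass of the fetch loop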

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition): 1729 usages
HashMap (java.util.HashMap): 744
Test (org.junit.Test): 519
ArrayList (java.util.ArrayList): 416
Map (java.util.Map): 361
Test (org.junit.jupiter.api.Test): 347
HashSet (java.util.HashSet): 281
List (java.util.List): 260
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 246
Set (java.util.Set): 189
LinkedHashMap (java.util.LinkedHashMap): 180
PartitionInfo (org.apache.kafka.common.PartitionInfo): 170
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 155
TaskId (org.apache.kafka.streams.processor.TaskId): 145
Node (org.apache.kafka.common.Node): 140
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 109
KafkaException (org.apache.kafka.common.KafkaException): 105
Errors (org.apache.kafka.common.protocol.Errors): 105
ByteBuffer (java.nio.ByteBuffer): 99
Properties (java.util.Properties): 93