
Example 46 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class RegexSourceIntegrationTest, method testMultipleConsumersCanReadFromPartitionedTopic.

@Test
public void testMultipleConsumersCanReadFromPartitionedTopic() throws Exception {
    KafkaStreams partitionedStreamsLeader = null;
    KafkaStreams partitionedStreamsFollower = null;
    try {
        final Serde<String> stringSerde = Serdes.String();
        final StreamsBuilder builderLeader = new StreamsBuilder();
        final StreamsBuilder builderFollower = new StreamsBuilder();
        final List<String> expectedAssignment = Arrays.asList(PARTITIONED_TOPIC_1, PARTITIONED_TOPIC_2);
        final KStream<String, String> partitionedStreamLeader = builderLeader.stream(Pattern.compile("partitioned-\\d"));
        final KStream<String, String> partitionedStreamFollower = builderFollower.stream(Pattern.compile("partitioned-\\d"));
        partitionedStreamLeader.to(outputTopic, Produced.with(stringSerde, stringSerde));
        partitionedStreamFollower.to(outputTopic, Produced.with(stringSerde, stringSerde));
        final List<String> leaderAssignment = new CopyOnWriteArrayList<>();
        final List<String> followerAssignment = new CopyOnWriteArrayList<>();
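        // Wrap the default consumer so pattern subscriptions are intercepted and each
        // Streams instance records the topics it is actually assigned, for the assertion below.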
        partitionedStreamsLeader = new KafkaStreams(builderLeader.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {

            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {

                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(leaderAssignment, listener));
                    }
                };
            }
        });
        partitionedStreamsFollower = new KafkaStreams(builderFollower.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {

            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {

                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(followerAssignment, listener));
                    }
                };
            }
        });
        partitionedStreamsLeader.start();
        partitionedStreamsFollower.start();
        TestUtils.waitForCondition(() -> followerAssignment.equals(expectedAssignment) && leaderAssignment.equals(expectedAssignment), "topic assignment not completed");
    } finally {
        if (partitionedStreamsLeader != null) {
            partitionedStreamsLeader.close();
        }
        if (partitionedStreamsFollower != null) {
            partitionedStreamsFollower.close();
        }
    }
}
Also used : Pattern(java.util.regex.Pattern) KafkaStreams(org.apache.kafka.streams.KafkaStreams) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Map(java.util.Map) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
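
For comparison, the same pattern subscription can be driven with a plain consumer instead of Streams. A minimal sketch, assuming a local broker at localhost:9092 and a placeholder group id (neither is part of the test above):

import java.time.Duration;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PatternConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "pattern-demo"); // placeholder group id
        // Deserializer instances are passed directly, as in the test above, instead of
        // configuring key.deserializer/value.deserializer by class name.
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Pattern.compile("partitioned-\\d"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<byte[], byte[]> record : records)
                System.out.printf("%s-%d@%d: %d bytes%n", record.topic(), record.partition(),
                        record.offset(), record.value() == null ? 0 : record.value().length);
        }
    }
}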

Example 47 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class FetcherTest, method testFetchedRecordsRaisesOnSerializationErrors.

@Test
public void testFetchedRecordsRaisesOnSerializationErrors() {
    // raise an exception from somewhere in the middle of the fetch response
    // so that we can verify that our position does not advance after raising
    ByteArrayDeserializer deserializer = new ByteArrayDeserializer() {

        int i = 0;
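        // deserialize() below is called alternately for keys and values (key first),
        // so i is odd exactly on value calls; each value deserialization raises.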

        @Override
        public byte[] deserialize(String topic, byte[] data) {
            if (i++ % 2 == 1) {
                // Should be blocked on the value deserialization of the first record.
                assertEquals("value-1", new String(data, StandardCharsets.UTF_8));
                throw new SerializationException();
            }
            return data;
        }
    };
    buildFetcher(deserializer, deserializer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    // The fetcher should keep raising the deserialization error; the position must not advance.
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.collectFetch();
            fail("fetchedRecords should have raised");
        } catch (SerializationException e) {
            // the position should not advance since no data has been returned
            assertEquals(1, subscriptions.position(tp0).offset);
        }
    }
}
Also used : SerializationException(org.apache.kafka.common.errors.SerializationException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Test(org.junit.jupiter.api.Test)
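
The same fault-injection trick can be packaged as a small reusable class. A minimal sketch; the class name and the failEveryNth knob are illustrative, not part of the test:

import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Poison-pill helper: passes bytes through but raises on every Nth deserialize call.
public class FailingByteArrayDeserializer extends ByteArrayDeserializer {
    private final int failEveryNth; // hypothetical knob: which calls should fail
    private int calls = 0;

    public FailingByteArrayDeserializer(int failEveryNth) {
        this.failEveryNth = failEveryNth;
    }

    @Override
    public byte[] deserialize(String topic, byte[] data) {
        if (++calls % failEveryNth == 0)
            throw new SerializationException("injected failure on call " + calls);
        return data;
    }
}

With failEveryNth = 2 this matches the test above: key calls pass, value calls raise.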

Example 48 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class FetcherTest, method testCompletedFetchRemoval.

@Test
public void testCompletedFetchRemoval() {
    // Ensure that completed fetches which raise an exception are removed if and only if they contain empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
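    // tidp1: normal records; tidp0: OFFSET_OUT_OF_RANGE error; tidp2: more records;
    // tidp3: partial (corrupt) records that raise a KafkaException on each collect.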
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData().setPartitionIndex(tp2.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData().setPartitionIndex(tp3.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());
    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        recordsByPartition = fetchedRecords();
        for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }
    // Should have received one OffsetOutOfRangeException for partition tp0
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(oor.offsetOutOfRangePartitions().size(), 1);
    recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    // Should not have received an Exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());
    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            recordsByPartition = fetchedRecords();
            for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received numExceptionsExpected KafkaExceptions for tp3.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
Also used : ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) LinkedHashMap(java.util.LinkedHashMap) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) KafkaException(org.apache.kafka.common.KafkaException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) Test(org.junit.jupiter.api.Test)
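
Worth noting for context: ByteArrayDeserializer (and its counterpart ByteArraySerializer) is an identity function over byte[], which is why these fetcher tests use it to move raw payloads around. A minimal round-trip sketch:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ByteArrayRoundTrip {
    public static void main(String[] args) {
        byte[] payload = "value-1".getBytes(StandardCharsets.UTF_8);
        try (ByteArraySerializer serializer = new ByteArraySerializer();
             ByteArrayDeserializer deserializer = new ByteArrayDeserializer()) {
            byte[] onWire = serializer.serialize("any-topic", payload); // identity for byte[]
            byte[] back = deserializer.deserialize("any-topic", onWire); // identity as well
            System.out.println(new String(back, StandardCharsets.UTF_8)); // prints value-1
        }
    }
}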

Example 49 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class FetcherTest, method testMultipleAbortMarkers.

@Test
public void testMultipleAbortMarkers() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Duplicate abort -- should be ignored.
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Now commit a transaction.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(fetchedRecords.get(tp0).size(), 2);
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(actuallyCommittedKeys, committedKeys);
}
Also used : ByteBuffer(java.nio.ByteBuffer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)
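
Outside the fetcher internals, the filtering this test verifies is what a consumer gets by setting isolation.level. A minimal configuration sketch (broker address and group id are placeholders):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ReadCommittedConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "txn-reader"); // placeholder
        // read_committed filters out records from aborted transactions (the "abort1-*"
        // keys above) and returns only committed and non-transactional data.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            // subscribe and poll as usual; aborted data never reaches the application
        }
    }
}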

Example 50 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class FetcherTest, method testFetcherConcurrency.

@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++) topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, 2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions, metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
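    // Background responder: answer each pending fetch request with two records per
    // partition, serialized with the polling loop below on the consumerClient monitor.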
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData().setPartitionIndex(tp.topicPartition().partition()).setHighWatermark(offset + 2).setLastStableOffset(offset + 2).setLogStartOffset(0).setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) BeforeEach(org.junit.jupiter.api.BeforeEach) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) SerializationException(org.apache.kafka.common.errors.SerializationException) KafkaException(org.apache.kafka.common.KafkaException) DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) Collections.singletonList(java.util.Collections.singletonList) ClientUtils(org.apache.kafka.clients.ClientUtils) Cluster(org.apache.kafka.common.Cluster) Future(java.util.concurrent.Future) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) DataOutputStream(java.io.DataOutputStream) ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TimestampType(org.apache.kafka.common.record.TimestampType) Sensor(org.apache.kafka.common.metrics.Sensor) CompressionType(org.apache.kafka.common.record.CompressionType) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Metrics(org.apache.kafka.common.metrics.Metrics) ApiMessageType(org.apache.kafka.common.message.ApiMessageType) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) FetchRequest(org.apache.kafka.common.requests.FetchRequest) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Records(org.apache.kafka.common.record.Records) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) ClientDnsLookup(org.apache.kafka.clients.ClientDnsLookup) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ListOffsetsPartitionResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) OffsetForLeaderEpochRequestData(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData) INVALID_SESSION_ID(org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID) ArrayList(java.util.ArrayList) 
LinkedHashMap(java.util.LinkedHashMap) UNDEFINED_EPOCH(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH) NetworkClient(org.apache.kafka.clients.NetworkClient) Deserializer(org.apache.kafka.common.serialization.Deserializer) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MockSelector(org.apache.kafka.test.MockSelector) Field(java.lang.reflect.Field) ApiVersions(org.apache.kafka.clients.ApiVersions) MetricNameTemplate(org.apache.kafka.common.MetricNameTemplate) OffsetForLeaderEpochResponseData(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData) Assertions.assertArrayEquals(org.junit.jupiter.api.Assertions.assertArrayEquals) AfterEach(org.junit.jupiter.api.AfterEach) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) LogTruncationException(org.apache.kafka.clients.consumer.LogTruncationException) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) ByteBuffer(java.nio.ByteBuffer) ClientRequest(org.apache.kafka.clients.ClientRequest) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) Record(org.apache.kafka.common.record.Record) Collections.singleton(java.util.Collections.singleton) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetricName(org.apache.kafka.common.MetricName) OffsetForLeaderTopicResult(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult) ListOffsetsTopic(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.emptyList(java.util.Collections.emptyList) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) Test(org.junit.jupiter.api.Test) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Header(org.apache.kafka.common.header.Header) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Optional(java.util.Optional) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) OffsetsForLeaderEpochResponse(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse) Uuid(org.apache.kafka.common.Uuid) EpochEndOffset(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset) Metadata(org.apache.kafka.clients.Metadata) 
EndTransactionMarker(org.apache.kafka.common.record.EndTransactionMarker) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) HashMap(java.util.HashMap) Function(java.util.function.Function) HashSet(java.util.HashSet) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) Collections.singletonMap(java.util.Collections.singletonMap) ExecutorService(java.util.concurrent.ExecutorService) Utils(org.apache.kafka.common.utils.Utils) UNDEFINED_EPOCH_OFFSET(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) MockClient(org.apache.kafka.clients.MockClient) ListOffsetsPartition(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) Iterator(java.util.Iterator) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) TimeUnit(java.util.concurrent.TimeUnit) IsolationLevel(org.apache.kafka.common.IsolationLevel) DelayedReceive(org.apache.kafka.test.DelayedReceive) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) OffsetsForLeaderEpochRequest(org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest) Collections(java.util.Collections)
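
The verifySessionPartitions helper above reads FetchSessionHandler's private sessionPartitions field through plain reflection. A standalone sketch of that pattern; Holder and its field are illustrative stand-ins, not Kafka classes:

import java.lang.reflect.Field;
import java.util.LinkedHashMap;
import java.util.Map;

public class PrivateFieldPeek {
    static class Holder {
        private final Map<String, Integer> sessionPartitions = new LinkedHashMap<>();
    }

    public static void main(String[] args) throws Exception {
        Holder holder = new Holder();
        holder.sessionPartitions.put("tp-0", 0);
        Field field = Holder.class.getDeclaredField("sessionPartitions");
        field.setAccessible(true); // open up the private field for this test-style peek
        Map<?, ?> peeked = (Map<?, ?>) field.get(holder);
        System.out.println(peeked); // prints {tp-0=0}
    }
}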

Aggregations

ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 59
TopicPartition (org.apache.kafka.common.TopicPartition): 24
ArrayList (java.util.ArrayList): 22
Test (org.junit.Test): 22
Test (org.junit.jupiter.api.Test): 22
List (java.util.List): 17
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 17
HashMap (java.util.HashMap): 16
ByteBuffer (java.nio.ByteBuffer): 14
LinkedHashMap (java.util.LinkedHashMap): 14
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 14
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 14
HashSet (java.util.HashSet): 10
Properties (java.util.Properties): 10
Metrics (org.apache.kafka.common.metrics.Metrics): 10
Arrays.asList (java.util.Arrays.asList): 9
Collections.emptyList (java.util.Collections.emptyList): 9
Collections.singletonList (java.util.Collections.singletonList): 9
Map (java.util.Map): 9
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener): 7