
Example 11 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.

From the class RecordAccumulatorTest, method testRetryBackoff.

@Test
public void testRetryBackoff() throws Exception {
    long lingerMs = Long.MAX_VALUE / 4;
    long retryBackoffMs = Long.MAX_VALUE / 2;
    final RecordAccumulator accum = new RecordAccumulator(logContext, 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, lingerMs, retryBackoffMs, metrics, time, new ApiVersions(), null);
    long now = time.milliseconds();
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, now + lingerMs + 1);
    assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
    Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1);
    assertEquals("Node1 should be the only ready node.", 1, batches.size());
    assertEquals("Partition 0 should only have one batch drained.", 1, batches.get(0).size());
    // Reenqueue the batch
    now = time.milliseconds();
    accum.reenqueue(batches.get(0).get(0), now);
    // Put message for partition 1 into accumulator
    accum.append(tp2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    result = accum.ready(cluster, now + lingerMs + 1);
    assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
    // tp1 should backoff while tp2 should not
    batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1);
    assertEquals("Node1 should be the only ready node.", 1, batches.size());
    assertEquals("Node1 should only have one batch drained.", 1, batches.get(0).size());
    assertEquals("Node1 should only have one batch for partition 1.", tp2, batches.get(0).get(0).topicPartition);
    // Partition 0 can be drained after retry backoff
    result = accum.ready(cluster, now + retryBackoffMs + 1);
    assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
    batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + retryBackoffMs + 1);
    assertEquals("Node1 should be the only ready node.", 1, batches.size());
    assertEquals("Node1 should only have one batch drained.", 1, batches.get(0).size());
    assertEquals("Node1 should only have one batch for partition 0.", tp1, batches.get(0).get(0).topicPartition);
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ApiVersions(org.apache.kafka.clients.ApiVersions) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ArrayList(java.util.ArrayList) Arrays.asList(java.util.Arrays.asList) List(java.util.List) Test(org.junit.Test)
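
Note that the test picks lingerMs = Long.MAX_VALUE / 4 and retryBackoffMs = Long.MAX_VALUE / 2, so batch readiness is driven entirely by the explicit now + lingerMs + 1 and now + retryBackoffMs + 1 timestamps passed to ready() and drain(), never by the wall clock. The accumulator only consults the ApiVersions argument to pick a usable record format, which is why a bare new ApiVersions() suffices. A minimal sketch of that interaction, assuming the Kafka 1.x-era client API these examples use:

import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.NodeApiVersions;

// A minimal sketch, assuming the Kafka 1.x-era client API used in these examples.
ApiVersions apiVersions = new ApiVersions();
// NodeApiVersions.create() fabricates version info for a node that supports the
// latest version of every API.
apiVersions.update("0", NodeApiVersions.create());
// The accumulator calls this to decide which record-batch magic it may use;
// with no per-node information it falls back to the current magic value.
byte magic = apiVersions.maxUsableProduceMagic();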

Example 12 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.

From the class SenderTest, method testQuotaMetrics.

/*
     * Send multiple requests. Verify that the client-side quota metrics have the right values.
     */
@Test
@SuppressWarnings("deprecation")
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, time, true, new ApiVersions(), throttleTimeSensor, logContext);
    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds())) client.poll(1, time.milliseconds());
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        ProduceRequest.Builder builder = ProduceRequest.Builder.forCurrentMagic((short) 1, 1000, Collections.<TopicPartition, MemoryRecords>emptyMap());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs);
        buffer = response.serialize(ApiKeys.PRODUCE.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeAvg);
    KafkaMetric maxMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeMax);
    // Throttle times are ApiVersions=400, Produce=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPS);
    assertEquals(400, maxMetric.value(), EPS);
    client.close();
}
Also used : ResponseHeader(org.apache.kafka.common.requests.ResponseHeader) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) Node(org.apache.kafka.common.Node) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) Cluster(org.apache.kafka.common.Cluster) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) ByteBuffer(java.nio.ByteBuffer) MockSelector(org.apache.kafka.test.MockSelector) MetricName(org.apache.kafka.common.MetricName) NetworkClient(org.apache.kafka.clients.NetworkClient) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) DelayedReceive(org.apache.kafka.test.DelayedReceive) ClientRequest(org.apache.kafka.clients.ClientRequest) Sensor(org.apache.kafka.common.metrics.Sensor) Test(org.junit.Test)
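
The expected values follow directly from the recorded throttle times: the ApiVersionsResponse contributes 400 ms and the three Produce responses contribute 100, 200 and 300 ms, so the average is (400 + 100 + 200 + 300) / 4 = 250 ms and the maximum is 400 ms. The @SuppressWarnings("deprecation") annotation is needed because KafkaMetric.value() is deprecated in newer client versions.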

Example 13 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testQuotaMetrics.

/*
     * Send multiple requests. Verify that the client-side quota metrics have the right values.
     */
@Test
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Fetcher.throttleTimeSensor(metrics, metricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, time, true, new ApiVersions(), throttleTimeSensor, new LogContext());
    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds())) client.poll(1, time.milliseconds());
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        FetchRequest.Builder builder = FetchRequest.Builder.forConsumer(100, 100, new LinkedHashMap<TopicPartition, PartitionData>());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        FetchResponse response = fullFetchResponse(tp0, nextRecords, Errors.NONE, i, throttleTimeMs);
        buffer = response.serialize(ApiKeys.FETCH.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeAvg));
    KafkaMetric maxMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeMax));
    // Throttle times are ApiVersions=400, Fetch=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPSILON);
    assertEquals(400, maxMetric.value(), EPSILON);
    client.close();
}
Also used : ResponseHeader(org.apache.kafka.common.requests.ResponseHeader) Node(org.apache.kafka.common.Node) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) Cluster(org.apache.kafka.common.Cluster) LogContext(org.apache.kafka.common.utils.LogContext) FetchResponse(org.apache.kafka.common.requests.FetchResponse) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) ByteBuffer(java.nio.ByteBuffer) MockSelector(org.apache.kafka.test.MockSelector) MetricName(org.apache.kafka.common.MetricName) NetworkClient(org.apache.kafka.clients.NetworkClient) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) FetchRequest(org.apache.kafka.common.requests.FetchRequest) DelayedReceive(org.apache.kafka.test.DelayedReceive) ClientRequest(org.apache.kafka.clients.ClientRequest) Sensor(org.apache.kafka.common.metrics.Sensor) Test(org.junit.Test)
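
This is the consumer-side counterpart of the previous example: the fetch metrics are resolved through metrics.metricInstance(...) instead of coming pre-registered from the sender registry, but the throttle-time arithmetic is identical. On clients where KafkaMetric.value() is deprecated, the reading can go through the non-deprecated metricValue() accessor instead. A minimal sketch, assuming the same Metrics registry and fields as the test above:

// metricValue() returns Object, so a double-valued (measurable) metric needs a cast.
KafkaMetric avgMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeAvg));
double avgThrottleTimeMs = (double) avgMetric.metricValue();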

Example 14 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From the class FetcherTest, method testFetcherConcurrency.

@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++) topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, 2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions, metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData().setPartitionIndex(tp.topicPartition().partition()).setHighWatermark(offset + 2).setLastStableOffset(offset + 2).setLogStartOffset(0).setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) BeforeEach(org.junit.jupiter.api.BeforeEach) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) SerializationException(org.apache.kafka.common.errors.SerializationException) KafkaException(org.apache.kafka.common.KafkaException) DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) Collections.singletonList(java.util.Collections.singletonList) ClientUtils(org.apache.kafka.clients.ClientUtils) Cluster(org.apache.kafka.common.Cluster) Future(java.util.concurrent.Future) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) DataOutputStream(java.io.DataOutputStream) ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TimestampType(org.apache.kafka.common.record.TimestampType) Sensor(org.apache.kafka.common.metrics.Sensor) CompressionType(org.apache.kafka.common.record.CompressionType) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Metrics(org.apache.kafka.common.metrics.Metrics) ApiMessageType(org.apache.kafka.common.message.ApiMessageType) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) FetchRequest(org.apache.kafka.common.requests.FetchRequest) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Records(org.apache.kafka.common.record.Records) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) ClientDnsLookup(org.apache.kafka.clients.ClientDnsLookup) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ListOffsetsPartitionResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) OffsetForLeaderEpochRequestData(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData) INVALID_SESSION_ID(org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) UNDEFINED_EPOCH(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH) NetworkClient(org.apache.kafka.clients.NetworkClient) Deserializer(org.apache.kafka.common.serialization.Deserializer) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MockSelector(org.apache.kafka.test.MockSelector) Field(java.lang.reflect.Field) ApiVersions(org.apache.kafka.clients.ApiVersions) MetricNameTemplate(org.apache.kafka.common.MetricNameTemplate) OffsetForLeaderEpochResponseData(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData) Assertions.assertArrayEquals(org.junit.jupiter.api.Assertions.assertArrayEquals) AfterEach(org.junit.jupiter.api.AfterEach) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) LogTruncationException(org.apache.kafka.clients.consumer.LogTruncationException) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) ByteBuffer(java.nio.ByteBuffer) ClientRequest(org.apache.kafka.clients.ClientRequest) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) Record(org.apache.kafka.common.record.Record) Collections.singleton(java.util.Collections.singleton) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetricName(org.apache.kafka.common.MetricName) OffsetForLeaderTopicResult(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult) ListOffsetsTopic(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.emptyList(java.util.Collections.emptyList) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) Test(org.junit.jupiter.api.Test) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Header(org.apache.kafka.common.header.Header) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Optional(java.util.Optional) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) OffsetsForLeaderEpochResponse(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse) Uuid(org.apache.kafka.common.Uuid) EpochEndOffset(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset) Metadata(org.apache.kafka.clients.Metadata) EndTransactionMarker(org.apache.kafka.common.record.EndTransactionMarker) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) HashMap(java.util.HashMap) Function(java.util.function.Function) HashSet(java.util.HashSet) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) Collections.singletonMap(java.util.Collections.singletonMap) ExecutorService(java.util.concurrent.ExecutorService) Utils(org.apache.kafka.common.utils.Utils) UNDEFINED_EPOCH_OFFSET(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) MockClient(org.apache.kafka.clients.MockClient) ListOffsetsPartition(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) Iterator(java.util.Iterator) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) TimeUnit(java.util.concurrent.TimeUnit) IsolationLevel(org.apache.kafka.common.IsolationLevel) DelayedReceive(org.apache.kafka.test.DelayedReceive) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) OffsetsForLeaderEpochRequest(org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest) Collections(java.util.Collections)
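
The reflective verifySessionPartitions() hook exists to detect unsynchronized traversal of the handler's private sessionPartitions map while the response-serving thread mutates it. Thread.yield() does not force a failure; it only widens the window in which a concurrent structural modification can surface as a ConcurrentModificationException. The following self-contained sketch (illustrative only, not part of the Kafka test) reproduces the same failure mode on a plain LinkedHashMap:

import java.util.ConcurrentModificationException;
import java.util.LinkedHashMap;
import java.util.Map;

public class ConcurrentTraversalDemo {
    public static void main(String[] args) throws InterruptedException {
        Map<Integer, Integer> map = new LinkedHashMap<>();
        for (int i = 0; i < 10_000; i++) map.put(i, i);
        // Writer thread performs structural modifications while the main thread iterates.
        Thread writer = new Thread(() -> {
            for (int i = 10_000; i < 20_000; i++) map.put(i, i);
        });
        writer.start();
        try {
            for (Map.Entry<Integer, Integer> ignored : map.entrySet()) {
                Thread.yield(); // widen the race window, as the Kafka test does
            }
            System.out.println("No exception this run; fail-fast iterators are best-effort.");
        } catch (ConcurrentModificationException e) {
            System.out.println("Concurrent modification detected: " + e);
        }
        writer.join();
    }
}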

Example 15 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From the class KafkaConsumerTest, method newConsumer.

private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, SubscriptionState subscription, ConsumerMetadata metadata, ConsumerPartitionAssignor assignor, boolean autoCommitEnabled, String groupId, Optional<String> groupInstanceId, Optional<Deserializer<String>> valueDeserializer, boolean throwOnStableOffsetNotSupported) {
    String clientId = "mock-consumer";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    int rebalanceTimeoutMs = 60000;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> deserializer = valueDeserializer.orElse(new StringDeserializer());
    List<ConsumerPartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.emptyList());
    Metrics metrics = new Metrics(time);
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
    LogContext loggerFactory = new LogContext();
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    ConsumerCoordinator consumerCoordinator = null;
    if (groupId != null) {
        GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, groupInstanceId, retryBackoffMs, true);
        consumerCoordinator = new ConsumerCoordinator(rebalanceConfig, loggerFactory, consumerClient, assignors, metadata, subscription, metrics, metricGroupPrefix, time, autoCommitEnabled, autoCommitIntervalMs, interceptors, throwOnStableOffsetNotSupported);
    }
    Fetcher<String, String> fetcher = new Fetcher<>(loggerFactory, consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, "", keyDeserializer, deserializer, metadata, subscription, metrics, metricsRegistry.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, new ApiVersions());
    return new KafkaConsumer<>(loggerFactory, clientId, consumerCoordinator, keyDeserializer, deserializer, fetcher, interceptors, time, consumerClient, metrics, subscription, metadata, retryBackoffMs, requestTimeoutMs, defaultApiTimeoutMs, assignors, groupId);
}
Also used : ConsumerInterceptors(org.apache.kafka.clients.consumer.internals.ConsumerInterceptors) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ConsumerCoordinator(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator) LogContext(org.apache.kafka.common.utils.LogContext) GroupRebalanceConfig(org.apache.kafka.clients.GroupRebalanceConfig) Metrics(org.apache.kafka.common.metrics.Metrics) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics) ConsumerNetworkClient(org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) Fetcher(org.apache.kafka.clients.consumer.internals.Fetcher) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics)
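
A hypothetical call site, for illustration only: every non-literal argument below (time, client, subscription, metadata, assignor) is a field of the test class and is assumed here rather than shown in this excerpt.

// Hypothetical usage; the argument values are illustrative assumptions.
KafkaConsumer<String, String> consumer = newConsumer(
        time, client, subscription, metadata, assignor,
        true,              // autoCommitEnabled
        "test-group",      // groupId: non-null, so a ConsumerCoordinator is created
        Optional.empty(),  // groupInstanceId
        Optional.empty(),  // valueDeserializer: falls back to StringDeserializer
        false);            // throwOnStableOffsetNotSupported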

Aggregations

ApiVersions (org.apache.kafka.clients.ApiVersions): 24
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions): 22
Node (org.apache.kafka.common.Node): 13
Test (org.junit.jupiter.api.Test): 12
LogContext (org.apache.kafka.common.utils.LogContext): 10
NetworkClient (org.apache.kafka.clients.NetworkClient): 8
Cluster (org.apache.kafka.common.Cluster): 7
Metrics (org.apache.kafka.common.metrics.Metrics): 7
ByteBuffer (java.nio.ByteBuffer): 6
ClientRequest (org.apache.kafka.clients.ClientRequest): 6
MetricName (org.apache.kafka.common.MetricName): 6
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 6
Sensor (org.apache.kafka.common.metrics.Sensor): 6
NetworkReceive (org.apache.kafka.common.network.NetworkReceive): 6
ApiVersionsResponse (org.apache.kafka.common.requests.ApiVersionsResponse): 6
DelayedReceive (org.apache.kafka.test.DelayedReceive): 6
MockSelector (org.apache.kafka.test.MockSelector): 6
TopicPartition (org.apache.kafka.common.TopicPartition): 5
ArrayList (java.util.ArrayList): 4
Arrays.asList (java.util.Arrays.asList): 4