Example 31 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in project kafka by apache.

From the class Sender, method maybeSendAndPollTransactionalRequest.

/**
 * Returns true if a transactional request is sent or polled, or if a FindCoordinator request is enqueued
 */
private boolean maybeSendAndPollTransactionalRequest() {
    if (transactionManager.hasInFlightRequest()) {
        // as long as there are outstanding transactional requests, we simply wait for them to return
        client.poll(retryBackoffMs, time.milliseconds());
        return true;
    }
    if (transactionManager.hasAbortableError() || transactionManager.isAborting()) {
        if (accumulator.hasIncomplete()) {
            // Attempt to get the last error that caused this abort.
            RuntimeException exception = transactionManager.lastError();
            // If there was no error, but we are still aborting, then this is most likely a case where there was no fatal error.
            if (exception == null) {
                exception = new TransactionAbortedException();
            }
            accumulator.abortUndrainedBatches(exception);
        }
    }
    TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequest(accumulator.hasIncomplete());
    if (nextRequestHandler == null)
        return false;
    AbstractRequest.Builder<?> requestBuilder = nextRequestHandler.requestBuilder();
    Node targetNode = null;
    try {
        FindCoordinatorRequest.CoordinatorType coordinatorType = nextRequestHandler.coordinatorType();
        targetNode = coordinatorType != null ? transactionManager.coordinator(coordinatorType) : client.leastLoadedNode(time.milliseconds());
        if (targetNode != null) {
            if (!awaitNodeReady(targetNode, coordinatorType)) {
                log.trace("Target node {} not ready within request timeout, will retry when node is ready.", targetNode);
                maybeFindCoordinatorAndRetry(nextRequestHandler);
                return true;
            }
        } else if (coordinatorType != null) {
            log.trace("Coordinator not known for {}, will retry {} after finding coordinator.", coordinatorType, requestBuilder.apiKey());
            maybeFindCoordinatorAndRetry(nextRequestHandler);
            return true;
        } else {
            log.trace("No nodes available to send requests, will poll and retry when until a node is ready.");
            transactionManager.retry(nextRequestHandler);
            client.poll(retryBackoffMs, time.milliseconds());
            return true;
        }
        if (nextRequestHandler.isRetry())
            time.sleep(nextRequestHandler.retryBackoffMs());
        long currentTimeMs = time.milliseconds();
        ClientRequest clientRequest = client.newClientRequest(targetNode.idString(), requestBuilder, currentTimeMs, true, requestTimeoutMs, nextRequestHandler);
        log.debug("Sending transactional request {} to node {} with correlation ID {}", requestBuilder, targetNode, clientRequest.correlationId());
        client.send(clientRequest, currentTimeMs);
        transactionManager.setInFlightCorrelationId(clientRequest.correlationId());
        client.poll(retryBackoffMs, time.milliseconds());
        return true;
    } catch (IOException e) {
        log.debug("Disconnect from {} while trying to send request {}. Going " + "to back off and retry.", targetNode, requestBuilder, e);
        // We break here so that we pick up the FindCoordinator request immediately.
        maybeFindCoordinatorAndRetry(nextRequestHandler);
        return true;
    }
}
Also used : FindCoordinatorRequest(org.apache.kafka.common.requests.FindCoordinatorRequest) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) Node(org.apache.kafka.common.Node) IOException(java.io.IOException) TransactionAbortedException(org.apache.kafka.common.errors.TransactionAbortedException) ClientRequest(org.apache.kafka.clients.ClientRequest)
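
The snippet delegates to maybeFindCoordinatorAndRetry(nextRequestHandler) without showing it. A hedged sketch of that helper, paraphrased from the same Sender class rather than quoted verbatim: coordinator-bound requests trigger a coordinator lookup, other requests back off and refresh metadata, and in both cases the handler is re-enqueued for retry.

private void maybeFindCoordinatorAndRetry(TransactionManager.TxnRequestHandler nextRequestHandler) {
    if (nextRequestHandler.needsCoordinator()) {
        // Coordinator-bound requests first enqueue a FindCoordinator lookup.
        transactionManager.lookupCoordinator(nextRequestHandler);
    } else {
        // Other requests back off and ask for a metadata refresh to avoid a tight retry loop.
        time.sleep(retryBackoffMs);
        metadata.requestUpdate();
    }
    // Either way, re-enqueue the handler so the next Sender iteration retries it.
    transactionManager.retry(nextRequestHandler);
}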

Example 32 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in project kafka by apache.

From the class FetcherTest, method testFetcherConcurrency.

@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++) topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, 2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions, metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData().setPartitionIndex(tp.topicPartition().partition()).setHighWatermark(offset + 2).setLastStableOffset(offset + 2).setLogStartOffset(0).setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) BeforeEach(org.junit.jupiter.api.BeforeEach) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) SerializationException(org.apache.kafka.common.errors.SerializationException) KafkaException(org.apache.kafka.common.KafkaException) DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) Collections.singletonList(java.util.Collections.singletonList) ClientUtils(org.apache.kafka.clients.ClientUtils) Cluster(org.apache.kafka.common.Cluster) Future(java.util.concurrent.Future) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) DataOutputStream(java.io.DataOutputStream) ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TimestampType(org.apache.kafka.common.record.TimestampType) Sensor(org.apache.kafka.common.metrics.Sensor) CompressionType(org.apache.kafka.common.record.CompressionType) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Metrics(org.apache.kafka.common.metrics.Metrics) ApiMessageType(org.apache.kafka.common.message.ApiMessageType) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) FetchRequest(org.apache.kafka.common.requests.FetchRequest) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Records(org.apache.kafka.common.record.Records) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) ClientDnsLookup(org.apache.kafka.clients.ClientDnsLookup) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ListOffsetsPartitionResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) OffsetForLeaderEpochRequestData(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData) INVALID_SESSION_ID(org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID) ArrayList(java.util.ArrayList) 
LinkedHashMap(java.util.LinkedHashMap) UNDEFINED_EPOCH(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH) NetworkClient(org.apache.kafka.clients.NetworkClient) Deserializer(org.apache.kafka.common.serialization.Deserializer) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MockSelector(org.apache.kafka.test.MockSelector) Field(java.lang.reflect.Field) ApiVersions(org.apache.kafka.clients.ApiVersions) MetricNameTemplate(org.apache.kafka.common.MetricNameTemplate) OffsetForLeaderEpochResponseData(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData) Assertions.assertArrayEquals(org.junit.jupiter.api.Assertions.assertArrayEquals) AfterEach(org.junit.jupiter.api.AfterEach) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) LogTruncationException(org.apache.kafka.clients.consumer.LogTruncationException) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) ByteBuffer(java.nio.ByteBuffer) ClientRequest(org.apache.kafka.clients.ClientRequest) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) Record(org.apache.kafka.common.record.Record) Collections.singleton(java.util.Collections.singleton) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetricName(org.apache.kafka.common.MetricName) OffsetForLeaderTopicResult(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult) ListOffsetsTopic(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.emptyList(java.util.Collections.emptyList) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) Test(org.junit.jupiter.api.Test) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Header(org.apache.kafka.common.header.Header) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Optional(java.util.Optional) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) OffsetsForLeaderEpochResponse(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse) Uuid(org.apache.kafka.common.Uuid) EpochEndOffset(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset) Metadata(org.apache.kafka.clients.Metadata) 
EndTransactionMarker(org.apache.kafka.common.record.EndTransactionMarker) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) HashMap(java.util.HashMap) Function(java.util.function.Function) HashSet(java.util.HashSet) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) Collections.singletonMap(java.util.Collections.singletonMap) ExecutorService(java.util.concurrent.ExecutorService) Utils(org.apache.kafka.common.utils.Utils) UNDEFINED_EPOCH_OFFSET(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) MockClient(org.apache.kafka.clients.MockClient) ListOffsetsPartition(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) Iterator(java.util.Iterator) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) TimeUnit(java.util.concurrent.TimeUnit) IsolationLevel(org.apache.kafka.common.IsolationLevel) DelayedReceive(org.apache.kafka.test.DelayedReceive) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) OffsetsForLeaderEpochRequest(org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest) Collections(java.util.Collections)
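
The response loop above calls buildRecords(offset, 2, offset), which the snippet does not include. A minimal sketch of such a helper, assuming uncompressed records with create-time timestamps (the actual FetcherTest helper may differ in details):

private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) {
    // Build `count` simple key/value records starting at `baseOffset`.
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
            CompressionType.NONE, TimestampType.CREATE_TIME, baseOffset);
    for (int i = 0; i < count; i++)
        builder.append(0L, "key".getBytes(), ("value-" + (firstMessageId + i)).getBytes());
    return builder.build();
}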

Example 33 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in project kafka by apache.

From the class KafkaConsumerTest, method testSubscriptionChangesWithAutoCommitDisabled.

/**
 * Verify that when a consumer changes its topic subscription its assigned partitions
 * do not immediately change, and the consumed offsets of its to-be-revoked partitions
 * are not committed (when auto-commit is disabled).
 * Upon unsubscribing from subscribed topics, the assigned partitions immediately
 * change but if auto-commit is disabled the consumer offsets are not committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitDisabled() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    initMetadata(client, tpCounts);
    Node node = metadata.fetch().nodes().get(0);
    ConsumerPartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, false, groupInstanceId);
    initializeSubscriptionWithSingleTopic(consumer, getConsumerRebalanceListener(consumer));
    // mock rebalance responses
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    // verify that subscription is still the same, and now assignment has caught up
    assertEquals(singleton(topic), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());
    consumer.poll(Duration.ZERO);
    // subscription change
    consumer.subscribe(singleton(topic2), getConsumerRebalanceListener(consumer));
    // verify that subscription has changed but assignment is still unchanged
    assertEquals(singleton(topic2), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());
    // the auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests()) assertNotSame(ApiKeys.OFFSET_COMMIT, req.requestBuilder().apiKey());
    // subscription change
    consumer.unsubscribe();
    // verify that subscription and assignment are both updated
    assertEquals(Collections.emptySet(), consumer.subscription());
    assertEquals(Collections.emptySet(), consumer.assignment());
    // the auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests()) assertNotSame(ApiKeys.OFFSET_COMMIT, req.requestBuilder().apiKey());
    client.requests().clear();
    consumer.close();
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ConsumerMetadata(org.apache.kafka.clients.consumer.internals.ConsumerMetadata) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) Node(org.apache.kafka.common.Node) ClientRequest(org.apache.kafka.clients.ClientRequest) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.jupiter.api.Test)
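
The assertion loop over client.requests() appears twice in this test. A small hypothetical helper (not part of KafkaConsumerTest) that states the intent once and could replace both loops:

// Hypothetical helper: fail if any queued ClientRequest in the MockClient is an offset commit.
private static void assertNoOffsetCommitRequests(MockClient client) {
    for (ClientRequest req : client.requests())
        assertNotSame(ApiKeys.OFFSET_COMMIT, req.requestBuilder().apiKey());
}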

Example 34 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in project kafka by apache.

From the class SenderTest, method testQuotaMetrics.

/*
 * Send multiple requests. Verify that the client side quota metrics have the right values.
 */
@SuppressWarnings("deprecation")
@Test
public void testQuotaMetrics() {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, 10 * 1000, 127 * 1000, time, true, new ApiVersions(), throttleTimeSensor, logContext);
    ApiVersionsResponse apiVersionsResponse = ApiVersionsResponse.defaultApiVersionsResponse(400, ApiMessageType.ListenerType.ZK_BROKER);
    ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(apiVersionsResponse, ApiKeys.API_VERSIONS.latestVersion(), 0);
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds())) {
        client.poll(1, time.milliseconds());
        // If a throttled response is received, advance the time to ensure progress.
        time.sleep(client.throttleDelayMs(node, time.milliseconds()));
    }
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData().setTopicData(new ProduceRequestData.TopicProduceDataCollection()).setAcks((short) 1).setTimeoutMs(1000));
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs);
        buffer = RequestTestUtils.serializeResponseWithHeader(response, ApiKeys.PRODUCE.latestVersion(), request.correlationId());
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        // If a throttled response is received, advance the time to ensure progress.
        time.sleep(client.throttleDelayMs(node, time.milliseconds()));
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeAvg);
    KafkaMetric maxMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeMax);
    // Throttle times are ApiVersions=400, Produce=(100, 200, 300), so the expected avg = (400 + 100 + 200 + 300) / 4 = 250 and max = 400
    assertEquals(250, (Double) avgMetric.metricValue(), EPS);
    assertEquals(400, (Double) maxMetric.metricValue(), EPS);
    client.close();
}
Also used : ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) Node(org.apache.kafka.common.Node) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) Cluster(org.apache.kafka.common.Cluster) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) ByteBuffer(java.nio.ByteBuffer) MockSelector(org.apache.kafka.test.MockSelector) MetricName(org.apache.kafka.common.MetricName) NetworkClient(org.apache.kafka.clients.NetworkClient) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) DelayedReceive(org.apache.kafka.test.DelayedReceive) ClientRequest(org.apache.kafka.clients.ClientRequest) Sensor(org.apache.kafka.common.metrics.Sensor) Test(org.junit.jupiter.api.Test)
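
The loop above relies on a produceResponse(tp, offset, error, throttleTimeMs) helper that the snippet does not show. A minimal stand-in, using the simpler (deprecated) ProduceResponse constructor instead of whatever the real SenderTest helper builds:

// Hypothetical stand-in for SenderTest's produceResponse helper: a single-partition response.
private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) {
    ProduceResponse.PartitionResponse partitionResponse =
            new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, 0L);
    return new ProduceResponse(Collections.singletonMap(tp, partitionResponse), throttleTimeMs);
}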

Example 35 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in project kafka by apache.

From the class SenderTest, method testCorrectHandlingOfDuplicateSequenceError.

@Test
public void testCorrectHandlingOfDuplicateSequenceError() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = createTransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = appendToAccumulator(tp0);
    sender.runOnce();
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    // Send second ProduceRequest
    Future<RecordMetadata> request2 = appendToAccumulator(tp0);
    sender.runOnce();
    assertEquals(2, client.inFlightRequestCount());
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));
    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    client.respondToRequest(secondClientRequest, produceResponse(tp0, 1000, Errors.NONE, 0));
    // receive response 1
    sender.runOnce();
    assertEquals(OptionalLong.of(1000), transactionManager.lastAckedOffset(tp0));
    assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
    client.respondToRequest(firstClientRequest, produceResponse(tp0, ProduceResponse.INVALID_OFFSET, Errors.DUPLICATE_SEQUENCE_NUMBER, 0));
    // receive response 0
    sender.runOnce();
    // Make sure that the last ack'd sequence doesn't change.
    assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
    assertEquals(OptionalLong.of(1000), transactionManager.lastAckedOffset(tp0));
    assertFalse(client.hasInFlightRequests());
    RecordMetadata unknownMetadata = request1.get();
    assertFalse(unknownMetadata.hasOffset());
    assertEquals(-1L, unknownMetadata.offset());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Node(org.apache.kafka.common.Node) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ClientRequest(org.apache.kafka.clients.ClientRequest) Test(org.junit.jupiter.api.Test)
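
The toArray()[1] cast above works because MockClient.requests() returns the queued ClientRequests in arrival order. A hypothetical, slightly tidier alternative is to walk the queue's iterator:

// Hypothetical alternative: take the two oldest in-flight requests in order.
Iterator<ClientRequest> inFlight = client.requests().iterator();
ClientRequest firstClientRequest = inFlight.next();
ClientRequest secondClientRequest = inFlight.next();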

Aggregations

ClientRequest (org.apache.kafka.clients.ClientRequest): 38 usages
Node (org.apache.kafka.common.Node): 25 usages
Test (org.junit.jupiter.api.Test): 11 usages
HashMap (java.util.HashMap): 8 usages
MockClient (org.apache.kafka.clients.MockClient): 8 usages
Cluster (org.apache.kafka.common.Cluster): 8 usages
LinkedHashMap (java.util.LinkedHashMap): 7 usages
ClientResponse (org.apache.kafka.clients.ClientResponse): 7 usages
Test (org.junit.Test): 7 usages
ApiVersions (org.apache.kafka.clients.ApiVersions): 6 usages
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 6 usages
MetricName (org.apache.kafka.common.MetricName): 6 usages
Sensor (org.apache.kafka.common.metrics.Sensor): 6 usages
ByteBuffer (java.nio.ByteBuffer): 5 usages
NetworkClient (org.apache.kafka.clients.NetworkClient): 5 usages
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions): 5 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 5 usages
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 5 usages
NetworkReceive (org.apache.kafka.common.network.NetworkReceive): 5 usages
AbstractRequest (org.apache.kafka.common.requests.AbstractRequest): 5 usages