Example 76 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

The class Fetcher, method getTopicMetadata.

/**
 * Get metadata for all topics present in the Kafka cluster
 *
 * @param request The MetadataRequest to send
 * @param timer Timer bounding how long this method can block
 * @return The map of topics with their partition information
 */
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, Timer timer) {
    // Save the round trip if no topics are requested.
    if (!request.isAllTopics() && request.emptyTopicList())
        return Collections.emptyMap();
    do {
        RequestFuture<ClientResponse> future = sendMetadataRequest(request);
        client.poll(future, timer);
        if (future.failed() && !future.isRetriable())
            throw future.exception();
        if (future.succeeded()) {
            MetadataResponse response = (MetadataResponse) future.value().responseBody();
            Cluster cluster = response.buildCluster();
            Set<String> unauthorizedTopics = cluster.unauthorizedTopics();
            if (!unauthorizedTopics.isEmpty())
                throw new TopicAuthorizationException(unauthorizedTopics);
            boolean shouldRetry = false;
            Map<String, Errors> errors = response.errors();
            if (!errors.isEmpty()) {
                // if there were errors, we need to check whether they were fatal or whether
                // we should just retry
                log.debug("Topic metadata fetch included errors: {}", errors);
                for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) {
                    String topic = errorEntry.getKey();
                    Errors error = errorEntry.getValue();
                    if (error == Errors.INVALID_TOPIC_EXCEPTION)
                        throw new InvalidTopicException("Topic '" + topic + "' is invalid");
                    else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION)
                        // if a requested topic is unknown, we just skip it and let it be absent
                        // in the returned map
                        continue;
                    else if (error.exception() instanceof RetriableException)
                        shouldRetry = true;
                    else
                        throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception());
                }
            }
            if (!shouldRetry) {
                HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>();
                for (String topic : cluster.topics())
                    topicsPartitionInfos.put(topic, cluster.partitionsForTopic(topic));
                return topicsPartitionInfos;
            }
        }
        timer.sleep(retryBackoffMs);
    } while (timer.notExpired());
    throw new TimeoutException("Timeout expired while fetching topic metadata");
}
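
For reference, a minimal calling sketch, assuming a surrounding class that holds the fetcher and a Time instance (as the Kafka consumer internals do); the names here are illustrative, not verbatim consumer code.

// Hedged sketch: fetch metadata for all topics, blocking for at most 30 seconds.
// `fetcher` and `time` are assumed fields of the enclosing class.
MetadataRequest.Builder builder = MetadataRequest.Builder.allTopics();
Timer timer = time.timer(Duration.ofSeconds(30));
Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata(builder, timer);
topicMetadata.forEach((topic, partitions) ->
    System.out.println(topic + ": " + partitions.size() + " partitions"));
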
Also used : ClientResponse(org.apache.kafka.clients.ClientResponse) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Cluster(org.apache.kafka.common.Cluster) Errors(org.apache.kafka.common.protocol.Errors) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) KafkaException(org.apache.kafka.common.KafkaException) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) RetriableException(org.apache.kafka.common.errors.RetriableException) TimeoutException(org.apache.kafka.common.errors.TimeoutException)

Example 77 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

The class AllBrokersStrategyTest, method testHandleResponse.

@Test
public void testHandleResponse() {
    AllBrokersStrategy strategy = new AllBrokersStrategy(logContext);
    MetadataResponseData response = new MetadataResponseData();
    response.brokers().add(new MetadataResponseData.MetadataResponseBroker()
        .setNodeId(1).setHost("host1").setPort(9092));
    response.brokers().add(new MetadataResponseData.MetadataResponseBroker()
        .setNodeId(2).setHost("host2").setPort(9092));
    AdminApiLookupStrategy.LookupResult<AllBrokersStrategy.BrokerKey> lookupResult = strategy.handleResponse(
        AllBrokersStrategy.LOOKUP_KEYS,
        new MetadataResponse(response, ApiKeys.METADATA.latestVersion()));
    assertEquals(Collections.emptyMap(), lookupResult.failedKeys);
    Set<AllBrokersStrategy.BrokerKey> expectedMappedKeys = mkSet(new AllBrokersStrategy.BrokerKey(OptionalInt.of(1)), new AllBrokersStrategy.BrokerKey(OptionalInt.of(2)));
    assertEquals(expectedMappedKeys, lookupResult.mappedKeys.keySet());
    lookupResult.mappedKeys.forEach((brokerKey, brokerId) -> {
        assertEquals(OptionalInt.of(brokerId), brokerKey.brokerId);
    });
}
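
The inline broker setup above can be factored into a small helper; a hedged sketch assembled only from the builders this test already uses (host and port values are illustrative):

// Hypothetical helper, not part of AllBrokersStrategyTest: build a
// MetadataResponse advertising the given broker ids on illustrative hosts.
private MetadataResponse metadataWithBrokers(int... nodeIds) {
    MetadataResponseData data = new MetadataResponseData();
    for (int id : nodeIds) {
        data.brokers().add(new MetadataResponseData.MetadataResponseBroker()
            .setNodeId(id)
            .setHost("host" + id)
            .setPort(9092));
    }
    return new MetadataResponse(data, ApiKeys.METADATA.latestVersion());
}
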
Also used : MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Test(org.junit.jupiter.api.Test)

Example 78 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

The class PartitionLeaderStrategyTest, method responseWithTopicError.

private MetadataResponse responseWithTopicError(String topic, Errors error) {
    MetadataResponseTopic responseTopic = new MetadataResponseTopic().setName(topic).setErrorCode(error.code());
    MetadataResponseData responseData = new MetadataResponseData();
    responseData.topics().add(responseTopic);
    return new MetadataResponse(responseData, ApiKeys.METADATA.latestVersion());
}
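
A hedged usage sketch: the returned response surfaces the injected error through its per-topic error map (the same errors() accessor used in Example 76), so a test can assert on it directly.

// Illustrative assertion, not verbatim test code.
MetadataResponse response = responseWithTopicError("foo", Errors.LEADER_NOT_AVAILABLE);
assertEquals(Errors.LEADER_NOT_AVAILABLE, response.errors().get("foo"));
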
Also used : MetadataResponseTopic(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic) MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse)

Example 79 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

The class FetcherTest, method testGetOffsetByTimeWithPartitionsRetryCouldTriggerMetadataUpdate.

@Test
public void testGetOffsetByTimeWithPartitionsRetryCouldTriggerMetadataUpdate() {
    List<Errors> retriableErrors = Arrays.asList(
        Errors.NOT_LEADER_OR_FOLLOWER, Errors.REPLICA_NOT_AVAILABLE, Errors.KAFKA_STORAGE_ERROR,
        Errors.OFFSET_NOT_AVAILABLE, Errors.LEADER_NOT_AVAILABLE, Errors.FENCED_LEADER_EPOCH,
        Errors.UNKNOWN_LEADER_EPOCH);
    final int newLeaderEpoch = 3;
    MetadataResponse updatedMetadata = RequestTestUtils.metadataUpdateWithIds("dummy", 3, singletonMap(topicName, Errors.NONE), singletonMap(topicName, 4), tp -> newLeaderEpoch, topicIds);
    Node originalLeader = initialUpdateResponse.buildCluster().leaderFor(tp1);
    Node newLeader = updatedMetadata.buildCluster().leaderFor(tp1);
    assertNotEquals(originalLeader, newLeader);
    for (Errors retriableError : retriableErrors) {
        buildFetcher();
        subscriptions.assignFromUser(mkSet(tp0, tp1));
        client.updateMetadata(initialUpdateResponse);
        final long fetchTimestamp = 10L;
        ListOffsetsPartitionResponse tp0NoError = new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.NONE.code())
            .setTimestamp(fetchTimestamp)
            .setOffset(4L);
        List<ListOffsetsTopicResponse> topics = Collections.singletonList(
            new ListOffsetsTopicResponse()
                .setName(tp0.topic())
                .setPartitions(Arrays.asList(
                    tp0NoError,
                    new ListOffsetsPartitionResponse()
                        .setPartitionIndex(tp1.partition())
                        .setErrorCode(retriableError.code())
                        .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)
                        .setOffset(-1L))));
        ListOffsetsResponseData data = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(topics);
        client.prepareResponseFrom(body -> {
            boolean isListOffsetRequest = body instanceof ListOffsetsRequest;
            if (isListOffsetRequest) {
                ListOffsetsRequest request = (ListOffsetsRequest) body;
                List<ListOffsetsTopic> expectedTopics = Collections.singletonList(
                    new ListOffsetsTopic()
                        .setName(tp0.topic())
                        .setPartitions(Arrays.asList(
                            new ListOffsetsPartition()
                                .setPartitionIndex(tp1.partition())
                                .setTimestamp(fetchTimestamp)
                                .setCurrentLeaderEpoch(ListOffsetsResponse.UNKNOWN_EPOCH),
                            new ListOffsetsPartition()
                                .setPartitionIndex(tp0.partition())
                                .setTimestamp(fetchTimestamp)
                                .setCurrentLeaderEpoch(ListOffsetsResponse.UNKNOWN_EPOCH))));
                return request.topics().equals(expectedTopics);
            } else {
                return false;
            }
        }, new ListOffsetsResponse(data), originalLeader);
        client.prepareMetadataUpdate(updatedMetadata);
        // If the metadata were not updated before the retry, the fetcher would consult the original
        // leader and hit a NOT_LEADER exception. We count the unanswered prepared responses at the
        // end to verify that this did not happen.
        List<ListOffsetsTopicResponse> topicsWithFatalError = Collections.singletonList(
            new ListOffsetsTopicResponse()
                .setName(tp0.topic())
                .setPartitions(Arrays.asList(
                    tp0NoError,
                    new ListOffsetsPartitionResponse()
                        .setPartitionIndex(tp1.partition())
                        .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code())
                        .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)
                        .setOffset(-1L))));
        ListOffsetsResponseData dataWithFatalError = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(topicsWithFatalError);
        client.prepareResponseFrom(new ListOffsetsResponse(dataWithFatalError), originalLeader);
        // The request to the new leader must contain only the single failed partition, tp1.
        client.prepareResponseFrom(body -> {
            boolean isListOffsetRequest = body instanceof ListOffsetsRequest;
            if (isListOffsetRequest) {
                ListOffsetsRequest request = (ListOffsetsRequest) body;
                ListOffsetsTopic requestTopic = request.topics().get(0);
                ListOffsetsPartition expectedPartition = new ListOffsetsPartition().setPartitionIndex(tp1.partition()).setTimestamp(fetchTimestamp).setCurrentLeaderEpoch(newLeaderEpoch);
                return expectedPartition.equals(requestTopic.partitions().get(0));
            } else {
                return false;
            }
        }, listOffsetResponse(tp1, Errors.NONE, fetchTimestamp, 5L), newLeader);
        Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(Utils.mkMap(Utils.mkEntry(tp0, fetchTimestamp), Utils.mkEntry(tp1, fetchTimestamp)), time.timer(Integer.MAX_VALUE));
        assertEquals(Utils.mkMap(Utils.mkEntry(tp0, new OffsetAndTimestamp(4L, fetchTimestamp)), Utils.mkEntry(tp1, new OffsetAndTimestamp(5L, fetchTimestamp))), offsetAndTimestampMap);
        // The prepared NOT_LEADER response should remain unanswered: the metadata was refreshed
        // before the first retry, so the original leader was never contacted again.
        assertEquals(1, client.numAwaitingResponses());
        fetcher.close();
    }
}
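
The test relies on a listOffsetResponse(...) helper that is not shown here; a plausible reconstruction from the ListOffsetsResponseData builders used above (the real FetcherTest helper may differ in detail):

// Hedged sketch of the omitted helper: a single-partition ListOffsetsResponse.
private ListOffsetsResponse listOffsetResponse(TopicPartition tp, Errors error,
                                               long timestamp, long offset) {
    ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse()
        .setPartitionIndex(tp.partition())
        .setErrorCode(error.code())
        .setTimestamp(timestamp)
        .setOffset(offset);
    ListOffsetsTopicResponse topic = new ListOffsetsTopicResponse()
        .setName(tp.topic())
        .setPartitions(Collections.singletonList(partition));
    ListOffsetsResponseData data = new ListOffsetsResponseData()
        .setThrottleTimeMs(0)
        .setTopics(Collections.singletonList(topic));
    return new ListOffsetsResponse(data);
}
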
Also used : ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Node(org.apache.kafka.common.Node) ListOffsetsPartitionResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) Errors(org.apache.kafka.common.protocol.Errors) ListOffsetsPartition(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) ListOffsetsTopic(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) Test(org.junit.jupiter.api.Test)

Example 80 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

The class FetcherTest, method testFetcherConcurrency.

@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++)
        topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(
            new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize,
            2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            metadata, subscriptions, metrics, metricsRegistry, time, retryBackoffMs,
            requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData()
                            .setPartitionIndex(tp.topicPartition().partition())
                            .setHighWatermark(offset + 2)
                            .setLastStableOffset(offset + 2)
                            .setLogStartOffset(0)
                            .setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
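
The fetch responses above are filled by a buildRecords(baseOffset, count, firstMessageId) helper that is not shown; a hedged reconstruction using the MemoryRecords builder API (the actual FetcherTest implementation may differ):

// Hedged sketch: build `count` simple records starting at `baseOffset`.
private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(
        ByteBuffer.allocate(1024),
        CompressionType.NONE,
        TimestampType.CREATE_TIME,
        baseOffset);
    for (int i = 0; i < count; i++)
        builder.append(0L, "key".getBytes(), ("value-" + (firstMessageId + i)).getBytes());
    return builder.build();
}
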
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) BeforeEach(org.junit.jupiter.api.BeforeEach) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) SerializationException(org.apache.kafka.common.errors.SerializationException) KafkaException(org.apache.kafka.common.KafkaException) DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) Collections.singletonList(java.util.Collections.singletonList) ClientUtils(org.apache.kafka.clients.ClientUtils) Cluster(org.apache.kafka.common.Cluster) Future(java.util.concurrent.Future) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) DataOutputStream(java.io.DataOutputStream) ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TimestampType(org.apache.kafka.common.record.TimestampType) Sensor(org.apache.kafka.common.metrics.Sensor) CompressionType(org.apache.kafka.common.record.CompressionType) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Metrics(org.apache.kafka.common.metrics.Metrics) ApiMessageType(org.apache.kafka.common.message.ApiMessageType) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) FetchRequest(org.apache.kafka.common.requests.FetchRequest) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Records(org.apache.kafka.common.record.Records) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) ClientDnsLookup(org.apache.kafka.clients.ClientDnsLookup) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ListOffsetsPartitionResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) OffsetForLeaderEpochRequestData(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData) INVALID_SESSION_ID(org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID) ArrayList(java.util.ArrayList) 
LinkedHashMap(java.util.LinkedHashMap) UNDEFINED_EPOCH(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH) NetworkClient(org.apache.kafka.clients.NetworkClient) Deserializer(org.apache.kafka.common.serialization.Deserializer) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MockSelector(org.apache.kafka.test.MockSelector) Field(java.lang.reflect.Field) ApiVersions(org.apache.kafka.clients.ApiVersions) MetricNameTemplate(org.apache.kafka.common.MetricNameTemplate) OffsetForLeaderEpochResponseData(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData) Assertions.assertArrayEquals(org.junit.jupiter.api.Assertions.assertArrayEquals) AfterEach(org.junit.jupiter.api.AfterEach) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) LogTruncationException(org.apache.kafka.clients.consumer.LogTruncationException) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) ByteBuffer(java.nio.ByteBuffer) ClientRequest(org.apache.kafka.clients.ClientRequest) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) Record(org.apache.kafka.common.record.Record) Collections.singleton(java.util.Collections.singleton) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetricName(org.apache.kafka.common.MetricName) OffsetForLeaderTopicResult(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult) ListOffsetsTopic(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.emptyList(java.util.Collections.emptyList) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) Test(org.junit.jupiter.api.Test) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Header(org.apache.kafka.common.header.Header) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Optional(java.util.Optional) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) OffsetsForLeaderEpochResponse(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse) Uuid(org.apache.kafka.common.Uuid) EpochEndOffset(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset) Metadata(org.apache.kafka.clients.Metadata) 
EndTransactionMarker(org.apache.kafka.common.record.EndTransactionMarker) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) HashMap(java.util.HashMap) Function(java.util.function.Function) HashSet(java.util.HashSet) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) Collections.singletonMap(java.util.Collections.singletonMap) ExecutorService(java.util.concurrent.ExecutorService) Utils(org.apache.kafka.common.utils.Utils) UNDEFINED_EPOCH_OFFSET(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) MockClient(org.apache.kafka.clients.MockClient) ListOffsetsPartition(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) Iterator(java.util.Iterator) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) TimeUnit(java.util.concurrent.TimeUnit) IsolationLevel(org.apache.kafka.common.IsolationLevel) DelayedReceive(org.apache.kafka.test.DelayedReceive) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) OffsetsForLeaderEpochRequest(org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest) Collections(java.util.Collections)

Aggregations

MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 107 usages
Test (org.junit.jupiter.api.Test): 71 usages
HashMap (java.util.HashMap): 68 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 43 usages
MockTime (org.apache.kafka.common.utils.MockTime): 38 usages
Time (org.apache.kafka.common.utils.Time): 37 usages
Node (org.apache.kafka.common.Node): 33 usages
ArrayList (java.util.ArrayList): 30 usages
MockClient (org.apache.kafka.clients.MockClient): 29 usages
Cluster (org.apache.kafka.common.Cluster): 29 usages
MetadataRequest (org.apache.kafka.common.requests.MetadataRequest): 28 usages
Errors (org.apache.kafka.common.protocol.Errors): 27 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 27 usages
Map (java.util.Map): 26 usages
ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata): 26 usages
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 26 usages
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 25 usages
List (java.util.List): 24 usages
LogContext (org.apache.kafka.common.utils.LogContext): 22 usages
HashSet (java.util.HashSet): 21 usages