Example 46 with Uuid

Use of org.apache.kafka.common.Uuid in project kafka by apache.

The class GetListOffsetsCallsBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    MetadataResponseData data = new MetadataResponseData();
    List<MetadataResponseTopic> mrTopicList = new ArrayList<>();
    Set<String> topics = new HashSet<>();
    for (int topicIndex = 0; topicIndex < topicCount; topicIndex++) {
        Uuid topicId = Uuid.randomUuid();
        String topicName = "topic-" + topicIndex;
        MetadataResponseTopic mrTopic = new MetadataResponseTopic()
            .setTopicId(topicId)
            .setName(topicName)
            .setErrorCode((short) 0)
            .setIsInternal(false);
        List<MetadataResponsePartition> mrPartitionList = new ArrayList<>();
        for (int partition = 0; partition < partitionCount; partition++) {
            TopicPartition tp = new TopicPartition(topicName, partition);
            topics.add(tp.topic());
            futures.put(tp, new KafkaFutureImpl<>());
            topicPartitionOffsets.put(tp, OffsetSpec.latest());
            MetadataResponsePartition mrPartition = new MetadataResponsePartition()
                .setLeaderId(partition % numNodes)
                .setPartitionIndex(partition)
                .setIsrNodes(Arrays.asList(0, 1, 2))
                .setReplicaNodes(Arrays.asList(0, 1, 2))
                .setOfflineReplicas(Collections.emptyList())
                .setErrorCode((short) 0);
            mrPartitionList.add(mrPartition);
        }
        mrTopic.setPartitions(mrPartitionList);
        mrTopicList.add(mrTopic);
    }
    data.setTopics(new MetadataResponseData.MetadataResponseTopicCollection(mrTopicList.listIterator()));
    long deadline = 0L;
    short version = 0;
    context = new MetadataOperationContext<>(topics, new ListOffsetsOptions(), deadline, futures);
    context.setResponse(Optional.of(new MetadataResponse(data, version)));
    AdminClientUnitTestEnv adminEnv = new AdminClientUnitTestEnv(mockCluster());
    admin = (KafkaAdminClient) adminEnv.adminClient();
}
Also used : MetadataResponseTopic(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic) AdminClientUnitTestEnv(org.apache.kafka.clients.admin.AdminClientUnitTestEnv) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) ArrayList(java.util.ArrayList) MetadataResponsePartition(org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) HashSet(java.util.HashSet) Setup(org.openjdk.jmh.annotations.Setup)
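
Each topic above is tagged with a fresh random ID. For reference, here is a minimal self-contained sketch of the org.apache.kafka.common.Uuid surface these examples rely on; it assumes only the public API visible here (randomUuid, toString, fromString, ZERO_UUID):

import org.apache.kafka.common.Uuid;

public class UuidRoundTrip {
    public static void main(String[] args) {
        // A fresh random ID; Kafka keeps a few reserved values (e.g. ZERO_UUID)
        // that randomUuid() will not return.
        Uuid id = Uuid.randomUuid();
        // Kafka renders Uuids as URL-safe base64 (22 characters), not dashed hex.
        String encoded = id.toString();
        // The string form round-trips to an equal Uuid.
        Uuid decoded = Uuid.fromString(encoded);
        System.out.println(id.equals(decoded)); // true
        // ZERO_UUID is the "no topic ID" sentinel used before IDs are assigned.
        System.out.println(Uuid.ZERO_UUID);
    }
}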

Example 47 with Uuid

Use of org.apache.kafka.common.Uuid in project kafka by apache.

The class FetchRequestBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    this.fetchData = new HashMap<>();
    this.topicNames = new HashMap<>();
    for (int topicIdx = 0; topicIdx < topicCount; topicIdx++) {
        String topic = Uuid.randomUuid().toString();
        Uuid id = Uuid.randomUuid();
        topicNames.put(id, topic);
        for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
            FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(id, 0, 0, 4096, Optional.empty());
            fetchData.put(new TopicPartition(topic, partitionId), partitionData);
        }
    }
    this.header = new RequestHeader(ApiKeys.FETCH, ApiKeys.FETCH.latestVersion(), "jmh-benchmark", 100);
    this.consumerRequest = FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion(), 0, 0, fetchData).build(ApiKeys.FETCH.latestVersion());
    this.replicaRequest = FetchRequest.Builder.forReplica(ApiKeys.FETCH.latestVersion(), 1, 0, 0, fetchData).build(ApiKeys.FETCH.latestVersion());
    this.requestBuffer = this.consumerRequest.serialize();
}
Also used : Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) RequestHeader(org.apache.kafka.common.requests.RequestHeader) Setup(org.openjdk.jmh.annotations.Setup)
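
Since fetch version 13 (KIP-516), fetch requests address partitions by topic ID rather than by name, which is why the benchmark also keeps a topicNames map from Uuid back to topic name. A minimal sketch of building a single fetch entry, assuming the same five-argument PartitionData constructor used above (topic ID, fetch offset, log start offset, max bytes, optional leader epoch):

import java.util.Optional;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.requests.FetchRequest;

public class FetchEntrySketch {
    public static void main(String[] args) {
        Uuid topicId = Uuid.randomUuid();
        // "demo-topic" is a hypothetical name; only the Uuid travels in the request.
        TopicPartition tp = new TopicPartition("demo-topic", 0);
        // Fetch from offset 0, log start offset 0, up to 4096 bytes, no leader epoch.
        FetchRequest.PartitionData entry =
                new FetchRequest.PartitionData(topicId, 0, 0, 4096, Optional.empty());
        System.out.println(tp + " -> " + entry);
    }
}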

Example 48 with Uuid

Use of org.apache.kafka.common.Uuid in project kafka by apache.

The class FetchSessionBenchmark, method setUp.

@Setup(Level.Trial)
public void setUp() {
    fetches = new LinkedHashMap<>();
    handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    topicIds = new HashMap<>();
    FetchSessionHandler.Builder builder = handler.newBuilder();
    Uuid id = Uuid.randomUuid();
    topicIds.put("foo", id);
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> respMap = new LinkedHashMap<>();
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("foo", i);
        FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(id, 0, 0, 200, Optional.empty());
        fetches.put(tp, partitionData);
        builder.add(tp, partitionData);
        respMap.put(new TopicIdPartition(id, tp), new FetchResponseData.PartitionData()
            .setPartitionIndex(tp.partition())
            .setLastStableOffset(0)
            .setLogStartOffset(0));
    }
    builder.build();
    // build and handle an initial response so that the next fetch will be incremental
    handler.handleResponse(FetchResponse.of(Errors.NONE, 0, 1, respMap), ApiKeys.FETCH.latestVersion());
    int counter = 0;
    for (TopicPartition topicPartition : new ArrayList<>(fetches.keySet())) {
        if (updatedPercentage != 0 && counter % (100 / updatedPercentage) == 0) {
            // reorder in fetch session, and update log start offset
            fetches.remove(topicPartition);
            fetches.put(topicPartition, new FetchRequest.PartitionData(id, 50, 40, 200, Optional.empty()));
        }
        counter++;
    }
}
Also used : ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) LinkedHashMap(java.util.LinkedHashMap) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Setup(org.openjdk.jmh.annotations.Setup)
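
The initial build() plus handleResponse(...) primes the fetch session, so the next builder yields an incremental request. A sketch of that follow-up step, continuing from the setUp() state above; it assumes FetchSessionHandler.Builder.build() returns a FetchRequestData whose toSend() holds only the partitions whose state changed since the last response and whose metadata() carries the session ID and epoch:

// Continues from setUp(): handler and fetches are the fields built there.
FetchSessionHandler.Builder incremental = handler.newBuilder();
for (Map.Entry<TopicPartition, FetchRequest.PartitionData> entry : fetches.entrySet()) {
    incremental.add(entry.getKey(), entry.getValue());
}
FetchSessionHandler.FetchRequestData next = incremental.build();
// Unchanged partitions are implied by the server-side session state.
System.out.println("toSend: " + next.toSend().size());
System.out.println("metadata: " + next.metadata()); // session id + epoch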

Example 49 with Uuid

Use of org.apache.kafka.common.Uuid in project kafka by apache.

The class KafkaAdminClientTest, method testDeleteTopicsRetryThrottlingExceptionWhenEnabled.

@Test
public void testDeleteTopicsRetryThrottlingExceptionWhenEnabled() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // First attempt: topic1 succeeds, topic2 is throttled, topic3 fails terminally.
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic1", "topic2", "topic3"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic1", Errors.NONE),
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
        // First retry: topic2 is throttled again.
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic2"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED)));
        // Second retry: topic2 finally succeeds.
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic2"),
            prepareDeleteTopicsResponse(0,
                deletableTopicResult("topic2", Errors.NONE)));
        DeleteTopicsResult result = env.adminClient().deleteTopics(
            asList("topic1", "topic2", "topic3"),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));
        assertNull(result.topicNameValues().get("topic1").get());
        assertNull(result.topicNameValues().get("topic2").get());
        TestUtils.assertFutureThrows(result.topicNameValues().get("topic3"), TopicExistsException.class);
        // With topic IDs
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        Uuid topicId3 = Uuid.randomUuid();
        // Same flow keyed by topic ID; topicId3 fails with UNKNOWN_TOPIC_ID.
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2, topicId3),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId1, Errors.NONE),
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResultWithId(topicId3, Errors.UNKNOWN_TOPIC_ID)));
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId2),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED)));
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId2),
            prepareDeleteTopicsResponse(0,
                deletableTopicResultWithId(topicId2, Errors.NONE)));
        DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
            TopicCollection.ofTopicIds(asList(topicId1, topicId2, topicId3)),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));
        assertNull(resultIds.topicIdValues().get(topicId1).get());
        assertNull(resultIds.topicIdValues().get(topicId2).get());
        TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class);
    }
}
Also used : Uuid(org.apache.kafka.common.Uuid) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
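
Outside the mocked environment, the same dual entry points look like this; a hedged sketch assuming a reachable broker (the bootstrap address is a placeholder, and a real topic ID would come from describeTopics rather than randomUuid):

import static java.util.Arrays.asList;

import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteTopicsOptions;
import org.apache.kafka.clients.admin.DeleteTopicsResult;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DeleteTopicsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Delete by name: result futures are keyed by topic name.
            DeleteTopicsResult byName = admin.deleteTopics(
                    TopicCollection.ofTopicNames(asList("topic1", "topic2")),
                    new DeleteTopicsOptions().retryOnQuotaViolation(true));
            byName.topicNameValues().get("topic1").get();
            // Delete by ID: result futures are keyed by Uuid.
            Uuid topicId = Uuid.randomUuid(); // illustrative only
            DeleteTopicsResult byId = admin.deleteTopics(
                    TopicCollection.ofTopicIds(asList(topicId)),
                    new DeleteTopicsOptions().retryOnQuotaViolation(true));
            byId.topicIdValues().get(topicId).get();
        }
    }
}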

Example 50 with Uuid

Use of org.apache.kafka.common.Uuid in project kafka by apache.

The class MetadataTest, method testMetadataMerge.

@Test
public void testMetadataMerge() {
    Time time = new MockTime();
    Map<String, Uuid> topicIds = new HashMap<>();
    final AtomicReference<Set<String>> retainTopics = new AtomicReference<>(new HashSet<>());
    metadata = new Metadata(refreshBackoffMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {

        @Override
        protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
            return retainTopics.get().contains(topic);
        }
    };
    // Initialize a metadata instance with two topic variants "old" and "keep". Both will be retained.
    String oldClusterId = "oldClusterId";
    int oldNodes = 2;
    Map<String, Errors> oldTopicErrors = new HashMap<>();
    oldTopicErrors.put("oldInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    oldTopicErrors.put("keepInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    oldTopicErrors.put("oldUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
    oldTopicErrors.put("keepUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
    Map<String, Integer> oldTopicPartitionCounts = new HashMap<>();
    oldTopicPartitionCounts.put("oldValidTopic", 2);
    oldTopicPartitionCounts.put("keepValidTopic", 3);
    retainTopics.set(Utils.mkSet("oldInvalidTopic", "keepInvalidTopic", "oldUnauthorizedTopic", "keepUnauthorizedTopic", "oldValidTopic", "keepValidTopic"));
    topicIds.put("oldValidTopic", Uuid.randomUuid());
    topicIds.put("keepValidTopic", Uuid.randomUuid());
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds(oldClusterId, oldNodes, oldTopicErrors, oldTopicPartitionCounts, _tp -> 100, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    Map<String, Uuid> metadataTopicIds1 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds1.get(topic), topicIds.get(topic)));
    // Update the metadata to add a new topic variant, "new", which will be retained with "keep". Note this
    // means that all of the "old" topics should be dropped.
    Cluster cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), oldClusterId);
    assertEquals(cluster.nodes().size(), oldNodes);
    assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("oldInvalidTopic", "keepInvalidTopic")));
    assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("oldUnauthorizedTopic", "keepUnauthorizedTopic")));
    assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("oldValidTopic", "keepValidTopic")));
    assertEquals(cluster.partitionsForTopic("oldValidTopic").size(), 2);
    assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 3);
    assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
    String newClusterId = "newClusterId";
    int newNodes = oldNodes + 1;
    Map<String, Errors> newTopicErrors = new HashMap<>();
    newTopicErrors.put("newInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    newTopicErrors.put("newUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
    Map<String, Integer> newTopicPartitionCounts = new HashMap<>();
    newTopicPartitionCounts.put("keepValidTopic", 2);
    newTopicPartitionCounts.put("newValidTopic", 4);
    retainTopics.set(Utils.mkSet("keepInvalidTopic", "newInvalidTopic", "keepUnauthorizedTopic", "newUnauthorizedTopic", "keepValidTopic", "newValidTopic"));
    topicIds.put("newValidTopic", Uuid.randomUuid());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes, newTopicErrors, newTopicPartitionCounts, _tp -> 200, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    topicIds.remove("oldValidTopic");
    Map<String, Uuid> metadataTopicIds2 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds2.get(topic), topicIds.get(topic)));
    assertNull(metadataTopicIds2.get("oldValidTopic"));
    cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), newClusterId);
    assertEquals(cluster.nodes().size(), newNodes);
    assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("keepInvalidTopic", "newInvalidTopic")));
    assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("keepUnauthorizedTopic", "newUnauthorizedTopic")));
    assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("keepValidTopic", "newValidTopic")));
    assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 2);
    assertEquals(cluster.partitionsForTopic("newValidTopic").size(), 4);
    assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
    // Perform another metadata update, but this time all topic metadata should be cleared.
    retainTopics.set(Collections.emptySet());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes, newTopicErrors, newTopicPartitionCounts, _tp -> 300, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    Map<String, Uuid> metadataTopicIds3 = metadata.topicIds();
    topicIds.forEach((topicName, topicId) -> assertNull(metadataTopicIds3.get(topicName)));
    cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), newClusterId);
    assertEquals(cluster.nodes().size(), newNodes);
    assertEquals(cluster.invalidTopics(), Collections.emptySet());
    assertEquals(cluster.unauthorizedTopics(), Collections.emptySet());
    assertEquals(cluster.topics(), Collections.emptySet());
    assertTrue(cluster.topicIds().isEmpty());
}
Also used : Uuid(org.apache.kafka.common.Uuid) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) MessageUtil(org.apache.kafka.common.protocol.MessageUtil) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) MockClusterResourceListener(org.apache.kafka.test.MockClusterResourceListener) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteBuffer(java.nio.ByteBuffer) HashSet(java.util.HashSet) Cluster(org.apache.kafka.common.Cluster) MetadataResponseBrokerCollection(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseBrokerCollection) MetadataResponseTopic(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) Topic(org.apache.kafka.common.internals.Topic) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponseTopicCollection(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopicCollection) Time(org.apache.kafka.common.utils.Time) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) InetSocketAddress(java.net.InetSocketAddress) Test(org.junit.jupiter.api.Test) Objects(java.util.Objects) List(java.util.List) MetadataResponsePartition(org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Collections(java.util.Collections)
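
The behavior the test pins down: Metadata.topicIds() reflects only the topics retained by the most recent update, so a merged metadata view never carries stale IDs. In sketch form, reusing the metadata field from the test:

Map<String, Uuid> ids = metadata.topicIds();
Uuid kept = ids.get("keepValidTopic"); // still mapped after the merge
Uuid dropped = ids.get("oldValidTopic"); // null once "old" topics are no longer retained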

Aggregations

Uuid (org.apache.kafka.common.Uuid): 95 uses
Test (org.junit.jupiter.api.Test): 55 uses
HashMap (java.util.HashMap): 42 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 40 uses
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 30 uses
ArrayList (java.util.ArrayList): 29 uses
Map (java.util.Map): 21 uses
ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion): 21 uses
LinkedHashMap (java.util.LinkedHashMap): 18 uses
List (java.util.List): 15 uses
FetchRequest (org.apache.kafka.common.requests.FetchRequest): 14 uses
TopicIdPartition (org.apache.kafka.common.TopicIdPartition): 13 uses
Errors (org.apache.kafka.common.protocol.Errors): 12 uses
FetchResponse (org.apache.kafka.common.requests.FetchResponse): 12 uses
Collections (java.util.Collections): 11 uses
ByteBuffer (java.nio.ByteBuffer): 10 uses
Node (org.apache.kafka.common.Node): 10 uses
CreateTopicsResponseData (org.apache.kafka.common.message.CreateTopicsResponseData): 10 uses
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 10 uses
PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration): 10 uses