Search in sources:

Example 1 with Partition

use of io.confluent.kafkarest.entities.Partition in project kafka-rest by confluentinc.

From the class TopicManagerImpl, method createTopic:

@Override
public CompletableFuture<Void> createTopic(String clusterId, String topicName, Optional<Integer> partitionsCount, Optional<Short> replicationFactor, Map<Integer, List<Integer>> replicasAssignments, Map<String, Optional<String>> configs) {
    requireNonNull(topicName);

    // NewTopic#configs takes a Map with nullable values, so unwrap each Optional,
    // mapping an absent value to null.
    Map<String, String> nullableConfigs = new HashMap<>();
    for (Map.Entry<String, Optional<String>> config : configs.entrySet()) {
        nullableConfigs.put(config.getKey(), config.getValue().orElse(null));
    }

    // A new topic can be created with either uniform replication according to the given partitions
    // count and replication factor, or explicitly specified partition-to-replicas assignments.
    NewTopic createTopicRequest;
    if (replicasAssignments.isEmpty()) {
        createTopicRequest = new NewTopic(topicName, partitionsCount, replicationFactor).configs(nullableConfigs);
    } else {
        createTopicRequest = new NewTopic(topicName, replicasAssignments).configs(nullableConfigs);
    }

    // Verify the cluster exists before issuing the createTopics call; the existence
    // check completes the future exceptionally if the cluster cannot be found.
    return clusterManager
        .getCluster(clusterId)
        .thenApply(cluster -> checkEntityExists(cluster, "Cluster %s cannot be found.", clusterId))
        .thenCompose(cluster ->
            KafkaFutures.toCompletableFuture(
                adminClient.createTopics(singletonList(createTopicRequest)).all()));
}
Also used : DescribeTopicsOptions(org.apache.kafka.clients.admin.DescribeTopicsOptions) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) KafkaFutures(io.confluent.kafkarest.common.KafkaFutures) Collections.singletonList(java.util.Collections.singletonList) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Inject(javax.inject.Inject) Objects.requireNonNull(java.util.Objects.requireNonNull) Map(java.util.Map) Admin(org.apache.kafka.clients.admin.Admin) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) Collections.emptySet(java.util.Collections.emptySet) Collections.emptyList(java.util.Collections.emptyList) NewTopic(org.apache.kafka.clients.admin.NewTopic) Set(java.util.Set) TopicListing(org.apache.kafka.clients.admin.TopicListing) Partition(io.confluent.kafkarest.entities.Partition) PartitionReplica(io.confluent.kafkarest.entities.PartitionReplica) Collectors(java.util.stream.Collectors) List(java.util.List) Entities.checkEntityExists(io.confluent.kafkarest.controllers.Entities.checkEntityExists) Topic(io.confluent.kafkarest.entities.Topic) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) Acl(io.confluent.kafkarest.entities.Acl) HashMap(java.util.HashMap) NewTopic(org.apache.kafka.clients.admin.NewTopic)

Example 2 with Partition

use of io.confluent.kafkarest.entities.Partition in project kafka-rest by confluentinc.

From the class PartitionManagerImpl, method withOffsets:

private CompletableFuture<List<Partition>> withOffsets(List<Partition> partitions) {
    // Nothing to enrich: return an already-completed empty result without
    // issuing any admin-client calls.
    if (partitions.isEmpty()) {
        return completedFuture(emptyList());
    }

    // Issue one batched earliest-offset query and one batched latest-offset
    // query covering every partition in the list.
    ListOffsetsResult earliestResult = listOffsets(partitions, OffsetSpec.earliest());
    ListOffsetsResult latestResult = listOffsets(partitions, OffsetSpec.latest());

    // For each partition, combine its earliest and latest offsets into a new
    // Partition instance once both per-partition futures complete.
    List<CompletableFuture<Partition>> enrichedPartitions = new ArrayList<>();
    for (Partition partition : partitions) {
        CompletableFuture<ListOffsetsResultInfo> earliestOffset =
            KafkaFutures.toCompletableFuture(
                earliestResult.partitionResult(toTopicPartition(partition)));
        CompletableFuture<ListOffsetsResultInfo> latestOffset =
            KafkaFutures.toCompletableFuture(
                latestResult.partitionResult(toTopicPartition(partition)));
        enrichedPartitions.add(
            earliestOffset.thenCombine(
                latestOffset,
                (earliest, latest) ->
                    Partition.create(
                        partition.getClusterId(),
                        partition.getTopicName(),
                        partition.getPartitionId(),
                        partition.getReplicas(),
                        earliest.offset(),
                        latest.offset())));
    }
    return CompletableFutures.allAsList(enrichedPartitions);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) Partition(io.confluent.kafkarest.entities.Partition) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) CompletableFuture(java.util.concurrent.CompletableFuture) ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) ArrayList(java.util.ArrayList)

Example 3 with Partition

use of io.confluent.kafkarest.entities.Partition in project kafka-rest by confluentinc.

From the class ConsumerGroupManagerImplTest, method getConsumerGroup_returnsConsumerGroup:

@Test
public void getConsumerGroup_returnsConsumerGroup() throws Exception {
    // Cluster lookup succeeds for the requested cluster id.
    expect(clusterManager.getCluster(CLUSTER_ID)).andReturn(completedFuture(Optional.of(CLUSTER)));
    // The admin client returns a description for exactly the first consumer group.
    expect(adminClient.describeConsumerGroups(singletonList(CONSUMER_GROUPS[0].getConsumerGroupId()))).andReturn(describeConsumerGroupsResult);
    expect(describeConsumerGroupsResult.all()).andReturn(KafkaFuture.completedFuture(singletonMap(CONSUMER_GROUPS[0].getConsumerGroupId(), consumerGroupDescriptions[0])));
    // Stub the group-level fields of the description to mirror the expected
    // CONSUMER_GROUPS[0] fixture (andStubReturn: call count is not verified).
    expect(consumerGroupDescriptions[0].groupId()).andStubReturn(CONSUMER_GROUPS[0].getConsumerGroupId());
    expect(consumerGroupDescriptions[0].isSimpleConsumerGroup()).andStubReturn(CONSUMER_GROUPS[0].isSimple());
    expect(consumerGroupDescriptions[0].partitionAssignor()).andStubReturn(CONSUMER_GROUPS[0].getPartitionAssignor());
    expect(consumerGroupDescriptions[0].state()).andStubReturn(CONSUMER_GROUPS[0].getState().toConsumerGroupState());
    expect(consumerGroupDescriptions[0].coordinator()).andStubReturn(CONSUMER_GROUPS[0].getCoordinator().toNode());
    // memberDescriptions[0] is an array of member mocks; Arrays.asList expands it
    // into the member list returned by the group description.
    expect(consumerGroupDescriptions[0].members()).andStubReturn(Arrays.asList(memberDescriptions[0]));
    // Stub each member mock (and its assignment mock) from the matching
    // CONSUMERS[0][j] fixture, then activate the pair.
    for (int j = 0; j < CONSUMER_GROUPS[0].getConsumers().size(); j++) {
        expect(memberDescriptions[0][j].consumerId()).andStubReturn(CONSUMERS[0][j].getConsumerId());
        expect(memberDescriptions[0][j].groupInstanceId()).andStubReturn(CONSUMERS[0][j].getInstanceId());
        expect(memberDescriptions[0][j].clientId()).andStubReturn(CONSUMERS[0][j].getClientId());
        expect(memberDescriptions[0][j].host()).andStubReturn(CONSUMERS[0][j].getHost());
        expect(memberDescriptions[0][j].assignment()).andStubReturn(memberAssignments[0][j]);
        // Map the fixture's assigned Partition entities to Kafka TopicPartitions.
        expect(memberAssignments[0][j].topicPartitions()).andStubReturn(CONSUMERS[0][j].getAssignedPartitions().stream().map(Partition::toTopicPartition).collect(Collectors.toSet()));
        replay(memberDescriptions[0][j], memberAssignments[0][j]);
    }
    // Activate the remaining mocks after all expectations are recorded.
    replay(clusterManager, adminClient, listConsumerGroupsResult, describeConsumerGroupsResult, consumerGroupDescriptions[0]);
    // Exercise: the outer get() resolves the CompletableFuture, the inner get()
    // unwraps the Optional<ConsumerGroup>.
    ConsumerGroup consumerGroup = consumerGroupManager.getConsumerGroup(CLUSTER_ID, CONSUMER_GROUPS[0].getConsumerGroupId()).get().get();
    // The manager should reconstruct a ConsumerGroup equal to the fixture.
    assertEquals(CONSUMER_GROUPS[0], consumerGroup);
}
Also used : Partition(io.confluent.kafkarest.entities.Partition) ConsumerGroup(io.confluent.kafkarest.entities.ConsumerGroup) Test(org.junit.jupiter.api.Test)

Aggregations

Partition (io.confluent.kafkarest.entities.Partition)3 ArrayList (java.util.ArrayList)2 CompletableFuture (java.util.concurrent.CompletableFuture)2 KafkaFutures (io.confluent.kafkarest.common.KafkaFutures)1 Entities.checkEntityExists (io.confluent.kafkarest.controllers.Entities.checkEntityExists)1 Acl (io.confluent.kafkarest.entities.Acl)1 ConsumerGroup (io.confluent.kafkarest.entities.ConsumerGroup)1 PartitionReplica (io.confluent.kafkarest.entities.PartitionReplica)1 Topic (io.confluent.kafkarest.entities.Topic)1 Collections.emptyList (java.util.Collections.emptyList)1 Collections.emptySet (java.util.Collections.emptySet)1 Collections.singletonList (java.util.Collections.singletonList)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 List (java.util.List)1 Map (java.util.Map)1 Objects.requireNonNull (java.util.Objects.requireNonNull)1 Optional (java.util.Optional)1 Set (java.util.Set)1 Collectors (java.util.stream.Collectors)1