Use of io.confluent.kafkarest.entities.Partition in project kafka-rest by confluentinc.
In the class TopicManagerImpl, the method createTopic:
@Override
public CompletableFuture<Void> createTopic(
    String clusterId, String topicName, Optional<Integer> partitionsCount,
    Optional<Short> replicationFactor, Map<Integer, List<Integer>> replicasAssignments,
    Map<String, Optional<String>> configs) {
  requireNonNull(topicName);
  Map<String, String> nullableConfigs = new HashMap<>();
  configs.forEach((key, value) -> nullableConfigs.put(key, value.orElse(null)));
  // A new topic can be created with either uniform replication according to the given partitions
  // count and replication factor, or explicitly specified partition-to-replicas assignments.
  NewTopic createTopicRequest =
      replicasAssignments.isEmpty()
          ? new NewTopic(topicName, partitionsCount, replicationFactor).configs(nullableConfigs)
          : new NewTopic(topicName, replicasAssignments).configs(nullableConfigs);
  return clusterManager.getCluster(clusterId)
      .thenApply(cluster -> checkEntityExists(cluster, "Cluster %s cannot be found.", clusterId))
      .thenCompose(cluster ->
          KafkaFutures.toCompletableFuture(
              adminClient.createTopics(singletonList(createTopicRequest)).all()));
}
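As a rough usage sketch (not code taken from the project), the call below asks for a three-partition topic with replication factor 2 and one overridden config; the topicManager variable, the cluster id, and the config values are assumptions made for this example. Passing an empty replicasAssignments map selects the uniform-replication branch above.
// Hypothetical caller; "topicManager", the cluster id and the config values are assumptions.
Map<String, Optional<String>> configs = new HashMap<>();
configs.put("cleanup.policy", Optional.of("compact"));
topicManager
    .createTopic(
        "cluster-1",                  // clusterId
        "example-topic",              // topicName
        Optional.of(3),               // partitionsCount
        Optional.of((short) 2),       // replicationFactor
        Collections.emptyMap(),       // replicasAssignments: empty, so uniform replication is used
        configs)
    .join();                          // block until the AdminClient confirms creation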
Use of io.confluent.kafkarest.entities.Partition in project kafka-rest by confluentinc.
In the class PartitionManagerImpl, the method withOffsets:
private CompletableFuture<List<Partition>> withOffsets(List<Partition> partitions) {
  if (partitions.isEmpty()) {
    return completedFuture(emptyList());
  }
  ListOffsetsResult earliestResponse = listOffsets(partitions, OffsetSpec.earliest());
  ListOffsetsResult latestResponse = listOffsets(partitions, OffsetSpec.latest());
  List<CompletableFuture<Partition>> partitionsWithOffsets = new ArrayList<>();
  for (Partition partition : partitions) {
    CompletableFuture<ListOffsetsResultInfo> earliestFuture =
        KafkaFutures.toCompletableFuture(
            earliestResponse.partitionResult(toTopicPartition(partition)));
    CompletableFuture<ListOffsetsResultInfo> latestFuture =
        KafkaFutures.toCompletableFuture(
            latestResponse.partitionResult(toTopicPartition(partition)));
    CompletableFuture<Partition> partitionWithOffset =
        earliestFuture.thenCombine(
            latestFuture,
            (earliest, latest) ->
                Partition.create(
                    partition.getClusterId(), partition.getTopicName(), partition.getPartitionId(),
                    partition.getReplicas(), earliest.offset(), latest.offset()));
    partitionsWithOffsets.add(partitionWithOffset);
  }
  return CompletableFutures.allAsList(partitionsWithOffsets);
}
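withOffsets relies on two helpers that are not shown here: listOffsets, which issues a single AdminClient.listOffsets call covering every partition for a given OffsetSpec, and toTopicPartition, which maps the REST entity Partition to the client-side TopicPartition. A plausible sketch of both, assuming an adminClient field and the Partition getters already used above (the real implementations in PartitionManagerImpl may differ):
// Sketch only; the actual helpers in PartitionManagerImpl may differ in detail.
private ListOffsetsResult listOffsets(List<Partition> partitions, OffsetSpec offsetSpec) {
  // Request the same offset spec (earliest or latest) for every partition in one call.
  Map<TopicPartition, OffsetSpec> request = new HashMap<>();
  for (Partition partition : partitions) {
    request.put(toTopicPartition(partition), offsetSpec);
  }
  return adminClient.listOffsets(request);
}

private static TopicPartition toTopicPartition(Partition partition) {
  return new TopicPartition(partition.getTopicName(), partition.getPartitionId());
}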
Use of io.confluent.kafkarest.entities.Partition in project kafka-rest by confluentinc.
In the class ConsumerGroupManagerImplTest, the method getConsumerGroup_returnsConsumerGroup:
@Test
public void getConsumerGroup_returnsConsumerGroup() throws Exception {
  expect(clusterManager.getCluster(CLUSTER_ID)).andReturn(completedFuture(Optional.of(CLUSTER)));
  expect(adminClient.describeConsumerGroups(singletonList(CONSUMER_GROUPS[0].getConsumerGroupId())))
      .andReturn(describeConsumerGroupsResult);
  expect(describeConsumerGroupsResult.all())
      .andReturn(KafkaFuture.completedFuture(
          singletonMap(CONSUMER_GROUPS[0].getConsumerGroupId(), consumerGroupDescriptions[0])));
  expect(consumerGroupDescriptions[0].groupId()).andStubReturn(CONSUMER_GROUPS[0].getConsumerGroupId());
  expect(consumerGroupDescriptions[0].isSimpleConsumerGroup()).andStubReturn(CONSUMER_GROUPS[0].isSimple());
  expect(consumerGroupDescriptions[0].partitionAssignor()).andStubReturn(CONSUMER_GROUPS[0].getPartitionAssignor());
  expect(consumerGroupDescriptions[0].state()).andStubReturn(CONSUMER_GROUPS[0].getState().toConsumerGroupState());
  expect(consumerGroupDescriptions[0].coordinator()).andStubReturn(CONSUMER_GROUPS[0].getCoordinator().toNode());
  expect(consumerGroupDescriptions[0].members()).andStubReturn(Arrays.asList(memberDescriptions[0]));
  for (int j = 0; j < CONSUMER_GROUPS[0].getConsumers().size(); j++) {
    expect(memberDescriptions[0][j].consumerId()).andStubReturn(CONSUMERS[0][j].getConsumerId());
    expect(memberDescriptions[0][j].groupInstanceId()).andStubReturn(CONSUMERS[0][j].getInstanceId());
    expect(memberDescriptions[0][j].clientId()).andStubReturn(CONSUMERS[0][j].getClientId());
    expect(memberDescriptions[0][j].host()).andStubReturn(CONSUMERS[0][j].getHost());
    expect(memberDescriptions[0][j].assignment()).andStubReturn(memberAssignments[0][j]);
    expect(memberAssignments[0][j].topicPartitions())
        .andStubReturn(
            CONSUMERS[0][j].getAssignedPartitions().stream()
                .map(Partition::toTopicPartition)
                .collect(Collectors.toSet()));
    replay(memberDescriptions[0][j], memberAssignments[0][j]);
  }
  replay(clusterManager, adminClient, listConsumerGroupsResult, describeConsumerGroupsResult,
      consumerGroupDescriptions[0]);
  ConsumerGroup consumerGroup =
      consumerGroupManager
          .getConsumerGroup(CLUSTER_ID, CONSUMER_GROUPS[0].getConsumerGroupId())
          .get()
          .get();
  assertEquals(CONSUMER_GROUPS[0], consumerGroup);
}
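The test follows EasyMock's record/replay style: collaborator calls are recorded with expect(...).andStubReturn(...), replay(...) switches the mocks into replay mode, and only then is the code under test exercised. A minimal, self-contained sketch of that pattern (the Greeter interface is invented purely for illustration):
// Minimal EasyMock record/replay sketch; the Greeter interface is made up for this example.
interface Greeter {
  String greet(String name);
}

@Test
public void recordReplayPattern() {
  Greeter greeter = mock(Greeter.class);                        // mock starts in record mode
  expect(greeter.greet("kafka")).andStubReturn("hello kafka");  // record a stubbed answer
  replay(greeter);                                              // switch to replay mode
  assertEquals("hello kafka", greeter.greet("kafka"));          // exercise the mock
}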