
Example 11 with CreatableTopicResult

Use of org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult in project kafka by apache.

From class ReplicationControlManagerTest, method testInvalidCreateTopicsWithValidateOnlyFlag.

@Test
public void testInvalidCreateTopicsWithValidateOnlyFlag() throws Exception {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext();
    ctx.registerBrokers(0, 1, 2);
    ctx.unfenceBrokers(0, 1, 2);
    CreateTopicsRequestData request = new CreateTopicsRequestData().setValidateOnly(true);
    request.topics().add(new CreatableTopic().setName("foo").setNumPartitions(1).setReplicationFactor((short) 4));
    ControllerResult<CreateTopicsResponseData> result = ctx.replicationControl.createTopics(request);
    assertEquals(0, result.records().size());
    CreateTopicsResponseData expectedResponse = new CreateTopicsResponseData();
    expectedResponse.topics().add(new CreatableTopicResult().
        setName("foo").
        setErrorCode(Errors.INVALID_REPLICATION_FACTOR.code()).
        setErrorMessage("Unable to replicate the partition 4 time(s): The target " +
            "replication factor of 4 cannot be reached because only 3 broker(s) " +
            "are registered."));
    assertEquals(expectedResponse, result.response());
}
Also used : CreatableTopic(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic) CreateTopicsRequestData(org.apache.kafka.common.message.CreateTopicsRequestData) CreatableTopicResult(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) CreateTopicsResponseData(org.apache.kafka.common.message.CreateTopicsResponseData) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
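
For orientation, the same validate-only behavior can be exercised from the client side through the public Admin API. The following is a minimal sketch, assuming a broker reachable at localhost:9092; the class name and address are illustrative and not part of the test above.

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.NewTopic;

public class ValidateOnlyCreateTopicSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        try (Admin admin = Admin.create(props)) {
            // Ask the controller to validate the request without actually creating the topic.
            NewTopic topic = new NewTopic("foo", 1, (short) 4);
            CreateTopicsOptions options = new CreateTopicsOptions().validateOnly(true);
            try {
                admin.createTopics(Collections.singleton(topic), options).all().get();
                System.out.println("Request would succeed");
            } catch (ExecutionException e) {
                // With only three registered brokers this surfaces the same
                // INVALID_REPLICATION_FACTOR error asserted in the test above.
                System.out.println("Validation failed: " + e.getCause().getMessage());
            }
        }
    }
}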

Example 12 with CreatableTopicResult

Use of org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult in project kafka by apache.

From class ReplicationControlManagerTest, method testOfflinePartitionAndReplicaImbalanceMetrics.

@Test
public void testOfflinePartitionAndReplicaImbalanceMetrics() throws Exception {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext();
    ReplicationControlManager replicationControl = ctx.replicationControl;
    ctx.registerBrokers(0, 1, 2, 3);
    ctx.unfenceBrokers(0, 1, 2, 3);
    CreatableTopicResult foo = ctx.createTestTopic("foo", new int[][] { new int[] { 0, 2 }, new int[] { 0, 1 } });
    CreatableTopicResult zar = ctx.createTestTopic("zar", new int[][] { new int[] { 0, 1, 2 }, new int[] { 1, 2, 3 }, new int[] { 1, 2, 0 } });
    ControllerResult<Void> result = replicationControl.unregisterBroker(0);
    ctx.replay(result.records());
    // All partitions should still be online after unregistering broker 0
    assertEquals(0, ctx.metrics.offlinePartitionCount());
    // Three partitions have broker 0 as their preferred (first) replica, so the imbalance count is 3
    assertEquals(3, ctx.metrics.preferredReplicaImbalanceCount());
    result = replicationControl.unregisterBroker(1);
    ctx.replay(result.records());
    // After unregistering broker 1, 1 partition for topic foo should go offline
    assertEquals(1, ctx.metrics.offlinePartitionCount());
    // At this point none of the five partitions has its preferred (first) replica available
    assertEquals(5, ctx.metrics.preferredReplicaImbalanceCount());
    result = replicationControl.unregisterBroker(2);
    ctx.replay(result.records());
    // After unregistering broker 2, the last partition for topic foo should go offline
    // and 2 partitions for topic zar should go offline
    assertEquals(4, ctx.metrics.offlinePartitionCount());
    result = replicationControl.unregisterBroker(3);
    ctx.replay(result.records());
    // After unregistering broker 3 the last partition for topic zar should go offline
    assertEquals(5, ctx.metrics.offlinePartitionCount());
    // Deleting topic foo should bring the offline partition count down to 3
    ArrayList<ApiMessageAndVersion> records = new ArrayList<>();
    replicationControl.deleteTopic(foo.topicId(), records);
    ctx.replay(records);
    assertEquals(3, ctx.metrics.offlinePartitionCount());
    // Deleting topic zar should bring the offline partition count down to 0
    records = new ArrayList<>();
    replicationControl.deleteTopic(zar.topicId(), records);
    ctx.replay(records);
    assertEquals(0, ctx.metrics.offlinePartitionCount());
}
Also used : ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ArrayList(java.util.ArrayList) CreatableTopicResult(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
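
The assertions above follow from two rules: a partition is offline when none of its replicas is on a live (registered and unfenced) broker, and it counts toward the preferred-replica imbalance when its preferred (first) replica is not live and so cannot lead. The toy recomputation below is separate from the controller's real metric plumbing and uses illustrative names; it only shows the arithmetic behind the first two assertions.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class MetricSketch {
    // A partition is offline when no replica lives on a live broker.
    static int offlinePartitionCount(int[][] partitions, Set<Integer> liveBrokers) {
        int offline = 0;
        for (int[] replicas : partitions) {
            boolean anyLive = false;
            for (int broker : replicas) {
                if (liveBrokers.contains(broker)) { anyLive = true; break; }
            }
            if (!anyLive) offline++;
        }
        return offline;
    }

    // Simplification for this scenario: once the preferred (first) replica's broker is gone,
    // it cannot lead, so the partition counts as imbalanced.
    static int preferredReplicaImbalanceCount(int[][] partitions, Set<Integer> liveBrokers) {
        int imbalanced = 0;
        for (int[] replicas : partitions) {
            if (!liveBrokers.contains(replicas[0])) imbalanced++;
        }
        return imbalanced;
    }

    public static void main(String[] args) {
        int[][] partitions = { {0, 2}, {0, 1}, {0, 1, 2}, {1, 2, 3}, {1, 2, 0} }; // foo + zar
        Set<Integer> live = new HashSet<>(Arrays.asList(1, 2, 3)); // broker 0 unregistered
        System.out.println(offlinePartitionCount(partitions, live));          // 0
        System.out.println(preferredReplicaImbalanceCount(partitions, live)); // 3
    }
}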

Example 13 with CreatableTopicResult

Use of org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult in project kafka by apache.

From class ReplicationControlManager, method createTopic.

private ApiError createTopic(CreatableTopic topic, List<ApiMessageAndVersion> records, Map<String, CreatableTopicResult> successes) {
    Map<Integer, PartitionRegistration> newParts = new HashMap<>();
    if (!topic.assignments().isEmpty()) {
        if (topic.replicationFactor() != -1) {
            return new ApiError(INVALID_REQUEST, "A manual partition assignment was specified, but replication " + "factor was not set to -1.");
        }
        if (topic.numPartitions() != -1) {
            return new ApiError(INVALID_REQUEST, "A manual partition assignment was specified, but numPartitions " + "was not set to -1.");
        }
        OptionalInt replicationFactor = OptionalInt.empty();
        for (CreatableReplicaAssignment assignment : topic.assignments()) {
            if (newParts.containsKey(assignment.partitionIndex())) {
                return new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, "Found multiple manual partition assignments for partition " + assignment.partitionIndex());
            }
            validateManualPartitionAssignment(assignment.brokerIds(), replicationFactor);
            replicationFactor = OptionalInt.of(assignment.brokerIds().size());
            List<Integer> isr = assignment.brokerIds().stream().filter(clusterControl::unfenced).collect(Collectors.toList());
            if (isr.isEmpty()) {
                return new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, "All brokers specified in the manual partition assignment for " + "partition " + assignment.partitionIndex() + " are fenced.");
            }
            newParts.put(assignment.partitionIndex(), new PartitionRegistration(Replicas.toArray(assignment.brokerIds()), Replicas.toArray(isr), Replicas.NONE, Replicas.NONE, isr.get(0), 0, 0));
        }
        ApiError error = maybeCheckCreateTopicPolicy(() -> {
            Map<Integer, List<Integer>> assignments = new HashMap<>();
            newParts.entrySet().forEach(e -> assignments.put(e.getKey(), Replicas.toList(e.getValue().replicas)));
            Map<String, String> configs = new HashMap<>();
            topic.configs().forEach(config -> configs.put(config.name(), config.value()));
            return new CreateTopicPolicy.RequestMetadata(topic.name(), null, null, assignments, configs);
        });
        if (error.isFailure())
            return error;
    } else if (topic.replicationFactor() < -1 || topic.replicationFactor() == 0) {
        return new ApiError(Errors.INVALID_REPLICATION_FACTOR, "Replication factor must be larger than 0, or -1 to use the default value.");
    } else if (topic.numPartitions() < -1 || topic.numPartitions() == 0) {
        return new ApiError(Errors.INVALID_PARTITIONS, "Number of partitions was set to an invalid non-positive value.");
    } else {
        int numPartitions = topic.numPartitions() == -1 ? defaultNumPartitions : topic.numPartitions();
        short replicationFactor = topic.replicationFactor() == -1 ? defaultReplicationFactor : topic.replicationFactor();
        try {
            List<List<Integer>> replicas = clusterControl.placeReplicas(0, numPartitions, replicationFactor);
            for (int partitionId = 0; partitionId < replicas.size(); partitionId++) {
                int[] r = Replicas.toArray(replicas.get(partitionId));
                newParts.put(partitionId, new PartitionRegistration(r, r, Replicas.NONE, Replicas.NONE, r[0], 0, 0));
            }
        } catch (InvalidReplicationFactorException e) {
            return new ApiError(Errors.INVALID_REPLICATION_FACTOR, "Unable to replicate the partition " + replicationFactor + " time(s): " + e.getMessage());
        }
        ApiError error = maybeCheckCreateTopicPolicy(() -> {
            Map<String, String> configs = new HashMap<>();
            topic.configs().forEach(config -> configs.put(config.name(), config.value()));
            return new CreateTopicPolicy.RequestMetadata(topic.name(), numPartitions, replicationFactor, null, configs);
        });
        if (error.isFailure())
            return error;
    }
    Uuid topicId = Uuid.randomUuid();
    successes.put(topic.name(), new CreatableTopicResult().
        setName(topic.name()).
        setTopicId(topicId).
        setErrorCode((short) 0).
        setErrorMessage(null).
        setNumPartitions(newParts.size()).
        setReplicationFactor((short) newParts.get(0).replicas.length));
    records.add(new ApiMessageAndVersion(new TopicRecord().setName(topic.name()).setTopicId(topicId), TOPIC_RECORD.highestSupportedVersion()));
    for (Entry<Integer, PartitionRegistration> partEntry : newParts.entrySet()) {
        int partitionIndex = partEntry.getKey();
        PartitionRegistration info = partEntry.getValue();
        records.add(info.toRecord(topicId, partitionIndex));
    }
    return ApiError.NONE;
}
Also used : PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) RemoveTopicRecord(org.apache.kafka.common.metadata.RemoveTopicRecord) TopicRecord(org.apache.kafka.common.metadata.TopicRecord) TimelineHashMap(org.apache.kafka.timeline.TimelineHashMap) HashMap(java.util.HashMap) OptionalInt(java.util.OptionalInt) TimelineInteger(org.apache.kafka.timeline.TimelineInteger) Uuid(org.apache.kafka.common.Uuid) CreatableReplicaAssignment(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment) InvalidReplicationFactorException(org.apache.kafka.common.errors.InvalidReplicationFactorException) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) CreatableTopicResult(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) ArrayList(java.util.ArrayList) List(java.util.List) ApiError(org.apache.kafka.common.requests.ApiError)
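
The maybeCheckCreateTopicPolicy calls above hand a CreateTopicPolicy.RequestMetadata to whatever policy the cluster configures via create.topic.policy.class.name. A minimal sketch of such a policy, assuming one that rejects single-replica topics (the class name and threshold are illustrative), could look like this.

import java.util.Map;
import org.apache.kafka.common.errors.PolicyViolationException;
import org.apache.kafka.server.policy.CreateTopicPolicy;

public class MinReplicationPolicy implements CreateTopicPolicy {
    @Override
    public void configure(Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }

    @Override
    public void validate(RequestMetadata requestMetadata) throws PolicyViolationException {
        // replicationFactor() is null when the request used manual assignments;
        // in that case the per-partition replica lists carry the effective replication factor.
        Short replicationFactor = requestMetadata.replicationFactor();
        if (replicationFactor != null && replicationFactor < 2) {
            throw new PolicyViolationException("Topic " + requestMetadata.topic() +
                " must have a replication factor of at least 2");
        }
        if (requestMetadata.replicasAssignments() != null) {
            requestMetadata.replicasAssignments().forEach((partition, replicas) -> {
                if (replicas.size() < 2) {
                    throw new PolicyViolationException("Partition " + partition + " of topic " +
                        requestMetadata.topic() + " must have at least 2 replicas");
                }
            });
        }
    }

    @Override
    public void close() {
        // Nothing to clean up.
    }
}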

Example 14 with CreatableTopicResult

Use of org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult in project kafka by apache.

From class ReplicationControlManager, method createTopics.

ControllerResult<CreateTopicsResponseData> createTopics(CreateTopicsRequestData request) {
    Map<String, ApiError> topicErrors = new HashMap<>();
    List<ApiMessageAndVersion> records = new ArrayList<>();
    // Check the topic names.
    validateNewTopicNames(topicErrors, request.topics());
    // Identify topics that already exist and mark them with the appropriate error
    request.topics().stream().
        filter(creatableTopic -> topicsByName.containsKey(creatableTopic.name())).
        forEach(t -> topicErrors.put(t.name(), new ApiError(Errors.TOPIC_ALREADY_EXISTS,
            "Topic '" + t.name() + "' already exists.")));
    // Verify that the configurations for the new topics are OK, and figure out what
    // ConfigRecords should be created.
    Map<ConfigResource, Map<String, Entry<OpType, String>>> configChanges = computeConfigChanges(topicErrors, request.topics());
    ControllerResult<Map<ConfigResource, ApiError>> configResult = configurationControl.incrementalAlterConfigs(configChanges, NO_OP_EXISTENCE_CHECKER);
    for (Entry<ConfigResource, ApiError> entry : configResult.response().entrySet()) {
        if (entry.getValue().isFailure()) {
            topicErrors.put(entry.getKey().name(), entry.getValue());
        }
    }
    records.addAll(configResult.records());
    // Try to create whatever topics are needed.
    Map<String, CreatableTopicResult> successes = new HashMap<>();
    for (CreatableTopic topic : request.topics()) {
        if (topicErrors.containsKey(topic.name()))
            continue;
        ApiError error;
        try {
            error = createTopic(topic, records, successes);
        } catch (ApiException e) {
            error = ApiError.fromThrowable(e);
        }
        if (error.isFailure()) {
            topicErrors.put(topic.name(), error);
        }
    }
    // Create responses for all topics.
    CreateTopicsResponseData data = new CreateTopicsResponseData();
    StringBuilder resultsBuilder = new StringBuilder();
    String resultsPrefix = "";
    for (CreatableTopic topic : request.topics()) {
        ApiError error = topicErrors.get(topic.name());
        if (error != null) {
            data.topics().add(new CreatableTopicResult().setName(topic.name()).setErrorCode(error.error().code()).setErrorMessage(error.message()));
            resultsBuilder.append(resultsPrefix).append(topic).append(": ").append(error.error()).append(" (").append(error.message()).append(")");
            resultsPrefix = ", ";
            continue;
        }
        CreatableTopicResult result = successes.get(topic.name());
        data.topics().add(result);
        resultsBuilder.append(resultsPrefix).append(topic).append(": ").append("SUCCESS");
        resultsPrefix = ", ";
    }
    if (request.validateOnly()) {
        log.info("Validate-only CreateTopics result(s): {}", resultsBuilder.toString());
        return ControllerResult.atomicOf(Collections.emptyList(), data);
    } else {
        log.info("CreateTopics result(s): {}", resultsBuilder.toString());
        return ControllerResult.atomicOf(records, data);
    }
}
Also used : ListPartitionReassignmentsTopics(org.apache.kafka.common.message.ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics) OpType(org.apache.kafka.clients.admin.AlterConfigOp.OpType) InvalidReplicationFactorException(org.apache.kafka.common.errors.InvalidReplicationFactorException) ElectLeadersRequestData(org.apache.kafka.common.message.ElectLeadersRequestData) CreatePartitionsTopic(org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic) PartitionResult(org.apache.kafka.common.message.ElectLeadersResponseData.PartitionResult) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) TimelineInteger(org.apache.kafka.timeline.TimelineInteger) SET(org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET) AlterIsrResponseData(org.apache.kafka.common.message.AlterIsrResponseData) NoReassignmentInProgressException(org.apache.kafka.common.errors.NoReassignmentInProgressException) INVALID_REQUEST(org.apache.kafka.common.protocol.Errors.INVALID_REQUEST) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) UNREGISTER_BROKER_RECORD(org.apache.kafka.common.metadata.MetadataRecordType.UNREGISTER_BROKER_RECORD) SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) NO_LEADER(org.apache.kafka.metadata.LeaderConstants.NO_LEADER) InvalidPartitionsException(org.apache.kafka.common.errors.InvalidPartitionsException) UNKNOWN_TOPIC_ID(org.apache.kafka.common.protocol.Errors.UNKNOWN_TOPIC_ID) PartitionChangeRecord(org.apache.kafka.common.metadata.PartitionChangeRecord) Errors(org.apache.kafka.common.protocol.Errors) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) CreatableTopicResult(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) RemoveTopicRecord(org.apache.kafka.common.metadata.RemoveTopicRecord) TimelineHashMap(org.apache.kafka.timeline.TimelineHashMap) NO_REASSIGNMENT_IN_PROGRESS(org.apache.kafka.common.protocol.Errors.NO_REASSIGNMENT_IN_PROGRESS) BrokerRegistration(org.apache.kafka.metadata.BrokerRegistration) Supplier(java.util.function.Supplier) TOPIC(org.apache.kafka.common.config.ConfigResource.Type.TOPIC) ArrayList(java.util.ArrayList) FENCED_LEADER_EPOCH(org.apache.kafka.common.protocol.Errors.FENCED_LEADER_EPOCH) UnfenceBrokerRecord(org.apache.kafka.common.metadata.UnfenceBrokerRecord) ElectionType(org.apache.kafka.common.ElectionType) BrokerHeartbeatReply(org.apache.kafka.metadata.BrokerHeartbeatReply) AlterPartitionReassignmentsRequestData(org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData) UnknownTopicIdException(org.apache.kafka.common.errors.UnknownTopicIdException) Topic(org.apache.kafka.common.internals.Topic) CreatableReplicaAssignment(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment) FENCE_BROKER_RECORD(org.apache.kafka.common.metadata.MetadataRecordType.FENCE_BROKER_RECORD) ElectLeadersResponseData(org.apache.kafka.common.message.ElectLeadersResponseData) ReassignablePartition(org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignablePartition) BrokerHeartbeatRequestData(org.apache.kafka.common.message.BrokerHeartbeatRequestData) CreatableTopicCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection) CreateTopicsRequestData(org.apache.kafka.common.message.CreateTopicsRequestData) ReassignableTopic(org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic) 
PartitionRecord(org.apache.kafka.common.metadata.PartitionRecord) CreatePartitionsAssignment(org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment) UNFENCE_BROKER_RECORD(org.apache.kafka.common.metadata.MetadataRecordType.UNFENCE_BROKER_RECORD) PARTITION_RECORD(org.apache.kafka.common.metadata.MetadataRecordType.PARTITION_RECORD) BrokerIdNotRegisteredException(org.apache.kafka.common.errors.BrokerIdNotRegisteredException) ListIterator(java.util.ListIterator) CreatePartitionsTopicResult(org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult) ReassignableTopicResponse(org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse) ReplicaElectionResult(org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult) TOPIC_RECORD(org.apache.kafka.common.metadata.MetadataRecordType.TOPIC_RECORD) FenceBrokerRecord(org.apache.kafka.common.metadata.FenceBrokerRecord) TopicRecord(org.apache.kafka.common.metadata.TopicRecord) Collection(java.util.Collection) UnregisterBrokerRecord(org.apache.kafka.common.metadata.UnregisterBrokerRecord) TopicPartitions(org.apache.kafka.common.message.ElectLeadersRequestData.TopicPartitions) PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) Collectors(java.util.stream.Collectors) Replicas(org.apache.kafka.metadata.Replicas) INVALID_UPDATE_VERSION(org.apache.kafka.common.protocol.Errors.INVALID_UPDATE_VERSION) TopicIdPartition(org.apache.kafka.controller.BrokersToIsrs.TopicIdPartition) List(java.util.List) NO_LEADER_CHANGE(org.apache.kafka.metadata.LeaderConstants.NO_LEADER_CHANGE) Entry(java.util.Map.Entry) Optional(java.util.Optional) CreateTopicsResponseData(org.apache.kafka.common.message.CreateTopicsResponseData) Uuid(org.apache.kafka.common.Uuid) ReassignablePartitionResponse(org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse) ListPartitionReassignmentsResponseData(org.apache.kafka.common.message.ListPartitionReassignmentsResponseData) HashMap(java.util.HashMap) REMOVE_TOPIC_RECORD(org.apache.kafka.common.metadata.MetadataRecordType.REMOVE_TOPIC_RECORD) SimpleImmutableEntry(java.util.AbstractMap.SimpleImmutableEntry) CreatableTopic(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic) Function(java.util.function.Function) OptionalInt(java.util.OptionalInt) ApiError(org.apache.kafka.common.requests.ApiError) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) ConfigResource(org.apache.kafka.common.config.ConfigResource) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) PolicyViolationException(org.apache.kafka.common.errors.PolicyViolationException) OngoingPartitionReassignment(org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment) OngoingTopicReassignment(org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment) NoSuchElementException(java.util.NoSuchElementException) UNKNOWN_TOPIC_OR_PARTITION(org.apache.kafka.common.protocol.Errors.UNKNOWN_TOPIC_OR_PARTITION) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) InvalidReplicaAssignmentException(org.apache.kafka.common.errors.InvalidReplicaAssignmentException) NO_OP_EXISTENCE_CHECKER(org.apache.kafka.controller.ConfigurationControlManager.NO_OP_EXISTENCE_CHECKER) AlterPartitionReassignmentsResponseData(org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData) 
CreateTopicPolicy(org.apache.kafka.server.policy.CreateTopicPolicy) AlterIsrRequestData(org.apache.kafka.common.message.AlterIsrRequestData) InvalidRequestException(org.apache.kafka.common.errors.InvalidRequestException) Collections(java.util.Collections) ApiException(org.apache.kafka.common.errors.ApiException)
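
For reference, a request that exercises both branches of createTopic shown in Example 13, default placement for one topic and a manual assignment for another, can be built directly from the generated message classes. This is a sketch; the topic names and broker ids are illustrative.

import java.util.Arrays;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;

public class CreateTopicsRequestSketch {
    public static CreateTopicsRequestData buildRequest() {
        CreateTopicsRequestData request = new CreateTopicsRequestData();

        // Default placement: the controller chooses replicas, using defaultNumPartitions and
        // defaultReplicationFactor when -1 is passed, or the explicit values given here.
        request.topics().add(new CreatableTopic()
            .setName("with-defaults")
            .setNumPartitions(3)
            .setReplicationFactor((short) 2));

        // Manual assignment: numPartitions and replicationFactor must both be -1,
        // and every partition lists its replicas explicitly.
        CreatableTopic manual = new CreatableTopic()
            .setName("with-assignments")
            .setNumPartitions(-1)
            .setReplicationFactor((short) -1);
        manual.assignments().add(new CreatableReplicaAssignment()
            .setPartitionIndex(0)
            .setBrokerIds(Arrays.asList(0, 1)));
        manual.assignments().add(new CreatableReplicaAssignment()
            .setPartitionIndex(1)
            .setBrokerIds(Arrays.asList(1, 2)));
        request.topics().add(manual);

        return request;
    }
}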

Example 15 with CreatableTopicResult

Use of org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult in project kafka by apache.

From class KafkaAdminClient, method getCreateTopicsCall.

private Call getCreateTopicsCall(final CreateTopicsOptions options, final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> futures, final CreatableTopicCollection topics, final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions, final long now, final long deadline) {
    return new Call("createTopics", deadline, new ControllerNodeProvider()) {

        @Override
        public CreateTopicsRequest.Builder createRequest(int timeoutMs) {
            return new CreateTopicsRequest.Builder(new CreateTopicsRequestData().setTopics(topics).setTimeoutMs(timeoutMs).setValidateOnly(options.shouldValidateOnly()));
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse;
            final CreatableTopicCollection retryTopics = new CreatableTopicCollection();
            final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (CreatableTopicResult result : response.data().topics()) {
                KafkaFutureImpl<TopicMetadataAndConfig> future = futures.get(result.name());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", result.name());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                retryTopics.add(topics.find(result.name()).duplicate());
                                retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        TopicMetadataAndConfig topicMetadataAndConfig;
                        if (result.topicConfigErrorCode() != Errors.NONE.code()) {
                            topicMetadataAndConfig = new TopicMetadataAndConfig(Errors.forCode(result.topicConfigErrorCode()).exception());
                        } else if (result.numPartitions() == CreateTopicsResult.UNKNOWN) {
                            topicMetadataAndConfig = new TopicMetadataAndConfig(new UnsupportedVersionException("Topic metadata and configs in CreateTopics response not supported"));
                        } else {
                            List<CreatableTopicConfigs> configs = result.configs();
                            Config topicConfig = new Config(configs.stream().map(this::configEntry).collect(Collectors.toSet()));
                            topicMetadataAndConfig = new TopicMetadataAndConfig(result.topicId(), result.numPartitions(), result.replicationFactor(), topicConfig);
                        }
                        future.complete(topicMetadataAndConfig);
                    }
                }
            }
            // If there are topics to retry, retry them; complete unrealized futures otherwise.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(), topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getCreateTopicsCall(options, futures, retryTopics, retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }

        private ConfigEntry configEntry(CreatableTopicConfigs config) {
            return new ConfigEntry(config.name(), config.value(), configSource(DescribeConfigsResponse.ConfigSource.forId(config.configSource())), config.isSensitive(), config.readOnly(), Collections.emptyList(), null, null);
        }

        @Override
        void handleFailure(Throwable throwable) {
            // If any topics were retried due to a quota-exceeded exception, propagate
            // the initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
Also used : CreateTopicsResponse(org.apache.kafka.common.requests.CreateTopicsResponse) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) HashMap(java.util.HashMap) TopicMetadataAndConfig(org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ThrottlingQuotaExceededException(org.apache.kafka.common.errors.ThrottlingQuotaExceededException) CreatableTopicCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection) CreateTopicsRequest(org.apache.kafka.common.requests.CreateTopicsRequest) TopicMetadataAndConfig(org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig) CreateTopicsRequestData(org.apache.kafka.common.message.CreateTopicsRequestData) CreatableTopicResult(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) CreatableTopicConfigs(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicConfigs) LinkedList(java.util.LinkedList) ArrayList(java.util.ArrayList) List(java.util.List) ApiError(org.apache.kafka.common.requests.ApiError) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException)
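
From the caller's side, the retry-on-quota-violation behavior handled above is controlled through CreateTopicsOptions. A brief usage sketch, assuming a broker at localhost:9092 (address and topic name are illustrative):

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.ThrottlingQuotaExceededException;

public class CreateTopicsQuotaSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        try (Admin admin = Admin.create(props)) {
            // Disable the client-side retry so THROTTLING_QUOTA_EXCEEDED is surfaced to the caller.
            CreateTopicsOptions options = new CreateTopicsOptions().retryOnQuotaViolation(false);
            try {
                admin.createTopics(Collections.singleton(new NewTopic("bar", 3, (short) 3)), options)
                    .all().get();
            } catch (ExecutionException e) {
                if (e.getCause() instanceof ThrottlingQuotaExceededException) {
                    ThrottlingQuotaExceededException quota = (ThrottlingQuotaExceededException) e.getCause();
                    System.out.println("Throttled, retry after " + quota.throttleTimeMs() + " ms");
                } else {
                    throw e;
                }
            }
        }
    }
}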

Aggregations

CreatableTopicResult (org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult): 15
CreateTopicsResponseData (org.apache.kafka.common.message.CreateTopicsResponseData): 9
Test (org.junit.jupiter.api.Test): 8
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 8
CreatableTopic (org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic): 7
CreateTopicsRequestData (org.apache.kafka.common.message.CreateTopicsRequestData): 6
ArrayList (java.util.ArrayList): 5
Uuid (org.apache.kafka.common.Uuid): 5
ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion): 5
HashMap (java.util.HashMap): 4
ApiError (org.apache.kafka.common.requests.ApiError): 4
TopicIdPartition (org.apache.kafka.controller.BrokersToIsrs.TopicIdPartition): 4
List (java.util.List): 3
AlterIsrResponseData (org.apache.kafka.common.message.AlterIsrResponseData): 3
TopicRecord (org.apache.kafka.common.metadata.TopicRecord): 3
PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration): 3
Map (java.util.Map): 2
OptionalInt (java.util.OptionalInt): 2
TopicPartition (org.apache.kafka.common.TopicPartition): 2
InvalidReplicationFactorException (org.apache.kafka.common.errors.InvalidReplicationFactorException): 2