Search in sources :

Example 21 with PartitionRegistration

Use of org.apache.kafka.metadata.PartitionRegistration in project kafka by apache.

In class ReplicationControlManager, method createTopic.

private ApiError createTopic(CreatableTopic topic, List<ApiMessageAndVersion> records, Map<String, CreatableTopicResult> successes) {
    Map<Integer, PartitionRegistration> newParts = new HashMap<>();
    if (!topic.assignments().isEmpty()) {
        if (topic.replicationFactor() != -1) {
            return new ApiError(INVALID_REQUEST, "A manual partition assignment was specified, but replication " + "factor was not set to -1.");
        }
        if (topic.numPartitions() != -1) {
            return new ApiError(INVALID_REQUEST, "A manual partition assignment was specified, but numPartitions " + "was not set to -1.");
        }
        OptionalInt replicationFactor = OptionalInt.empty();
        for (CreatableReplicaAssignment assignment : topic.assignments()) {
            if (newParts.containsKey(assignment.partitionIndex())) {
                return new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, "Found multiple manual partition assignments for partition " + assignment.partitionIndex());
            }
            validateManualPartitionAssignment(assignment.brokerIds(), replicationFactor);
            replicationFactor = OptionalInt.of(assignment.brokerIds().size());
            List<Integer> isr = assignment.brokerIds().stream().filter(clusterControl::unfenced).collect(Collectors.toList());
            if (isr.isEmpty()) {
                return new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, "All brokers specified in the manual partition assignment for " + "partition " + assignment.partitionIndex() + " are fenced.");
            }
            newParts.put(assignment.partitionIndex(), new PartitionRegistration(Replicas.toArray(assignment.brokerIds()), Replicas.toArray(isr), Replicas.NONE, Replicas.NONE, isr.get(0), 0, 0));
        }
        ApiError error = maybeCheckCreateTopicPolicy(() -> {
            Map<Integer, List<Integer>> assignments = new HashMap<>();
            newParts.entrySet().forEach(e -> assignments.put(e.getKey(), Replicas.toList(e.getValue().replicas)));
            Map<String, String> configs = new HashMap<>();
            topic.configs().forEach(config -> configs.put(config.name(), config.value()));
            return new CreateTopicPolicy.RequestMetadata(topic.name(), null, null, assignments, configs);
        });
        if (error.isFailure())
            return error;
    } else if (topic.replicationFactor() < -1 || topic.replicationFactor() == 0) {
        return new ApiError(Errors.INVALID_REPLICATION_FACTOR, "Replication factor must be larger than 0, or -1 to use the default value.");
    } else if (topic.numPartitions() < -1 || topic.numPartitions() == 0) {
        return new ApiError(Errors.INVALID_PARTITIONS, "Number of partitions was set to an invalid non-positive value.");
    } else {
        int numPartitions = topic.numPartitions() == -1 ? defaultNumPartitions : topic.numPartitions();
        short replicationFactor = topic.replicationFactor() == -1 ? defaultReplicationFactor : topic.replicationFactor();
        try {
            List<List<Integer>> replicas = clusterControl.placeReplicas(0, numPartitions, replicationFactor);
            for (int partitionId = 0; partitionId < replicas.size(); partitionId++) {
                int[] r = Replicas.toArray(replicas.get(partitionId));
                newParts.put(partitionId, new PartitionRegistration(r, r, Replicas.NONE, Replicas.NONE, r[0], 0, 0));
            }
        } catch (InvalidReplicationFactorException e) {
            return new ApiError(Errors.INVALID_REPLICATION_FACTOR, "Unable to replicate the partition " + replicationFactor + " time(s): " + e.getMessage());
        }
        ApiError error = maybeCheckCreateTopicPolicy(() -> {
            Map<String, String> configs = new HashMap<>();
            topic.configs().forEach(config -> configs.put(config.name(), config.value()));
            return new CreateTopicPolicy.RequestMetadata(topic.name(), numPartitions, replicationFactor, null, configs);
        });
        if (error.isFailure())
            return error;
    }
    Uuid topicId = Uuid.randomUuid();
    successes.put(topic.name(), new CreatableTopicResult().
        setName(topic.name()).
        setTopicId(topicId).
        setErrorCode((short) 0).
        setErrorMessage(null).
        setNumPartitions(newParts.size()).
        setReplicationFactor((short) newParts.get(0).replicas.length));
    records.add(new ApiMessageAndVersion(new TopicRecord().
        setName(topic.name()).
        setTopicId(topicId), TOPIC_RECORD.highestSupportedVersion()));
    for (Entry<Integer, PartitionRegistration> partEntry : newParts.entrySet()) {
        int partitionIndex = partEntry.getKey();
        PartitionRegistration info = partEntry.getValue();
        records.add(info.toRecord(topicId, partitionIndex));
    }
    return ApiError.NONE;
}
Also used : PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) RemoveTopicRecord(org.apache.kafka.common.metadata.RemoveTopicRecord) TopicRecord(org.apache.kafka.common.metadata.TopicRecord) TimelineHashMap(org.apache.kafka.timeline.TimelineHashMap) HashMap(java.util.HashMap) OptionalInt(java.util.OptionalInt) TimelineInteger(org.apache.kafka.timeline.TimelineInteger) Uuid(org.apache.kafka.common.Uuid) CreatableReplicaAssignment(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment) InvalidReplicationFactorException(org.apache.kafka.common.errors.InvalidReplicationFactorException) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) CreatableTopicResult(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) ArrayList(java.util.ArrayList) List(java.util.List) ApiError(org.apache.kafka.common.requests.ApiError)
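
For context, here is a hedged sketch of the manual-assignment path above. The snippet is my own illustration (the topic name and broker ids are invented), assuming Kafka's generated CreateTopicsRequestData message classes; it builds a CreatableTopic that createTopic accepts only because numPartitions and replicationFactor are left at -1 while explicit assignments are supplied.

// Illustration only: assumes the generated CreateTopicsRequestData message classes.
import java.util.Arrays;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;

public class ManualAssignmentSketch {
    static CreatableTopic manuallyAssignedTopic() {
        CreatableReplicaAssignmentCollection assignments = new CreatableReplicaAssignmentCollection();
        // Hypothetical placement: partition 0 on brokers 1, 2, 3; partition 1 on brokers 2, 3, 4.
        assignments.add(new CreatableReplicaAssignment()
            .setPartitionIndex(0)
            .setBrokerIds(Arrays.asList(1, 2, 3)));
        assignments.add(new CreatableReplicaAssignment()
            .setPartitionIndex(1)
            .setBrokerIds(Arrays.asList(2, 3, 4)));
        return new CreatableTopic()
            .setName("example-topic")
            // Both counts must remain -1 when assignments are given,
            // or createTopic returns INVALID_REQUEST as shown above.
            .setNumPartitions(-1)
            .setReplicationFactor((short) -1)
            .setAssignments(assignments);
    }
}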

Example 22 with PartitionRegistration

Use of org.apache.kafka.metadata.PartitionRegistration in project kafka by apache.

In class ReplicationControlManager, method createPartitions.

void createPartitions(CreatePartitionsTopic topic, List<ApiMessageAndVersion> records) {
    Uuid topicId = topicsByName.get(topic.name());
    if (topicId == null) {
        throw new UnknownTopicOrPartitionException();
    }
    TopicControlInfo topicInfo = topics.get(topicId);
    if (topicInfo == null) {
        throw new UnknownTopicOrPartitionException();
    }
    if (topic.count() == topicInfo.parts.size()) {
        throw new InvalidPartitionsException("Topic already has " + topicInfo.parts.size() + " partition(s).");
    } else if (topic.count() < topicInfo.parts.size()) {
        throw new InvalidPartitionsException("The topic " + topic.name() + " currently " + "has " + topicInfo.parts.size() + " partition(s); " + topic.count() + " would not be an increase.");
    }
    int additional = topic.count() - topicInfo.parts.size();
    if (topic.assignments() != null) {
        if (topic.assignments().size() != additional) {
            throw new InvalidReplicaAssignmentException("Attempted to add " + additional + " additional partition(s), but only " + topic.assignments().size() + " assignment(s) were specified.");
        }
    }
    Iterator<PartitionRegistration> iterator = topicInfo.parts.values().iterator();
    if (!iterator.hasNext()) {
        throw new UnknownServerException("Invalid state: topic " + topic.name() + " appears to have no partitions.");
    }
    PartitionRegistration partitionInfo = iterator.next();
    if (partitionInfo.replicas.length > Short.MAX_VALUE) {
        throw new UnknownServerException("Invalid replication factor " + partitionInfo.replicas.length + ": expected a number equal to less than " + Short.MAX_VALUE);
    }
    short replicationFactor = (short) partitionInfo.replicas.length;
    int startPartitionId = topicInfo.parts.size();
    List<List<Integer>> placements;
    List<List<Integer>> isrs;
    if (topic.assignments() != null) {
        placements = new ArrayList<>();
        isrs = new ArrayList<>();
        for (int i = 0; i < topic.assignments().size(); i++) {
            CreatePartitionsAssignment assignment = topic.assignments().get(i);
            validateManualPartitionAssignment(assignment.brokerIds(), OptionalInt.of(replicationFactor));
            placements.add(assignment.brokerIds());
            List<Integer> isr = assignment.brokerIds().stream().filter(clusterControl::unfenced).collect(Collectors.toList());
            if (isr.isEmpty()) {
                throw new InvalidReplicaAssignmentException("All brokers specified in the manual partition assignment for " + "partition " + (startPartitionId + i) + " are fenced.");
            }
            isrs.add(isr);
        }
    } else {
        placements = clusterControl.placeReplicas(startPartitionId, additional, replicationFactor);
        isrs = placements;
    }
    int partitionId = startPartitionId;
    for (int i = 0; i < placements.size(); i++) {
        List<Integer> placement = placements.get(i);
        List<Integer> isr = isrs.get(i);
        records.add(new ApiMessageAndVersion(new PartitionRecord().
            setPartitionId(partitionId).
            setTopicId(topicId).
            setReplicas(placement).
            setIsr(isr).
            setRemovingReplicas(Collections.emptyList()).
            setAddingReplicas(Collections.emptyList()).
            setLeader(isr.get(0)).
            setLeaderEpoch(0).
            setPartitionEpoch(0), PARTITION_RECORD.highestSupportedVersion()));
        partitionId++;
    }
}
Also used : PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) PartitionRecord(org.apache.kafka.common.metadata.PartitionRecord) InvalidPartitionsException(org.apache.kafka.common.errors.InvalidPartitionsException) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) TimelineInteger(org.apache.kafka.timeline.TimelineInteger) Uuid(org.apache.kafka.common.Uuid) CreatePartitionsAssignment(org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ArrayList(java.util.ArrayList) List(java.util.List) InvalidReplicaAssignmentException(org.apache.kafka.common.errors.InvalidReplicaAssignmentException)
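
As a rough companion to the method above, the sketch below (my own example, not from the Kafka sources; the topic name, counts and broker ids are invented) shows a CreatePartitionsTopic that createPartitions would accept when growing a topic from 3 to 5 partitions, with exactly count minus currentPartitions manual assignments.

// Illustration only: assumes the generated CreatePartitionsRequestData message classes.
import java.util.Arrays;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic;

public class CreatePartitionsSketch {
    static CreatePartitionsTopic growToFivePartitions() {
        return new CreatePartitionsTopic()
            .setName("example-topic")
            // count() is the total partition count after the operation, not the delta.
            .setCount(5)
            // Two partitions are being added, so exactly two assignments are expected;
            // each must contain at least one unfenced broker to form a non-empty ISR.
            .setAssignments(Arrays.asList(
                new CreatePartitionsAssignment().setBrokerIds(Arrays.asList(1, 2, 3)),
                new CreatePartitionsAssignment().setBrokerIds(Arrays.asList(2, 3, 4))));
    }
}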

Example 23 with PartitionRegistration

Use of org.apache.kafka.metadata.PartitionRegistration in project kafka by apache.

In class ReplicationControlManager, method replay.

public void replay(RemoveTopicRecord record) {
    // Remove this topic from the topics map and the topicsByName map.
    TopicControlInfo topic = topics.remove(record.topicId());
    if (topic == null) {
        throw new UnknownTopicIdException("Can't find topic with ID " + record.topicId() + " to remove.");
    }
    topicsByName.remove(topic.name);
    reassigningTopics.remove(record.topicId());
    // Delete the configurations associated with this topic.
    configurationControl.deleteTopicConfigs(topic.name);
    // Remove the entries for this topic in brokersToIsrs.
    for (PartitionRegistration partition : topic.parts.values()) {
        for (int i = 0; i < partition.isr.length; i++) {
            brokersToIsrs.removeTopicEntryForBroker(topic.id, partition.isr[i]);
        }
        if (partition.leader != partition.preferredReplica()) {
            preferredReplicaImbalanceCount.decrement();
        }
        globalPartitionCount.decrement();
    }
    brokersToIsrs.removeTopicEntryForBroker(topic.id, NO_LEADER);
    controllerMetrics.setGlobalTopicsCount(topics.size());
    controllerMetrics.setGlobalPartitionCount(globalPartitionCount.get());
    controllerMetrics.setOfflinePartitionCount(brokersToIsrs.offlinePartitionCount());
    controllerMetrics.setPreferredReplicaImbalanceCount(preferredReplicaImbalanceCount.get());
    log.info("Removed topic {} with ID {}.", topic.name, record.topicId());
}
Also used : PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) UnknownTopicIdException(org.apache.kafka.common.errors.UnknownTopicIdException)
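
The replay method above decrements preferredReplicaImbalanceCount whenever a removed partition's leader differs from its preferred replica. A minimal sketch of that convention follows; it is an assumption about PartitionRegistration's behavior, not its actual source.

/**
 * Sketch only: the preferred leader of a partition is conventionally the first
 * replica in its assignment, so a partition contributes to the preferred-replica
 * imbalance metric whenever its current leader is not replicas[0].
 */
static boolean countsAsImbalanced(int[] replicas, int leader) {
    return replicas.length > 0 && leader != replicas[0];
}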

Example 24 with PartitionRegistration

Use of org.apache.kafka.metadata.PartitionRegistration in project kafka by apache.

In class ReplicationControlManager, method generateLeaderAndIsrUpdates.

/**
 * Iterate over a sequence of partitions and generate ISR changes and/or leader
 * changes if necessary.
 *
 * @param context           A human-readable context string used in log4j logging.
 * @param brokerToRemove    NO_LEADER if no broker is being removed; the ID of the
 *                          broker to remove from the ISR and leadership, otherwise.
 * @param brokerToAdd       NO_LEADER if no broker is being added; the ID of the
 *                          broker which is now eligible to be a leader, otherwise.
 * @param records           A list of records which we will append to.
 * @param iterator          The iterator containing the partitions to examine.
 */
void generateLeaderAndIsrUpdates(String context, int brokerToRemove, int brokerToAdd, List<ApiMessageAndVersion> records, Iterator<TopicIdPartition> iterator) {
    int oldSize = records.size();
    // If the caller passed a valid broker ID for brokerToAdd, rather than passing
    // NO_LEADER, that node will be considered an acceptable leader even if it is
    // currently fenced. This is useful when handling unfencing. The reason is that
    // while we're generating the records to handle unfencing, the ClusterControlManager
    // still shows the node as fenced.
    // 
    // Similarly, if the caller passed a valid broker ID for brokerToRemove, rather
    // than passing NO_LEADER, that node will never be considered an acceptable leader.
    // This is useful when handling a newly fenced node. We also exclude brokerToRemove
    // from the target ISR, but we need to exclude it here too, to handle the case
    // where there is an unclean leader election which chooses a leader from outside
    // the ISR.
    Function<Integer, Boolean> isAcceptableLeader = r -> (r != brokerToRemove) && (r == brokerToAdd || clusterControl.unfenced(r));
    while (iterator.hasNext()) {
        TopicIdPartition topicIdPart = iterator.next();
        TopicControlInfo topic = topics.get(topicIdPart.topicId());
        if (topic == null) {
            throw new RuntimeException("Topic ID " + topicIdPart.topicId() + " existed in isrMembers, but not in the topics map.");
        }
        PartitionRegistration partition = topic.parts.get(topicIdPart.partitionId());
        if (partition == null) {
            throw new RuntimeException("Partition " + topicIdPart + " existed in isrMembers, but not in the partitions map.");
        }
        PartitionChangeBuilder builder = new PartitionChangeBuilder(partition, topicIdPart.topicId(), topicIdPart.partitionId(), isAcceptableLeader, () -> configurationControl.uncleanLeaderElectionEnabledForTopic(topic.name));
        // Note: if brokerToRemove was passed as NO_LEADER, this is a no-op (the new
        // target ISR will be the same as the old one).
        builder.setTargetIsr(Replicas.toList(Replicas.copyWithout(partition.isr, brokerToRemove)));
        builder.build().ifPresent(records::add);
    }
    if (records.size() != oldSize) {
        if (log.isDebugEnabled()) {
            StringBuilder bld = new StringBuilder();
            String prefix = "";
            for (ListIterator<ApiMessageAndVersion> iter = records.listIterator(oldSize); iter.hasNext(); ) {
                ApiMessageAndVersion apiMessageAndVersion = iter.next();
                PartitionChangeRecord record = (PartitionChangeRecord) apiMessageAndVersion.message();
                bld.append(prefix).append(topics.get(record.topicId()).name).append("-").append(record.partitionId());
                prefix = ", ";
            }
            log.debug("{}: changing partition(s): {}", context, bld.toString());
        } else if (log.isInfoEnabled()) {
            log.info("{}: changing {} partition(s)", context, records.size() - oldSize);
        }
    }
}
Also used : PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) PartitionChangeRecord(org.apache.kafka.common.metadata.PartitionChangeRecord) TopicIdPartition(org.apache.kafka.controller.BrokersToIsrs.TopicIdPartition) TimelineInteger(org.apache.kafka.timeline.TimelineInteger) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion)
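
The setTargetIsr call above relies on Replicas.copyWithout to filter brokerToRemove out of the current ISR; because NO_LEADER never appears in an ISR, passing it leaves the ISR unchanged and the change builder produces no record. A rough functional equivalent, written as a sketch rather than the org.apache.kafka.metadata.Replicas source, looks like this.

// Sketch of the assumed copyWithout semantics: return a copy of the ISR with the
// given broker filtered out, preserving the order of the remaining replicas.
static int[] copyWithoutSketch(int[] isr, int brokerToRemove) {
    return java.util.Arrays.stream(isr)
        .filter(replica -> replica != brokerToRemove)
        .toArray();
}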

Example 25 with PartitionRegistration

Use of org.apache.kafka.metadata.PartitionRegistration in project kafka by apache.

In class TopicDelta, method localChanges.

/**
 * Find the partitions that have changed with respect to the given broker (replica).
 *
 * The changes identified are:
 *   1. partitions for which the broker is not a replica anymore
 *   2. partitions for which the broker is now the leader
 *   3. partitions for which the broker is now a follower
 *
 * @param brokerId the broker id
 * @return the partitions for which the broker should remove its replica, become the leader, or become a follower.
 */
public LocalReplicaChanges localChanges(int brokerId) {
    Set<TopicPartition> deletes = new HashSet<>();
    Map<TopicPartition, LocalReplicaChanges.PartitionInfo> leaders = new HashMap<>();
    Map<TopicPartition, LocalReplicaChanges.PartitionInfo> followers = new HashMap<>();
    for (Entry<Integer, PartitionRegistration> entry : partitionChanges.entrySet()) {
        if (!Replicas.contains(entry.getValue().replicas, brokerId)) {
            PartitionRegistration prevPartition = image.partitions().get(entry.getKey());
            if (prevPartition != null && Replicas.contains(prevPartition.replicas, brokerId)) {
                deletes.add(new TopicPartition(name(), entry.getKey()));
            }
        } else if (entry.getValue().leader == brokerId) {
            PartitionRegistration prevPartition = image.partitions().get(entry.getKey());
            if (prevPartition == null || prevPartition.partitionEpoch != entry.getValue().partitionEpoch) {
                leaders.put(new TopicPartition(name(), entry.getKey()), new LocalReplicaChanges.PartitionInfo(id(), entry.getValue()));
            }
        } else if (entry.getValue().leader != brokerId && Replicas.contains(entry.getValue().replicas, brokerId)) {
            PartitionRegistration prevPartition = image.partitions().get(entry.getKey());
            if (prevPartition == null || prevPartition.partitionEpoch != entry.getValue().partitionEpoch) {
                followers.put(new TopicPartition(name(), entry.getKey()), new LocalReplicaChanges.PartitionInfo(id(), entry.getValue()));
            }
        }
    }
    return new LocalReplicaChanges(deletes, leaders, followers);
}
Also used : PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) HashSet(java.util.HashSet)
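
Restated per partition, the classification rule in localChanges above has three outcomes. The helper below is a sketch of my own (the enum and method names are hypothetical); it omits the partitionEpoch comparison against the previous image that skips unchanged partitions, and the check that the broker was previously a replica before a delete is emitted.

enum LocalChange { DELETE, LEADER, FOLLOWER }

// Sketch: classify one changed partition from the point of view of one broker.
static LocalChange classify(PartitionRegistration partition, int brokerId) {
    if (!Replicas.contains(partition.replicas, brokerId)) {
        return LocalChange.DELETE;   // broker is no longer a replica of this partition
    } else if (partition.leader == brokerId) {
        return LocalChange.LEADER;   // broker should act as the partition leader
    } else {
        return LocalChange.FOLLOWER; // broker remains a non-leader replica
    }
}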

Aggregations

PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration) 26
Uuid (org.apache.kafka.common.Uuid) 11
ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion) 10
Test (org.junit.jupiter.api.Test) 10
ArrayList (java.util.ArrayList) 6
HashMap (java.util.HashMap) 6
TopicRecord (org.apache.kafka.common.metadata.TopicRecord) 5
List (java.util.List) 4
UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException) 4
AlterIsrRequestData (org.apache.kafka.common.message.AlterIsrRequestData) 4
AlterIsrResponseData (org.apache.kafka.common.message.AlterIsrResponseData) 3
AlterPartitionReassignmentsRequestData (org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData) 3
ReassignableTopic (org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic) 3
AlterPartitionReassignmentsResponseData (org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData) 3
CreatableTopicResult (org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) 3
PartitionRecord (org.apache.kafka.common.metadata.PartitionRecord) 3
ApiError (org.apache.kafka.common.requests.ApiError) 3
TimelineInteger (org.apache.kafka.timeline.TimelineInteger) 3
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 3
OptionalInt (java.util.OptionalInt) 2