Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class ReplicationControlManager, method createPartitions.
void createPartitions(CreatePartitionsTopic topic, List<ApiMessageAndVersion> records) {
    Uuid topicId = topicsByName.get(topic.name());
    if (topicId == null) {
        throw new UnknownTopicOrPartitionException();
    }
    TopicControlInfo topicInfo = topics.get(topicId);
    if (topicInfo == null) {
        throw new UnknownTopicOrPartitionException();
    }
    if (topic.count() == topicInfo.parts.size()) {
        throw new InvalidPartitionsException("Topic already has " +
            topicInfo.parts.size() + " partition(s).");
    } else if (topic.count() < topicInfo.parts.size()) {
        throw new InvalidPartitionsException("The topic " + topic.name() + " currently " +
            "has " + topicInfo.parts.size() + " partition(s); " + topic.count() +
            " would not be an increase.");
    }
    int additional = topic.count() - topicInfo.parts.size();
    if (topic.assignments() != null) {
        if (topic.assignments().size() != additional) {
            throw new InvalidReplicaAssignmentException("Attempted to add " + additional +
                " additional partition(s), but only " + topic.assignments().size() +
                " assignment(s) were specified.");
        }
    }
    Iterator<PartitionRegistration> iterator = topicInfo.parts.values().iterator();
    if (!iterator.hasNext()) {
        throw new UnknownServerException("Invalid state: topic " + topic.name() +
            " appears to have no partitions.");
    }
    PartitionRegistration partitionInfo = iterator.next();
    if (partitionInfo.replicas.length > Short.MAX_VALUE) {
        throw new UnknownServerException("Invalid replication factor " +
            partitionInfo.replicas.length + ": expected a number equal to less than " +
            Short.MAX_VALUE);
    }
    short replicationFactor = (short) partitionInfo.replicas.length;
    int startPartitionId = topicInfo.parts.size();
    List<List<Integer>> placements;
    List<List<Integer>> isrs;
    if (topic.assignments() != null) {
        placements = new ArrayList<>();
        isrs = new ArrayList<>();
        for (int i = 0; i < topic.assignments().size(); i++) {
            CreatePartitionsAssignment assignment = topic.assignments().get(i);
            validateManualPartitionAssignment(assignment.brokerIds(), OptionalInt.of(replicationFactor));
            placements.add(assignment.brokerIds());
            List<Integer> isr = assignment.brokerIds().stream()
                .filter(clusterControl::unfenced)
                .collect(Collectors.toList());
            if (isr.isEmpty()) {
                throw new InvalidReplicaAssignmentException(
                    "All brokers specified in the manual partition assignment for " +
                    "partition " + (startPartitionId + i) + " are fenced.");
            }
            isrs.add(isr);
        }
    } else {
        placements = clusterControl.placeReplicas(startPartitionId, additional, replicationFactor);
        isrs = placements;
    }
    int partitionId = startPartitionId;
    for (int i = 0; i < placements.size(); i++) {
        List<Integer> placement = placements.get(i);
        List<Integer> isr = isrs.get(i);
        records.add(new ApiMessageAndVersion(new PartitionRecord()
            .setPartitionId(partitionId)
            .setTopicId(topicId)
            .setReplicas(placement)
            .setIsr(isr)
            .setRemovingReplicas(Collections.emptyList())
            .setAddingReplicas(Collections.emptyList())
            .setLeader(isr.get(0))
            .setLeaderEpoch(0)
            .setPartitionEpoch(0), PARTITION_RECORD.highestSupportedVersion()));
        partitionId++;
    }
}
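For context, a minimal usage sketch (not taken from the Kafka sources) of how a caller inside the controller might drive this method. The replicationControl variable stands for an already-built ReplicationControlManager, and the topic name and count are invented:

    List<ApiMessageAndVersion> records = new ArrayList<>();
    CreatePartitionsTopic topic = new CreatePartitionsTopic()
        .setName("foo")
        .setCount(6)            // desired total partition count, not the number to add
        .setAssignments(null);  // null lets clusterControl.placeReplicas() choose the brokers
    replicationControl.createPartitions(topic, records);
    // Each entry in `records` is now an ApiMessageAndVersion wrapping a PartitionRecord
    // for one newly added partition, ready to be appended to the metadata log.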
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class ReplicationControlManager, method createTopics.
ControllerResult<CreateTopicsResponseData> createTopics(CreateTopicsRequestData request) {
    Map<String, ApiError> topicErrors = new HashMap<>();
    List<ApiMessageAndVersion> records = new ArrayList<>();
    // Check the topic names.
    validateNewTopicNames(topicErrors, request.topics());
    // Identify topics that already exist and mark them with the appropriate error.
    request.topics().stream()
        .filter(creatableTopic -> topicsByName.containsKey(creatableTopic.name()))
        .forEach(t -> topicErrors.put(t.name(),
            new ApiError(Errors.TOPIC_ALREADY_EXISTS, "Topic '" + t.name() + "' already exists.")));
    // Verify that the configurations for the new topics are OK, and figure out what
    // ConfigRecords should be created.
    Map<ConfigResource, Map<String, Entry<OpType, String>>> configChanges =
        computeConfigChanges(topicErrors, request.topics());
    ControllerResult<Map<ConfigResource, ApiError>> configResult =
        configurationControl.incrementalAlterConfigs(configChanges, NO_OP_EXISTENCE_CHECKER);
    for (Entry<ConfigResource, ApiError> entry : configResult.response().entrySet()) {
        if (entry.getValue().isFailure()) {
            topicErrors.put(entry.getKey().name(), entry.getValue());
        }
    }
    records.addAll(configResult.records());
    // Try to create whatever topics are needed.
    Map<String, CreatableTopicResult> successes = new HashMap<>();
    for (CreatableTopic topic : request.topics()) {
        if (topicErrors.containsKey(topic.name()))
            continue;
        ApiError error;
        try {
            error = createTopic(topic, records, successes);
        } catch (ApiException e) {
            error = ApiError.fromThrowable(e);
        }
        if (error.isFailure()) {
            topicErrors.put(topic.name(), error);
        }
    }
    // Create responses for all topics.
    CreateTopicsResponseData data = new CreateTopicsResponseData();
    StringBuilder resultsBuilder = new StringBuilder();
    String resultsPrefix = "";
    for (CreatableTopic topic : request.topics()) {
        ApiError error = topicErrors.get(topic.name());
        if (error != null) {
            data.topics().add(new CreatableTopicResult()
                .setName(topic.name())
                .setErrorCode(error.error().code())
                .setErrorMessage(error.message()));
            resultsBuilder.append(resultsPrefix).append(topic).append(": ")
                .append(error.error()).append(" (").append(error.message()).append(")");
            resultsPrefix = ", ";
            continue;
        }
        CreatableTopicResult result = successes.get(topic.name());
        data.topics().add(result);
        resultsBuilder.append(resultsPrefix).append(topic).append(": ").append("SUCCESS");
        resultsPrefix = ", ";
    }
    if (request.validateOnly()) {
        log.info("Validate-only CreateTopics result(s): {}", resultsBuilder.toString());
        return ControllerResult.atomicOf(Collections.emptyList(), data);
    } else {
        log.info("CreateTopics result(s): {}", resultsBuilder.toString());
        return ControllerResult.atomicOf(records, data);
    }
}
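As a rough usage sketch (assumed, not from Kafka itself): with an already-wired ReplicationControlManager named replicationControl, a request can be built from the generated CreateTopicsRequestData classes and the resulting records inspected:

    CreateTopicsRequestData request = new CreateTopicsRequestData();
    request.topics().add(new CreatableTopic()
        .setName("foo")
        .setNumPartitions(3)
        .setReplicationFactor((short) 2));
    ControllerResult<CreateTopicsResponseData> result = replicationControl.createTopics(request);
    // On success, result.records() holds the ApiMessageAndVersion entries (topic, partition
    // and config records) that the quorum controller would commit atomically, while
    // result.response() mirrors the per-topic outcome returned to the client.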
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class ReplicationControlManager, method generateLeaderAndIsrUpdates.
/**
* Iterate over a sequence of partitions and generate ISR changes and/or leader
* changes if necessary.
*
* @param context A human-readable context string used in log4j logging.
* @param brokerToRemove NO_LEADER if no broker is being removed; the ID of the
* broker to remove from the ISR and leadership, otherwise.
* @param brokerToAdd NO_LEADER if no broker is being added; the ID of the
* broker which is now eligible to be a leader, otherwise.
* @param records A list of records which we will append to.
* @param iterator The iterator containing the partitions to examine.
*/
void generateLeaderAndIsrUpdates(String context, int brokerToRemove, int brokerToAdd,
                                 List<ApiMessageAndVersion> records, Iterator<TopicIdPartition> iterator) {
    int oldSize = records.size();
    // If the caller passed a valid broker ID for brokerToAdd, rather than passing
    // NO_LEADER, that node will be considered an acceptable leader even if it is
    // currently fenced. This is useful when handling unfencing. The reason is that
    // while we're generating the records to handle unfencing, the ClusterControlManager
    // still shows the node as fenced.
    //
    // Similarly, if the caller passed a valid broker ID for brokerToRemove, rather
    // than passing NO_LEADER, that node will never be considered an acceptable leader.
    // This is useful when handling a newly fenced node. We also exclude brokerToRemove
    // from the target ISR, but we need to exclude it here too, to handle the case
    // where there is an unclean leader election which chooses a leader from outside
    // the ISR.
    Function<Integer, Boolean> isAcceptableLeader =
        r -> (r != brokerToRemove) && (r == brokerToAdd || clusterControl.unfenced(r));
    while (iterator.hasNext()) {
        TopicIdPartition topicIdPart = iterator.next();
        TopicControlInfo topic = topics.get(topicIdPart.topicId());
        if (topic == null) {
            throw new RuntimeException("Topic ID " + topicIdPart.topicId() +
                " existed in isrMembers, but not in the topics map.");
        }
        PartitionRegistration partition = topic.parts.get(topicIdPart.partitionId());
        if (partition == null) {
            throw new RuntimeException("Partition " + topicIdPart +
                " existed in isrMembers, but not in the partitions map.");
        }
        PartitionChangeBuilder builder = new PartitionChangeBuilder(partition,
            topicIdPart.topicId(),
            topicIdPart.partitionId(),
            isAcceptableLeader,
            () -> configurationControl.uncleanLeaderElectionEnabledForTopic(topic.name));
        // Note: if brokerToRemove was passed as NO_LEADER, this is a no-op (the new
        // target ISR will be the same as the old one).
        builder.setTargetIsr(Replicas.toList(Replicas.copyWithout(partition.isr, brokerToRemove)));
        builder.build().ifPresent(records::add);
    }
    if (records.size() != oldSize) {
        if (log.isDebugEnabled()) {
            StringBuilder bld = new StringBuilder();
            String prefix = "";
            for (ListIterator<ApiMessageAndVersion> iter = records.listIterator(oldSize); iter.hasNext(); ) {
                ApiMessageAndVersion apiMessageAndVersion = iter.next();
                PartitionChangeRecord record = (PartitionChangeRecord) apiMessageAndVersion.message();
                bld.append(prefix).append(topics.get(record.topicId()).name)
                    .append("-").append(record.partitionId());
                prefix = ", ";
            }
            log.debug("{}: changing partition(s): {}", context, bld.toString());
        } else if (log.isInfoEnabled()) {
            log.info("{}: changing {} partition(s)", context, records.size() - oldSize);
        }
    }
}
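To illustrate the parameter conventions described in the Javadoc, here are two sketched call shapes (illustrative, not verbatim Kafka code); `records` and `affectedPartitions` stand in for the record list and the iterator of TopicIdPartitions that reference the broker:

    // Broker 1 was just fenced: drop it from ISRs and from leadership wherever it appears.
    generateLeaderAndIsrUpdates("handleBrokerFenced", 1, NO_LEADER, records, affectedPartitions);
    // Broker 1 was just unfenced: treat it as an acceptable leader again, even though the
    // ClusterControlManager still reports it as fenced while these records are generated.
    generateLeaderAndIsrUpdates("handleBrokerUnfenced", NO_LEADER, 1, records, affectedPartitions);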
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class ReplicationControlManager, method electLeaders.
ControllerResult<ElectLeadersResponseData> electLeaders(ElectLeadersRequestData request) {
    ElectionType electionType = electionType(request.electionType());
    List<ApiMessageAndVersion> records = new ArrayList<>();
    ElectLeadersResponseData response = new ElectLeadersResponseData();
    if (request.topicPartitions() == null) {
        // A null topicPartitions field means "elect leaders for every partition of every
        // topic", for compatibility with the old controller.
        for (Entry<String, Uuid> topicEntry : topicsByName.entrySet()) {
            String topicName = topicEntry.getKey();
            ReplicaElectionResult topicResults = new ReplicaElectionResult().setTopic(topicName);
            response.replicaElectionResults().add(topicResults);
            TopicControlInfo topic = topics.get(topicEntry.getValue());
            if (topic != null) {
                for (int partitionId : topic.parts.keySet()) {
                    ApiError error = electLeader(topicName, partitionId, electionType, records);
                    // Omit results for partitions which already have the desired leader.
                    if (error.error() != Errors.ELECTION_NOT_NEEDED) {
                        topicResults.partitionResult().add(new PartitionResult()
                            .setPartitionId(partitionId)
                            .setErrorCode(error.error().code())
                            .setErrorMessage(error.message()));
                    }
                }
            }
        }
    } else {
        for (TopicPartitions topic : request.topicPartitions()) {
            ReplicaElectionResult topicResults = new ReplicaElectionResult().setTopic(topic.topic());
            response.replicaElectionResults().add(topicResults);
            for (int partitionId : topic.partitions()) {
                ApiError error = electLeader(topic.topic(), partitionId, electionType, records);
                topicResults.partitionResult().add(new PartitionResult()
                    .setPartitionId(partitionId)
                    .setErrorCode(error.error().code())
                    .setErrorMessage(error.message()));
            }
        }
    }
    return ControllerResult.of(records, response);
}
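A minimal sketch, assuming a wired-up ReplicationControlManager named replicationControl; it exercises the null-topicPartitions branch above, which elects leaders for every partition:

    ElectLeadersRequestData request = new ElectLeadersRequestData()
        .setElectionType(ElectionType.PREFERRED.value)
        .setTopicPartitions(null);   // null means "all topics and partitions", as handled above
    ControllerResult<ElectLeadersResponseData> result = replicationControl.electLeaders(request);
    // result.records() carries the ApiMessageAndVersion-wrapped partition change records,
    // while result.response() lists per-partition errors, omitting ELECTION_NOT_NEEDED entries.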
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class AclControlManager, method createAcls.
ControllerResult<List<AclCreateResult>> createAcls(List<AclBinding> acls) {
    List<AclCreateResult> results = new ArrayList<>(acls.size());
    List<ApiMessageAndVersion> records = new ArrayList<>(acls.size());
    for (AclBinding acl : acls) {
        try {
            validateNewAcl(acl);
        } catch (Throwable t) {
            ApiException e = (t instanceof ApiException) ? (ApiException) t :
                new UnknownServerException("Unknown error while trying to create ACL", t);
            results.add(new AclCreateResult(e));
            continue;
        }
        StandardAcl standardAcl = StandardAcl.fromAclBinding(acl);
        if (existingAcls.add(standardAcl)) {
            StandardAclWithId standardAclWithId = new StandardAclWithId(newAclId(), standardAcl);
            idToAcl.put(standardAclWithId.id(), standardAcl);
            records.add(new ApiMessageAndVersion(standardAclWithId.toRecord(), (short) 0));
        }
        results.add(AclCreateResult.SUCCESS);
    }
    return new ControllerResult<>(records, results, true);
}
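For orientation, a hedged sketch (not from the Kafka tests) of calling this on an AclControlManager instance, here called aclControl; the principal and resource names are invented:

    AclBinding binding = new AclBinding(
        new ResourcePattern(ResourceType.TOPIC, "foo", PatternType.LITERAL),
        new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
    ControllerResult<List<AclCreateResult>> result = aclControl.createAcls(Collections.singletonList(binding));
    // Each genuinely new ACL yields one ApiMessageAndVersion built from standardAclWithId.toRecord();
    // a duplicate binding produces no record but is still reported as AclCreateResult.SUCCESS.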