Use of com.linkedin.kafka.cruisecontrol.executor.ExecutionProposal in project cruise-control by linkedin.
The class AnalyzerUtils, method getDiff.
/**
* Get the diff represented by the set of balancing proposals to move from initial to final distribution.
*
* @param initialReplicaDistribution Initial distribution of replicas over the cluster.
* @param initialLeaderDistribution Initial distribution of the leaders.
* @param optimizedClusterModel The optimized cluster model.
* @return The diff represented by the set of balancing proposals to move from initial to final distribution.
*/
public static Set<ExecutionProposal> getDiff(Map<TopicPartition, List<Integer>> initialReplicaDistribution,
                                             Map<TopicPartition, Integer> initialLeaderDistribution,
                                             ClusterModel optimizedClusterModel) {
  Map<TopicPartition, List<Integer>> finalDistribution = optimizedClusterModel.getReplicaDistribution();
  // Sanity check to make sure that given distributions contain the same replicas.
  if (!initialReplicaDistribution.keySet().equals(finalDistribution.keySet())) {
    throw new IllegalArgumentException("Attempt to diff distributions with different partitions.");
  }
  for (Map.Entry<TopicPartition, List<Integer>> entry : initialReplicaDistribution.entrySet()) {
    TopicPartition tp = entry.getKey();
    List<Integer> initialReplicas = entry.getValue();
    if (finalDistribution.get(tp).size() != initialReplicas.size()) {
      throw new IllegalArgumentException("Attempt to diff distributions with modified replication factor.");
    }
  }
  // Generate a set of execution proposals to represent the diff between initial and final distribution.
  Set<ExecutionProposal> diff = new HashSet<>();
  for (Map.Entry<TopicPartition, List<Integer>> entry : initialReplicaDistribution.entrySet()) {
    TopicPartition tp = entry.getKey();
    List<Integer> initialReplicas = entry.getValue();
    List<Integer> finalReplicas = finalDistribution.get(tp);
    int finalLeaderId = optimizedClusterModel.partition(tp).leader().broker().id();
    // The partition has no change.
    if (finalReplicas.equals(initialReplicas) && finalLeaderId == initialLeaderDistribution.get(tp)) {
      continue;
    }
    // We need to adjust the final broker list order to ensure the final leader is the first replica.
    if (finalLeaderId != finalReplicas.get(0)) {
      int leaderPos = finalReplicas.indexOf(finalLeaderId);
      finalReplicas.set(leaderPos, finalReplicas.get(0));
      finalReplicas.set(0, finalLeaderId);
    }
    Double partitionSize = optimizedClusterModel.partition(tp).leader().load().expectedUtilizationFor(Resource.DISK);
    diff.add(new ExecutionProposal(tp, partitionSize.intValue(), initialLeaderDistribution.get(tp), initialReplicas, finalReplicas));
  }
  return diff;
}
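A minimal usage sketch (not part of cruise-control) of how a caller might drive getDiff: snapshot the replica and leader distributions before running a goal, optimize, then diff the optimized model against the snapshot. The clusterModel, goal, and excludedTopics variables are assumed to be supplied by the surrounding code; only accessors shown elsewhere in this listing are used.

// Hypothetical caller: clusterModel, goal, and excludedTopics come from the surrounding code.
Map<TopicPartition, List<Integer>> replicasBefore = clusterModel.getReplicaDistribution();
Map<TopicPartition, Integer> leadersBefore = clusterModel.getLeaderDistribution();
goal.optimize(clusterModel, Collections.emptySet(), excludedTopics);
Set<ExecutionProposal> proposals = AnalyzerUtils.getDiff(replicasBefore, leadersBefore, clusterModel);
for (ExecutionProposal proposal : proposals) {
  // Proposals with nothing to add or remove only transfer leadership.
  System.out.printf("%s: add=%s remove=%s%n",
                    proposal.topic(), proposal.replicasToAdd(), proposal.replicasToRemove());
}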
Use of com.linkedin.kafka.cruisecontrol.executor.ExecutionProposal in project cruise-control by linkedin.
The class KafkaCruiseControlServletUtils, method getProposalSummary.
static String getProposalSummary(GoalOptimizer.OptimizerResult result) {
  int numReplicaMovements = 0;
  int numLeaderMovements = 0;
  long dataToMove = 0;
  for (ExecutionProposal p : result.goalProposals()) {
    if (!p.replicasToAdd().isEmpty() || !p.replicasToRemove().isEmpty()) {
      numReplicaMovements++;
      dataToMove += p.dataToMoveInMB();
    } else {
      numLeaderMovements++;
    }
  }
  return String.format("%n%nThe optimization proposal has %d replica(%d MB) movements and %d leadership movements "
                       + "based on the cluster model with %d recent snapshot windows and %.3f%% of the partitions "
                       + "covered.",
                       numReplicaMovements, dataToMove, numLeaderMovements,
                       result.clusterModelStats().numSnapshotWindows(),
                       result.clusterModelStats().monitoredPartitionsPercentage() * 100);
}
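The same classification can be reused on any proposal set. A hedged sketch of a hypothetical helper (not part of cruise-control) that counts leadership-only movements, using only the accessors shown above:

// Hypothetical helper: a proposal that adds and removes no replicas only transfers leadership.
static long countLeadershipOnlyMovements(Set<ExecutionProposal> proposals) {
  return proposals.stream()
                  .filter(p -> p.replicasToAdd().isEmpty() && p.replicasToRemove().isEmpty())
                  .count();
}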
Use of com.linkedin.kafka.cruisecontrol.executor.ExecutionProposal in project cruise-control by linkedin.
The class ExcludedTopicsTest, method test.
@Test
public void test() throws Exception {
  if (_exceptionClass == null) {
    Map<TopicPartition, List<Integer>> initReplicaDistribution = _clusterModel.getReplicaDistribution();
    Map<TopicPartition, Integer> initLeaderDistribution = _clusterModel.getLeaderDistribution();
    if (_expectedToOptimize) {
      assertTrue("Excluded Topics Test failed to optimize " + _goal.name() + " with excluded topics.",
                 _goal.optimize(_clusterModel, Collections.emptySet(), _excludedTopics));
    } else {
      assertFalse("Excluded Topics Test optimized " + _goal.name() + " with excluded topics " + _excludedTopics,
                  _goal.optimize(_clusterModel, Collections.emptySet(), _excludedTopics));
    }
    // Generated proposals cannot have the excluded topic.
    if (!_excludedTopics.isEmpty()) {
      Set<ExecutionProposal> goalProposals =
          AnalyzerUtils.getDiff(initReplicaDistribution, initLeaderDistribution, _clusterModel);
      for (ExecutionProposal proposal : goalProposals) {
        if (_excludedTopics.contains(proposal.topic())) {
          for (int brokerId : proposal.replicasToRemove()) {
            if (_clusterModel.broker(brokerId).isAlive()) {
              fail(String.format("Proposal %s contains excluded topic %s, but the broker %d is still alive.",
                                 proposal, proposal.topic(), brokerId));
            }
          }
        }
      }
    }
  } else {
    expected.expect(_exceptionClass);
    assertTrue("Excluded Topics Test failed to optimize with excluded topics.",
               _goal.optimize(_clusterModel, Collections.emptySet(), _excludedTopics));
  }
}
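The invariant checked by the nested loops above can also be expressed as a hypothetical predicate (not part of the test class); broker() and isAlive() are taken from the calls already made in the test:

// Hypothetical predicate: true if the proposal removes a replica of an excluded topic from a live broker.
private static boolean violatesTopicExclusion(ExecutionProposal proposal,
                                              Set<String> excludedTopics,
                                              ClusterModel clusterModel) {
  if (!excludedTopics.contains(proposal.topic())) {
    return false;
  }
  for (int brokerId : proposal.replicasToRemove()) {
    if (clusterModel.broker(brokerId).isAlive()) {
      return true;
    }
  }
  return false;
}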
Use of com.linkedin.kafka.cruisecontrol.executor.ExecutionProposal in project cruise-control by linkedin.
The class GoalOptimizer, method optimizations.
/**
* Depending on the existence of dead/decommissioned brokers in the given cluster:
* (1) Re-balance: Generates proposals to update the state of the cluster to achieve a final balanced state.
* (2) Self-healing: Generates proposals to move replicas away from decommissioned brokers.
* Returns a map from goal names to stats. Initial stats are returned under goal name "init".
*
* @param clusterModel The state of the cluster over which the balancing proposal will be applied. Function execution
* updates the cluster state with balancing proposals. If the cluster model is specified, the
* cached proposal will be ignored.
* @param goalsByPriority the goals ordered by priority.
* @param operationProgress to report the job progress.
* @return Results of optimization containing the proposals and stats.
*/
public OptimizerResult optimizations(ClusterModel clusterModel,
                                     Map<Integer, Goal> goalsByPriority,
                                     OperationProgress operationProgress) throws KafkaCruiseControlException {
  if (clusterModel == null) {
    throw new IllegalArgumentException("The cluster model cannot be null");
  }
  // Sanity check for optimizing goals.
  if (!clusterModel.isClusterAlive()) {
    throw new IllegalArgumentException("All brokers are dead in the cluster.");
  }
  LOG.trace("Cluster before optimization is {}", clusterModel);
  ClusterModel.BrokerStats brokerStatsBeforeOptimization = clusterModel.brokerStats();
  Map<TopicPartition, List<Integer>> initReplicaDistribution = clusterModel.getReplicaDistribution();
  Map<TopicPartition, Integer> initLeaderDistribution = clusterModel.getLeaderDistribution();
  boolean isSelfHealing = !clusterModel.selfHealingEligibleReplicas().isEmpty();
  // Set of balancing proposals that will be applied to the given cluster state to satisfy goals (leadership
  // transfer AFTER partition transfer.)
  Set<Goal> optimizedGoals = new HashSet<>();
  Set<Goal> violatedGoalsBeforeOptimization = new HashSet<>();
  Set<Goal> violatedGoalsAfterOptimization = new HashSet<>();
  Map<Goal, ClusterModelStats> statsByGoalPriority = new LinkedHashMap<>();
  Map<TopicPartition, List<Integer>> preOptimizedReplicaDistribution = null;
  Map<TopicPartition, Integer> preOptimizedLeaderDistribution = null;
  Set<String> excludedTopics = excludedTopics(clusterModel);
  LOG.debug("Topics excluded from partition movement: {}", excludedTopics);
  for (Map.Entry<Integer, Goal> entry : goalsByPriority.entrySet()) {
    preOptimizedReplicaDistribution =
        preOptimizedReplicaDistribution == null ? initReplicaDistribution : clusterModel.getReplicaDistribution();
    preOptimizedLeaderDistribution =
        preOptimizedLeaderDistribution == null ? initLeaderDistribution : clusterModel.getLeaderDistribution();
    Goal goal = entry.getValue();
    OptimizationForGoal step = new OptimizationForGoal(goal.name());
    operationProgress.addStep(step);
    LOG.debug("Optimizing goal {}", goal.name());
    boolean succeeded = goal.optimize(clusterModel, optimizedGoals, excludedTopics);
    optimizedGoals.add(goal);
    statsByGoalPriority.put(goal, clusterModel.getClusterStats(_balancingConstraint));
    Set<ExecutionProposal> goalProposals =
        AnalyzerUtils.getDiff(preOptimizedReplicaDistribution, preOptimizedLeaderDistribution, clusterModel);
    if (!goalProposals.isEmpty() || !succeeded) {
      violatedGoalsBeforeOptimization.add(goal);
    }
    if (!succeeded) {
      violatedGoalsAfterOptimization.add(goal);
    }
    logProgress(isSelfHealing, goal.name(), optimizedGoals.size(), goalProposals);
    step.done();
    LOG.debug("Broker level stats after optimization: {}", clusterModel.brokerStats());
  }
  clusterModel.sanityCheck();
  // Broker level stats in the final cluster state.
  if (LOG.isTraceEnabled()) {
    LOG.trace("Broker level stats after optimization: {}%n", clusterModel.brokerStats());
  }
  Set<ExecutionProposal> proposals = AnalyzerUtils.getDiff(initReplicaDistribution, initLeaderDistribution, clusterModel);
  return new OptimizerResult(statsByGoalPriority, violatedGoalsBeforeOptimization, violatedGoalsAfterOptimization,
                             proposals, brokerStatsBeforeOptimization, clusterModel.brokerStats(),
                             clusterModel.generation(), clusterModel.getClusterStats(_balancingConstraint));
}
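A hedged sketch of how a caller might consume the result. The goalOptimizer, clusterModel, goalsByPriority, and operationProgress names are assumed to exist in the calling context; only accessors already shown in this listing are used.

// Hypothetical caller of optimizations(); summarizes the resulting proposals.
GoalOptimizer.OptimizerResult result =
    goalOptimizer.optimizations(clusterModel, goalsByPriority, operationProgress);
long dataToMoveInMB = 0;
for (ExecutionProposal p : result.goalProposals()) {
  // Mirror getProposalSummary: only proposals that add or remove replicas actually move data.
  if (!p.replicasToAdd().isEmpty() || !p.replicasToRemove().isEmpty()) {
    dataToMoveInMB += p.dataToMoveInMB();
  }
}
System.out.printf("Generated %d proposals moving %d MB of replica data.%n",
                  result.goalProposals().size(), dataToMoveInMB);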