Use of com.linkedin.kafka.cruisecontrol.model.ReplicaPlacementInfo in project cruise-control by linkedin.
From the class ExecutorTest, method testSubmitReplicaReassignmentTasksWithInProgressTaskAndExistingTopic:
@Test
public void testSubmitReplicaReassignmentTasksWithInProgressTaskAndExistingTopic() throws InterruptedException, ExecutionException {
  Map<String, TopicDescription> topicDescriptions = createTopics(0);
  int initialLeader0 = topicDescriptions.get(TOPIC0).partitions().get(0).leader().id();
  ExecutionProposal proposal0 = new ExecutionProposal(TP0, 0, new ReplicaPlacementInfo(initialLeader0),
                                                      Collections.singletonList(new ReplicaPlacementInfo(initialLeader0)),
                                                      Collections.singletonList(new ReplicaPlacementInfo(initialLeader0 == 0 ? 1 : 0)));
  AdminClient adminClient = KafkaCruiseControlUtils.createAdminClient(
      Collections.singletonMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, broker(0).plaintextAddr()));
  ExecutionTask task = new ExecutionTask(0, proposal0, ExecutionTask.TaskType.INTER_BROKER_REPLICA_ACTION, EXECUTION_ALERTING_THRESHOLD_MS);
  task.inProgress(MOCK_CURRENT_TIME);
  AlterPartitionReassignmentsResult result = ExecutionUtils.submitReplicaReassignmentTasks(adminClient, Collections.singletonList(task));
  assertEquals(1, result.values().size());
  // Can retrieve the future if it is successful.
  result.values().get(TP0).get();
}
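The snippets on this page exercise only the broker-id side of ReplicaPlacementInfo. Below is a minimal sketch of the shape these tests rely on, inferred from the usages rather than copied from the cruise-control source (the actual class carries more, e.g. log-directory information):

// Minimal sketch inferred from the usages on this page; not the actual cruise-control class.
public class ReplicaPlacementInfo {
  private final int _brokerId;

  public ReplicaPlacementInfo(int brokerId) {
    _brokerId = brokerId;
  }

  public int brokerId() {
    return _brokerId;
  }

  // Value equality over the broker id is what makes contains() checks over
  // collections of ReplicaPlacementInfo (see the snippets below) behave as expected.
  @Override
  public boolean equals(Object o) {
    return o instanceof ReplicaPlacementInfo && ((ReplicaPlacementInfo) o)._brokerId == _brokerId;
  }

  @Override
  public int hashCode() {
    return Integer.hashCode(_brokerId);
  }
}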
Use of com.linkedin.kafka.cruisecontrol.model.ReplicaPlacementInfo in project cruise-control by linkedin.
From the class ExecutionTaskManagerTest, method testStateChangeSequences:
@Test
public void testStateChangeSequences() {
  TopicPartition tp = new TopicPartition("topic", 0);
  List<List<ExecutionTaskState>> testSequences = new ArrayList<>();
  // Completed successfully.
  testSequences.add(Arrays.asList(ExecutionTaskState.IN_PROGRESS, ExecutionTaskState.COMPLETED));
  // Rollback succeeded.
  testSequences.add(Arrays.asList(ExecutionTaskState.IN_PROGRESS, ExecutionTaskState.ABORTING, ExecutionTaskState.ABORTED));
  // Rollback failed.
  testSequences.add(Arrays.asList(ExecutionTaskState.IN_PROGRESS, ExecutionTaskState.ABORTING, ExecutionTaskState.DEAD));
  // Cannot rollback.
  testSequences.add(Arrays.asList(ExecutionTaskState.IN_PROGRESS, ExecutionTaskState.DEAD));

  ReplicaPlacementInfo r0 = new ReplicaPlacementInfo(0);
  ReplicaPlacementInfo r1 = new ReplicaPlacementInfo(1);
  ReplicaPlacementInfo r2 = new ReplicaPlacementInfo(2);
  // Make sure the proposal does not involve leader movement.
  ExecutionProposal proposal = new ExecutionProposal(tp, 10, r2, Arrays.asList(r0, r2), Arrays.asList(r2, r1));
  StrategyOptions strategyOptions = new StrategyOptions.Builder(generateExpectedCluster(proposal)).build();

  for (List<ExecutionTaskState> sequence : testSequences) {
    taskManager.clear();
    taskManager.setExecutionModeForTaskTracker(false);
    taskManager.addExecutionProposals(Collections.singletonList(proposal), Collections.emptySet(), strategyOptions, null);
    taskManager.setRequestedInterBrokerPartitionMovementConcurrency(null);
    taskManager.setRequestedIntraBrokerPartitionMovementConcurrency(null);
    taskManager.setRequestedLeadershipMovementConcurrency(null);
    List<ExecutionTask> tasks = taskManager.getInterBrokerReplicaMovementTasks();
    assertEquals(1, tasks.size());
    ExecutionTask task = tasks.get(0);
    verifyStateChangeSequence(sequence, task, taskManager);
  }
  // Verify that the movement concurrency matches the default configuration.
  for (ConcurrencyType concurrencyType : ConcurrencyType.cachedValues()) {
    assertEquals(MOCK_DEFAULT_CONCURRENCY.get(concurrencyType).intValue(), taskManager.movementConcurrency(concurrencyType));
  }
}
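The proposal above keeps the leader on broker 2 in both the old replica list (r0, r2) and the new one (r2, r1), so only an inter-broker replica movement is generated: broker 0's replica moves to broker 1. A small illustration of that, assuming the replicasToAdd()/replicasToRemove() accessors seen in the OptimizationVerifier snippet further down:

// Illustration only; assumes ExecutionProposal#replicasToAdd()/#replicasToRemove() return the
// placements gained and lost by the proposal, as used in the OptimizationVerifier snippet below.
assertEquals(Collections.singleton(new ReplicaPlacementInfo(1)), proposal.replicasToAdd());    // broker 1 gains the replica
assertEquals(Collections.singleton(new ReplicaPlacementInfo(0)), proposal.replicasToRemove()); // broker 0 loses it
// The old and new leader are both broker 2, so no leadership movement task is created.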
Use of com.linkedin.kafka.cruisecontrol.model.ReplicaPlacementInfo in project cruise-control by linkedin.
From the class ExecutionTaskPlannerTest, method generateExpectedReplicas:
private Node[] generateExpectedReplicas(ExecutionProposal proposal) {
  int i = 0;
  Node[] expectedProposalReplicas = new Node[proposal.oldReplicas().size()];
  for (ReplicaPlacementInfo oldId : proposal.oldReplicas()) {
    expectedProposalReplicas[i++] = new Node(oldId.brokerId(), "null", -1);
  }
  return expectedProposalReplicas;
}
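A hedged example of how such a helper is typically used: the expected nodes feed a PartitionInfo for the mocked cluster metadata and are then compared against it (the enclosing test fixture is not part of this excerpt, so the variables here are illustrative):

// Illustrative usage only; the surrounding ExecutionTaskPlannerTest fixture is not shown in this excerpt.
Node[] expectedReplicas = generateExpectedReplicas(proposal);
PartitionInfo partitionInfo = new PartitionInfo(proposal.topicPartition().topic(),
                                                proposal.topicPartition().partition(),
                                                expectedReplicas[0],   // leader
                                                expectedReplicas,      // replicas
                                                expectedReplicas);     // in-sync replicas
assertArrayEquals(expectedReplicas, partitionInfo.replicas());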
Use of com.linkedin.kafka.cruisecontrol.model.ReplicaPlacementInfo in project cruise-control by linkedin.
From the class PreferredLeaderElectionGoalTest, method testOptimizeWithDemotedBrokersAndExcludeFollowerDemotion:
@Test
public void testOptimizeWithDemotedBrokersAndExcludeFollowerDemotion() {
  ClusterModel clusterModel = createClusterModel(true, false).clusterModel();
  clusterModel.setBrokerState(2, Broker.State.DEMOTED);
  Map<TopicPartition, ReplicaPlacementInfo> originalLeaderDistribution = clusterModel.getLeaderDistribution();
  Map<TopicPartition, List<ReplicaPlacementInfo>> originalReplicaDistribution = clusterModel.getReplicaDistribution();
  PreferredLeaderElectionGoal goal = new PreferredLeaderElectionGoal(false, true, null);
  // Before the optimization, goals are expected to be undecided wrt their provision status.
  assertEquals(ProvisionStatus.UNDECIDED, goal.provisionResponse().status());
  goal.optimize(clusterModel, Collections.emptySet(),
                new OptimizationOptions(Collections.emptySet(), Collections.emptySet(), Collections.emptySet()));
  // After the optimization, PreferredLeaderElectionGoal is expected to be undecided wrt its provision status.
  assertEquals(ProvisionStatus.UNDECIDED, goal.provisionResponse().status());
  Map<TopicPartition, List<ReplicaPlacementInfo>> optimizedReplicaDistribution = clusterModel.getReplicaDistribution();
  for (String t : Arrays.asList(TOPIC0, TOPIC1, TOPIC2)) {
    for (int p = 0; p < 3; p++) {
      TopicPartition tp = new TopicPartition(t, p);
      // ReplicaPlacementInfo equality is by broker id, so this checks whether broker 2 hosts a replica of tp.
      if (originalReplicaDistribution.get(tp).contains(new ReplicaPlacementInfo(2))) {
        if (originalLeaderDistribution.get(tp).brokerId() == 2) {
          // The demoted broker must end up as the last (least preferred) replica.
          List<Integer> replicas = optimizedReplicaDistribution.get(tp).stream()
                                                               .mapToInt(ReplicaPlacementInfo::brokerId)
                                                               .boxed()
                                                               .collect(Collectors.toList());
          assertEquals("Tp " + tp, 2, replicas.get(replicas.size() - 1).intValue());
        } else {
          // Follower demotion is excluded, so the replica list must be unchanged.
          assertEquals("Tp " + tp, originalReplicaDistribution.get(tp), optimizedReplicaDistribution.get(tp));
        }
      }
    }
  }
}
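The contains() check on originalReplicaDistribution.get(tp) only identifies partitions hosted on the demoted broker because ReplicaPlacementInfo uses value equality over the broker id (see the sketch near the top of this page). A quick illustration with assumed example data:

// Assumed example data; only the equality behaviour matters here.
List<ReplicaPlacementInfo> replicas = Arrays.asList(new ReplicaPlacementInfo(0), new ReplicaPlacementInfo(2));
assertTrue(replicas.contains(new ReplicaPlacementInfo(2)));   // broker 2 hosts a replica
assertFalse(replicas.contains(new ReplicaPlacementInfo(1)));  // broker 1 does not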
Use of com.linkedin.kafka.cruisecontrol.model.ReplicaPlacementInfo in project cruise-control by linkedin.
From the class OptimizationVerifier, method verifySoftGoalReplicaMovements:
private static boolean verifySoftGoalReplicaMovements(OptimizerResult result,
                                                      OptimizerResult resultOfPreviousPass,
                                                      Set<Replica> offlineReplicas,
                                                      List<Goal> goalByPriority) {
  // This check only constrains self-healing movements generated by soft goals alone; skip it if any hard goal is involved.
  if (goalByPriority.stream().anyMatch(Goal::isHardGoal)) {
    return true;
  }
  // Populate offline replicas.
  Map<TopicPartition, Set<Integer>> offlineReplicasByTopicPartition = new HashMap<>();
  offlineReplicas.forEach(r -> {
    offlineReplicasByTopicPartition.putIfAbsent(r.topicPartition(), new HashSet<>());
    offlineReplicasByTopicPartition.get(r.topicPartition()).add(r.originalBroker().id());
  });
  // Populate replicas moved in the previous pass; these replicas are treated as immigrant replicas
  // when soft goals calculate execution proposals.
  Map<TopicPartition, Set<ReplicaPlacementInfo>> replicasMovedInPreviousPass = new HashMap<>();
  if (resultOfPreviousPass != null) {
    resultOfPreviousPass.goalProposals().forEach(p -> replicasMovedInPreviousPass.put(p.topicPartition(), p.replicasToAdd()));
  }
  for (ExecutionProposal p : result.goalProposals()) {
    for (ReplicaPlacementInfo r : p.replicasToRemove()) {
      // Soft goals may only (1) move offline replicas or (2) move immigrant replicas from alive brokers.
      if (!(offlineReplicasByTopicPartition.containsKey(p.topicPartition())
            && offlineReplicasByTopicPartition.get(p.topicPartition()).contains(r.brokerId()))
          && (!replicasMovedInPreviousPass.containsKey(p.topicPartition())
              || !replicasMovedInPreviousPass.get(p.topicPartition()).contains(r))) {
        LOG.error("Self-healing replica movement generated by soft goals must be limited to only moving offline or immigrant replicas."
                  + " Proposal violated: {}, goals used: {}).", p, goalByPriority);
        return false;
      }
    }
  }
  return true;
}
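A hedged sketch of how this check is meant to be driven across consecutive optimization passes, so that replicas added by an earlier pass count as immigrants in the next one (the surrounding verification loop is not part of this excerpt; resultsByPass is an illustrative name):

// Illustrative driver loop; not part of the excerpted OptimizationVerifier code.
OptimizerResult previousResult = null;
for (OptimizerResult result : resultsByPass) {
  if (!verifySoftGoalReplicaMovements(result, previousResult, offlineReplicas, goalByPriority)) {
    throw new AssertionError("Soft goals moved a replica that is neither offline nor an immigrant.");
  }
  previousResult = result;
}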