Use of com.linkedin.kafka.cruisecontrol.common.TopicMinIsrCache.MinIsrWithTime in project cruise-control by linkedin.
The class ExecutionTaskPlannerTest, method testGetInterBrokerPartitionMovementWithMinIsrTasks.
@Test
public void testGetInterBrokerPartitionMovementWithMinIsrTasks() {
  List<ExecutionProposal> proposals = new ArrayList<>();
  proposals.add(_rf4PartitionMovement0);
  proposals.add(_rf4PartitionMovement1);
  proposals.add(_rf4PartitionMovement2);
  proposals.add(_rf4PartitionMovement3);
  // Test the PrioritizeOneAboveMinIsrWithOfflineReplicasStrategy execution strategy.
  // Create prioritizeOneAboveMinIsrMovementPlanner, chained after the PrioritizeMinIsrWithOfflineReplicasStrategy.
  Properties prioritizeOneAboveMinIsrMovementProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  prioritizeOneAboveMinIsrMovementProps.setProperty(ExecutorConfig.DEFAULT_REPLICA_MOVEMENT_STRATEGIES_CONFIG,
                                                    String.format("%s,%s", PrioritizeMinIsrWithOfflineReplicasStrategy.class.getName(),
                                                                  PrioritizeOneAboveMinIsrWithOfflineReplicasStrategy.class.getName()));
  ExecutionTaskPlanner prioritizeOneAboveMinIsrMovementPlanner =
      new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(prioritizeOneAboveMinIsrMovementProps));
  Set<PartitionInfo> partitions = new HashSet<>();
  partitions.add(generatePartitionInfo(_rf4PartitionMovement0, false));
  partitions.add(generatePartitionInfoWithUrpHavingOfflineReplica(_rf4PartitionMovement1, 1));
  partitions.add(generatePartitionInfoWithUrpHavingOfflineReplica(_rf4PartitionMovement2, 3));
  partitions.add(generatePartitionInfoWithUrpHavingOfflineReplica(_rf4PartitionMovement3, 2));
  Cluster expectedCluster = new Cluster(null, _rf4ExpectedNodes, partitions, Collections.emptySet(), Collections.emptySet());
  // Set the topic minISR to 2.
  Map<String, MinIsrWithTime> minIsrWithTimeByTopic = Collections.singletonMap(TOPIC3, new MinIsrWithTime((short) 2, 0));
  StrategyOptions strategyOptions = new StrategyOptions.Builder(expectedCluster).minIsrWithTimeByTopic(minIsrWithTimeByTopic).build();
  Map<Integer, Integer> readyBrokers = new HashMap<>();
  readyBrokers.put(0, 5);
  readyBrokers.put(1, 6);
  readyBrokers.put(2, 6);
  readyBrokers.put(3, 6);
  readyBrokers.put(4, 5);
  readyBrokers.put(5, 6);
  prioritizeOneAboveMinIsrMovementPlanner.addExecutionProposals(proposals, strategyOptions, null);
  List<ExecutionTask> partitionMovementTasks =
      prioritizeOneAboveMinIsrMovementPlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _rf4PartitionMovement2, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _rf4PartitionMovement3, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _rf4PartitionMovement1, partitionMovementTasks.get(2).proposal());
  assertEquals("Fourth task", _rf4PartitionMovement0, partitionMovementTasks.get(3).proposal());
}
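The expected ordering above follows from the ISR state of each partition. Below is a minimal, hypothetical sketch (not part of the test; the helper method and values are assumptions) of how the number of offline replicas maps to that state with replication factor 4 and min.insync.replicas = 2, which is the priority order the two chained strategies apply: UnderMinISR first, then AtMinISR, then OneAboveMinISR, then fully in-sync partitions.

// Hypothetical illustration of the ISR states behind the expected task order above.
public static void printIsrStates() {
  final int minIsr = 2;
  final int replicationFactor = 4;
  // _rf4PartitionMovement2 has 3 offline replicas, movement3 has 2, movement1 has 1, movement0 has none.
  for (int offline : new int[] {3, 2, 1, 0}) {
    int inSync = replicationFactor - offline;
    String state = inSync < minIsr ? "UnderMinISR"
                   : inSync == minIsr ? "AtMinISR"
                   : inSync == minIsr + 1 ? "OneAboveMinISR" : "fully in sync";
    System.out.printf("offline=%d -> %s%n", offline, state);
  }
}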
Use of com.linkedin.kafka.cruisecontrol.common.TopicMinIsrCache.MinIsrWithTime in project cruise-control by linkedin.
The class ConcurrencyAdjusterTest, method testRecommendedMinIsrBasedConcurrency.
@Test
public void testRecommendedMinIsrBasedConcurrency() {
  // Cluster with an online out-of-sync partition.
  Cluster cluster = getClusterWithOutOfSyncPartition(1, false);
  // 1. Verify a recommended decrease in concurrency for different concurrency types due to AtMinISR partitions without offline replicas.
  // Cache with a single entry that makes TP1 in the cluster AtMinISR.
  Map<String, MinIsrWithTime> minIsrWithTimeByTopic = Collections.singletonMap(TOPIC1, new MinIsrWithTime((short) 1, MOCK_TIME_MS));
  // 1.1. Inter-broker replica reassignment (non-capped).
  Integer recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic,
                                                                          MOCK_MAX_PARTITION_MOVEMENTS_PER_BROKER,
                                                                          ConcurrencyType.INTER_BROKER_REPLICA);
  assertEquals(MOCK_MAX_PARTITION_MOVEMENTS_PER_BROKER / MOCK_MD_INTER_BROKER_REPLICA, recommendedConcurrency.intValue());
  // 1.2. Leadership reassignment (non-capped).
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic, MOCK_MAX_LEADERSHIP_MOVEMENTS,
                                                                 ConcurrencyType.LEADERSHIP);
  assertEquals(MOCK_MAX_LEADERSHIP_MOVEMENTS / MOCK_MD_LEADERSHIP, recommendedConcurrency.intValue());
  // 1.3. Inter-broker replica reassignment (capped).
  int currentMovementConcurrency = MOCK_MIN_PARTITION_MOVEMENTS_PER_BROKER * MOCK_MD_INTER_BROKER_REPLICA;
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic, currentMovementConcurrency,
                                                                 ConcurrencyType.INTER_BROKER_REPLICA);
  assertEquals(MOCK_MIN_PARTITION_MOVEMENTS_PER_BROKER, recommendedConcurrency.intValue());
  // 1.4. Leadership reassignment (capped).
  currentMovementConcurrency = (MOCK_MIN_LEADERSHIP_MOVEMENTS_CONFIG * MOCK_MD_LEADERSHIP) - 1;
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic, currentMovementConcurrency,
                                                                 ConcurrencyType.LEADERSHIP);
  assertEquals(MOCK_MIN_LEADERSHIP_MOVEMENTS_CONFIG, recommendedConcurrency.intValue());
  // 2. Verify a recommended cancellation of the execution (i.e. the concurrency type is irrelevant) due to UnderMinISR partitions
  // without offline replicas.
  // Cache with a single entry that makes TP1 in the cluster UnderMinISR.
  minIsrWithTimeByTopic = Collections.singletonMap(TOPIC1, new MinIsrWithTime((short) 2, MOCK_TIME_MS));
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic, MOCK_MAX_PARTITION_MOVEMENTS_PER_BROKER,
                                                                 ConcurrencyType.INTER_BROKER_REPLICA);
  assertEquals(ExecutionUtils.CANCEL_THE_EXECUTION, recommendedConcurrency.intValue());
  // 3. Verify that if the minISR value for topics containing (At/Under)MinISR partitions in the given Kafka cluster is missing from the
  // given cache, then no change in concurrency is recommended.
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, Collections.emptyMap(), MOCK_MAX_PARTITION_MOVEMENTS_PER_BROKER,
                                                                 ConcurrencyType.INTER_BROKER_REPLICA);
  assertNull(recommendedConcurrency);
  // 4. Verify no change in concurrency due to the lack of (At/Under)MinISR partitions (i.e. the concurrency type is irrelevant).
  // Cluster with an all-in-sync partition.
  cluster = getClusterWithOutOfSyncPartition(2, false);
  // Cache with a single entry that makes TP1 in the cluster neither AtMinISR nor UnderMinISR.
  minIsrWithTimeByTopic = Collections.singletonMap(TOPIC1, new MinIsrWithTime((short) 1, MOCK_TIME_MS));
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic, MOCK_MAX_PARTITION_MOVEMENTS_PER_BROKER,
                                                                 ConcurrencyType.INTER_BROKER_REPLICA);
  assertNull(recommendedConcurrency);
  // 5. Verify no change in concurrency due to (At/Under)MinISR partitions with an offline replica (i.e. the concurrency type is irrelevant).
  // Cluster with an offline out-of-sync partition.
  cluster = getClusterWithOutOfSyncPartition(1, true);
  // 5.1. Recommendation with an AtMinISR partition containing an offline replica.
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic, MOCK_MAX_PARTITION_MOVEMENTS_PER_BROKER,
                                                                 ConcurrencyType.INTER_BROKER_REPLICA);
  assertNull(recommendedConcurrency);
  // 5.2. Recommendation with an UnderMinISR partition containing an offline replica.
  // Cache with a single entry that makes TP1 in the cluster UnderMinISR.
  minIsrWithTimeByTopic = Collections.singletonMap(TOPIC1, new MinIsrWithTime((short) 2, MOCK_TIME_MS));
  recommendedConcurrency = ExecutionUtils.recommendedConcurrency(cluster, minIsrWithTimeByTopic, MOCK_MAX_PARTITION_MOVEMENTS_PER_BROKER,
                                                                 ConcurrencyType.INTER_BROKER_REPLICA);
  assertNull(recommendedConcurrency);
}
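For context, a minimal sketch of how a caller might interpret the return value exercised above. The helper below is hypothetical, not part of the project, and only encodes what the assertions show: null means no change to the current concurrency, ExecutionUtils.CANCEL_THE_EXECUTION means the ongoing execution should be cancelled, and any other value is the newly recommended concurrency.

// Hypothetical helper: apply a recommendation returned by ExecutionUtils.recommendedConcurrency(...).
static int applyRecommendation(Integer recommended, int currentConcurrency) {
  if (recommended == null) {
    // No (At/Under)MinISR pressure detected -- keep the current concurrency.
    return currentConcurrency;
  }
  if (recommended.intValue() == ExecutionUtils.CANCEL_THE_EXECUTION) {
    // UnderMinISR partitions without offline replicas -- the execution should be cancelled.
    throw new IllegalStateException("UnderMinISR partitions detected; cancelling the execution.");
  }
  // AtMinISR partitions without offline replicas -- use the decreased (possibly capped) concurrency.
  return recommended;
}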
Use of com.linkedin.kafka.cruisecontrol.common.TopicMinIsrCache.MinIsrWithTime in project cruise-control by linkedin.
The class ExecutionTaskPlannerTest, method testGetInterBrokerPartitionMovementTasks.
@Test
public void testGetInterBrokerPartitionMovementTasks() {
  List<ExecutionProposal> proposals = new ArrayList<>();
  proposals.add(_partitionMovement0);
  proposals.add(_partitionMovement1);
  proposals.add(_partitionMovement2);
  proposals.add(_partitionMovement3);
  // Test different execution strategies.
  ExecutionTaskPlanner basePlanner =
      new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties()));
  // Create postponeUrpPlanner.
  Properties postponeUrpProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  postponeUrpProps.setProperty(ExecutorConfig.DEFAULT_REPLICA_MOVEMENT_STRATEGIES_CONFIG, PostponeUrpReplicaMovementStrategy.class.getName());
  ExecutionTaskPlanner postponeUrpPlanner = new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(postponeUrpProps));
  // Create prioritizeLargeMovementPlanner.
  Properties prioritizeLargeMovementProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  prioritizeLargeMovementProps.setProperty(ExecutorConfig.DEFAULT_REPLICA_MOVEMENT_STRATEGIES_CONFIG,
                                           String.format("%s,%s", PrioritizeLargeReplicaMovementStrategy.class.getName(),
                                                         BaseReplicaMovementStrategy.class.getName()));
  ExecutionTaskPlanner prioritizeLargeMovementPlanner = new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(prioritizeLargeMovementProps));
  // Create prioritizeSmallMovementPlanner.
  Properties prioritizeSmallMovementProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  prioritizeSmallMovementProps.setProperty(ExecutorConfig.DEFAULT_REPLICA_MOVEMENT_STRATEGIES_CONFIG,
                                           String.format("%s,%s", PrioritizeSmallReplicaMovementStrategy.class.getName(),
                                                         BaseReplicaMovementStrategy.class.getName()));
  ExecutionTaskPlanner prioritizeSmallMovementPlanner = new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(prioritizeSmallMovementProps));
  // Create smallUrpMovementPlanner.
  Properties smallUrpMovementProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  smallUrpMovementProps.setProperty(ExecutorConfig.DEFAULT_REPLICA_MOVEMENT_STRATEGIES_CONFIG,
                                    String.format("%s,%s", PrioritizeSmallReplicaMovementStrategy.class.getName(),
                                                  PostponeUrpReplicaMovementStrategy.class.getName()));
  ExecutionTaskPlanner smallUrpMovementPlanner = new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(smallUrpMovementProps));
  // Create contradictingMovementPlanner containing both small and large replica movement strategies
  // (the strategy listed earlier in the chain takes precedence; later ones only break remaining ties).
  Properties contradictingMovementProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  contradictingMovementProps.setProperty(ExecutorConfig.DEFAULT_REPLICA_MOVEMENT_STRATEGIES_CONFIG,
                                         String.format("%s,%s,%s", PrioritizeSmallReplicaMovementStrategy.class.getName(),
                                                       PostponeUrpReplicaMovementStrategy.class.getName(),
                                                       PrioritizeLargeReplicaMovementStrategy.class.getName()));
  ExecutionTaskPlanner contradictingMovementPlanner = new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(contradictingMovementProps));
  // Create prioritizeMinIsrMovementPlanner.
  Properties prioritizeMinIsrMovementProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  prioritizeMinIsrMovementProps.setProperty(ExecutorConfig.DEFAULT_REPLICA_MOVEMENT_STRATEGIES_CONFIG,
                                            PrioritizeMinIsrWithOfflineReplicasStrategy.class.getName());
  ExecutionTaskPlanner prioritizeMinIsrMovementPlanner = new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(prioritizeMinIsrMovementProps));
  Set<PartitionInfo> partitions = new HashSet<>();
  partitions.add(generatePartitionInfoWithUrpHavingOfflineReplica(_partitionMovement0, true));
  partitions.add(generatePartitionInfo(_partitionMovement1, false));
  partitions.add(generatePartitionInfoWithUrpHavingOfflineReplica(_partitionMovement2, true));
  partitions.add(generatePartitionInfo(_partitionMovement3, false));
  Cluster expectedCluster = new Cluster(null, _expectedNodes, partitions, Collections.emptySet(), Collections.emptySet());
  // This ensures that _partitionMovement0 and _partitionMovement2 are AtMinISR, while the other partitions are not.
  Map<String, MinIsrWithTime> minIsrWithTimeByTopic =
      Collections.singletonMap(TOPIC2, new MinIsrWithTime((short) (_partitionMovement0.oldReplicas().size() - 1), 0));
  StrategyOptions strategyOptions = new StrategyOptions.Builder(expectedCluster).minIsrWithTimeByTopic(minIsrWithTimeByTopic).build();
  Map<Integer, Integer> readyBrokers = new HashMap<>();
  readyBrokers.put(0, 14);
  readyBrokers.put(1, 14);
  readyBrokers.put(2, 14);
  readyBrokers.put(3, 14);
  basePlanner.addExecutionProposals(proposals, strategyOptions, null);
  List<ExecutionTask> partitionMovementTasks =
      basePlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _partitionMovement0, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _partitionMovement2, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _partitionMovement1, partitionMovementTasks.get(2).proposal());
  postponeUrpPlanner.addExecutionProposals(proposals, strategyOptions, null);
  partitionMovementTasks = postponeUrpPlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _partitionMovement1, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _partitionMovement3, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _partitionMovement0, partitionMovementTasks.get(2).proposal());
  prioritizeLargeMovementPlanner.addExecutionProposals(proposals, strategyOptions, null);
  partitionMovementTasks = prioritizeLargeMovementPlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _partitionMovement1, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _partitionMovement3, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _partitionMovement2, partitionMovementTasks.get(2).proposal());
  prioritizeSmallMovementPlanner.addExecutionProposals(proposals, strategyOptions, null);
  partitionMovementTasks = prioritizeSmallMovementPlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _partitionMovement0, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _partitionMovement2, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _partitionMovement3, partitionMovementTasks.get(2).proposal());
  assertEquals("Fourth task", _partitionMovement1, partitionMovementTasks.get(3).proposal());
  smallUrpMovementPlanner.addExecutionProposals(proposals, strategyOptions, null);
  partitionMovementTasks = smallUrpMovementPlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _partitionMovement3, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _partitionMovement1, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _partitionMovement0, partitionMovementTasks.get(2).proposal());
  assertEquals("Fourth task", _partitionMovement2, partitionMovementTasks.get(3).proposal());
  contradictingMovementPlanner.addExecutionProposals(proposals, strategyOptions, null);
  partitionMovementTasks = contradictingMovementPlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _partitionMovement3, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _partitionMovement1, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _partitionMovement0, partitionMovementTasks.get(2).proposal());
  assertEquals("Fourth task", _partitionMovement2, partitionMovementTasks.get(3).proposal());
  prioritizeMinIsrMovementPlanner.addExecutionProposals(proposals, strategyOptions, null);
  partitionMovementTasks = prioritizeMinIsrMovementPlanner.getInterBrokerReplicaMovementTasks(readyBrokers, Collections.emptySet(), _defaultPartitionsMaxCap);
  assertEquals("First task", _partitionMovement0, partitionMovementTasks.get(0).proposal());
  assertEquals("Second task", _partitionMovement2, partitionMovementTasks.get(1).proposal());
  assertEquals("Third task", _partitionMovement1, partitionMovementTasks.get(2).proposal());
  assertEquals("Fourth task", _partitionMovement3, partitionMovementTasks.get(3).proposal());
}
Use of com.linkedin.kafka.cruisecontrol.common.TopicMinIsrCache.MinIsrWithTime in project cruise-control by linkedin.
The class ExecutionUtils, method populateMinIsrState.
/**
 * Populates the given sets with partitions that are (1) UnderMinISR, (2) AtMinISR, or (3) OneAboveMinISR, considering only partitions
 * that match the requested offline-replica state: with at least one offline replica when {@code withOfflineReplicas=true}, or without
 * any offline replicas when {@code withOfflineReplicas=false}. Partition states are determined using the topics from the given Kafka
 * cluster and the {@link org.apache.kafka.common.config.TopicConfig#MIN_IN_SYNC_REPLICAS_CONFIG} values from the given
 * {@code minIsrWithTimeByTopic}.
 *
 * If the minISR value for a topic in the given Kafka cluster is missing from the given {@code minIsrWithTimeByTopic}, this function skips
 * populating the minISR state for partitions of that topic.
 *
 * @param cluster Kafka cluster.
 * @param minIsrWithTimeByTopic Value and capture time of {@link org.apache.kafka.common.config.TopicConfig#MIN_IN_SYNC_REPLICAS_CONFIG} by topic.
 * @param underMinIsrWithoutOfflineReplicas Set to populate with UnderMinISR partitions.
 * @param atMinIsrWithoutOfflineReplicas Set to populate with AtMinISR partitions.
 * @param oneAboveMinIsrWithoutOfflineReplicas Set to populate with OneAboveMinISR partitions.
 * @param withOfflineReplicas {@code true} to retrieve (At/Under/OneAbove)MinISR partitions each containing at least one offline replica,
 *                            {@code false} to retrieve (At/Under/OneAbove)MinISR partitions without any offline replicas.
 */
public static void populateMinIsrState(Cluster cluster,
                                       Map<String, MinIsrWithTime> minIsrWithTimeByTopic,
                                       Set<PartitionInfo> underMinIsrWithoutOfflineReplicas,
                                       Set<PartitionInfo> atMinIsrWithoutOfflineReplicas,
                                       Set<PartitionInfo> oneAboveMinIsrWithoutOfflineReplicas,
                                       boolean withOfflineReplicas) {
  for (String topic : cluster.topics()) {
    MinIsrWithTime minIsrWithTime = minIsrWithTimeByTopic.get(topic);
    if (minIsrWithTime == null) {
      // The minISR value for this topic is not cached; skip its partitions.
      continue;
    }
    int minISR = minIsrWithTime.minISR();
    for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
      // Consider only partitions whose offline-replica state matches the requested one.
      boolean hasOfflineReplica = partitionInfo.offlineReplicas().length != 0;
      if (hasOfflineReplica != withOfflineReplicas) {
        continue;
      }
      int numInSyncReplicas = partitionInfo.inSyncReplicas().length;
      if (numInSyncReplicas < minISR && underMinIsrWithoutOfflineReplicas != null) {
        underMinIsrWithoutOfflineReplicas.add(partitionInfo);
      } else if (numInSyncReplicas == minISR && atMinIsrWithoutOfflineReplicas != null) {
        atMinIsrWithoutOfflineReplicas.add(partitionInfo);
      } else if (numInSyncReplicas == minISR + 1 && oneAboveMinIsrWithoutOfflineReplicas != null) {
        oneAboveMinIsrWithoutOfflineReplicas.add(partitionInfo);
      }
    }
  }
}
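A minimal usage sketch (the caller is hypothetical, and the cluster and minIsrWithTimeByTopic variables are assumed to already exist) showing how the output sets are typically supplied and consumed. Passing null for a set skips collecting that state, as the null checks above allow.

// Hypothetical caller: collect only UnderMinISR and AtMinISR partitions that have no offline replicas.
Set<PartitionInfo> underMinIsr = new HashSet<>();
Set<PartitionInfo> atMinIsr = new HashSet<>();
ExecutionUtils.populateMinIsrState(cluster, minIsrWithTimeByTopic, underMinIsr, atMinIsr, null, false);
if (!underMinIsr.isEmpty()) {
  // For example, a caller could avoid moving replicas of partitions that are already UnderMinISR.
}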