Example usage of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in the linkedin/cruise-control project, taken from the data() method of the RandomGoalTest class.
/**
 * Populate parameters for the {@link OptimizationVerifier}. All brokers are alive.
 *
 * @return Parameters for the {@link OptimizationVerifier}.
 */
@Parameters
public static Collection<Object[]> data() throws Exception {
  int goalRepetition = 4;
  Collection<Object[]> p = new ArrayList<>();
  // Goals in priority order: lower priority value = higher priority. Hard (capacity/rack) goals first.
  List<String> goalsSortedByPriority = Arrays.asList(RackAwareGoal.class.getName(), ReplicaCapacityGoal.class.getName(), DiskCapacityGoal.class.getName(), NetworkInboundCapacityGoal.class.getName(), NetworkOutboundCapacityGoal.class.getName(), CpuCapacityGoal.class.getName(), ReplicaDistributionGoal.class.getName(), PotentialNwOutGoal.class.getName(), DiskUsageDistributionGoal.class.getName(), NetworkInboundUsageDistributionGoal.class.getName(), NetworkOutboundUsageDistributionGoal.class.getName(), CpuUsageDistributionGoal.class.getName(), TopicReplicaDistributionGoal.class.getName(), PreferredLeaderElectionGoal.class.getName(), LeaderBytesInDistributionGoal.class.getName());
  Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(1500L));
  BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  List<OptimizationVerifier.Verification> verifications = Arrays.asList(NEW_BROKERS, DEAD_BROKERS, REGRESSION);
  // Test: Single goal at a time, each at its natural priority.
  int goalPriority = 1;
  for (String goalName : goalsSortedByPriority) {
    p.add(params(Collections.emptyMap(), Collections.singletonMap(goalPriority, goalName), balancingConstraint, verifications));
    goalPriority++;
  }
  // Test: Consecutive repetition of the same goal (goalRepetition times each); priorities keep
  // increasing across goals so each parameter set uses a distinct priority range.
  goalPriority = 1;
  for (String goalName : goalsSortedByPriority) {
    Map<Integer, String> repeatedGoalNamesByPriority = priorityMap(Collections.nCopies(goalRepetition, goalName), goalPriority);
    goalPriority += goalRepetition;
    p.add(params(Collections.emptyMap(), repeatedGoalNamesByPriority, balancingConstraint, verifications));
  }
  // Test: Nested repetition -- the full goal list repeated goalRepetition times in one map.
  // (Previously named "nonRepetitiveGoalNamesByPriority", which was misleading: it does repeat goals.)
  List<String> nestedRepetitionGoalNames = new ArrayList<>();
  for (int i = 0; i < goalRepetition; i++) {
    nestedRepetitionGoalNames.addAll(goalsSortedByPriority);
  }
  p.add(params(Collections.emptyMap(), priorityMap(nestedRepetitionGoalNames, 1), balancingConstraint, verifications));
  // Test: No goal.
  p.add(params(Collections.emptyMap(), Collections.emptyMap(), balancingConstraint, verifications));
  // Test: Shuffled soft goals (hard goals removed first).
  List<String> shuffledSoftGoalNames = new ArrayList<>(goalsSortedByPriority);
  shuffledSoftGoalNames.removeAll(Arrays.asList(RackAwareGoal.class.getName(),
                                                ReplicaCapacityGoal.class.getName(),
                                                CpuCapacityGoal.class.getName(),
                                                DiskCapacityGoal.class.getName(),
                                                NetworkInboundCapacityGoal.class.getName(),
                                                NetworkOutboundCapacityGoal.class.getName()));
  Collections.shuffle(shuffledSoftGoalNames, RANDOM);
  p.add(params(Collections.emptyMap(), priorityMap(shuffledSoftGoalNames, 1), balancingConstraint, verifications));
  return p;
}

/**
 * Build a priority-to-goal-name map, assigning consecutive priorities (starting at
 * {@code startPriority}) to the given goal names in list order.
 *
 * @param goalNames Goal class names in priority order.
 * @param startPriority The priority assigned to the first goal name.
 * @return Map from priority to goal name.
 */
private static Map<Integer, String> priorityMap(List<String> goalNames, int startPriority) {
  Map<Integer, String> goalNameByPriority = new HashMap<>();
  int priority = startPriority;
  for (String goalName : goalNames) {
    goalNameByPriority.put(priority++, goalName);
  }
  return goalNameByPriority;
}
Example usage of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in the linkedin/cruise-control project, taken from the data() method of the RandomSelfHealingTest class.
/**
 * Populate parameters for the {@link OptimizationVerifier}. Tests self-healing of clusters
 * with a single dead broker and with multiple dead brokers (the previous Javadoc claimed
 * "All brokers are alive", which contradicted the NUM_DEAD_BROKERS settings below).
 *
 * @return Parameters for the {@link OptimizationVerifier}.
 */
@Parameters(name = "{1}-{0}")
public static Collection<Object[]> data() {
  Collection<Object[]> p = new ArrayList<>();
  // Default goals in priority order: lower key = higher priority. Hard goals first.
  List<String> defaultGoalsSortedByPriority = Arrays.asList(RackAwareGoal.class.getName(),
                                                            ReplicaCapacityGoal.class.getName(),
                                                            DiskCapacityGoal.class.getName(),
                                                            NetworkInboundCapacityGoal.class.getName(),
                                                            NetworkOutboundCapacityGoal.class.getName(),
                                                            CpuCapacityGoal.class.getName(),
                                                            ReplicaDistributionGoal.class.getName(),
                                                            PotentialNwOutGoal.class.getName(),
                                                            DiskUsageDistributionGoal.class.getName(),
                                                            NetworkInboundUsageDistributionGoal.class.getName(),
                                                            NetworkOutboundUsageDistributionGoal.class.getName(),
                                                            CpuUsageDistributionGoal.class.getName(),
                                                            TopicReplicaDistributionGoal.class.getName());
  Map<Integer, String> goalNameByPriority = new HashMap<>();
  for (int i = 0; i < defaultGoalsSortedByPriority.size(); i++) {
    goalNameByPriority.put(i + 1, defaultGoalsSortedByPriority.get(i));
  }
  // Goals used in kafka-assigner mode.
  Map<Integer, String> kafkaAssignerGoals = new HashMap<>();
  kafkaAssignerGoals.put(0, KafkaAssignerEvenRackAwareGoal.class.getName());
  kafkaAssignerGoals.put(1, KafkaAssignerDiskUsageDistributionGoal.class.getName());
  Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(2000L));
  BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  List<OptimizationVerifier.Verification> verifications = Arrays.asList(NEW_BROKERS, DEAD_BROKERS, REGRESSION);
  List<OptimizationVerifier.Verification> kafkaAssignerVerifications = Arrays.asList(DEAD_BROKERS, REGRESSION, GOAL_VIOLATION);
  // -- TEST DECK #1: SINGLE DEAD BROKER.
  // Test: Single Goal. Each goal is exercised with/without excluded topic "T0" and with the
  // final boolean flag both true and false (flag semantics defined by params(...) -- not visible here).
  Map<ClusterProperty, Number> singleDeadBroker = new HashMap<>();
  singleDeadBroker.put(ClusterProperty.NUM_DEAD_BROKERS, 1);
  int testId = 0;
  for (Map.Entry<Integer, String> entry : goalNameByPriority.entrySet()) {
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()), balancingConstraint, Collections.emptySet(), verifications, true));
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()), balancingConstraint, Collections.emptySet(), verifications, false));
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()), balancingConstraint, Collections.singleton("T0"), verifications, true));
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()), balancingConstraint, Collections.singleton("T0"), verifications, false));
  }
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()), balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()), balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, false));
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()), balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, true));
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()), balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, false));
  // Raise the replica-per-broker cap and build a fresh constraint for the multi-goal runs.
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(5100L));
  balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  // Test: All Goals.
  p.add(params(testId++, singleDeadBroker, goalNameByPriority, balancingConstraint, Collections.emptySet(), verifications, true));
  p.add(params(testId++, singleDeadBroker, goalNameByPriority, balancingConstraint, Collections.singleton("T0"), verifications, true));
  p.add(params(testId++, singleDeadBroker, kafkaAssignerGoals, balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, singleDeadBroker, kafkaAssignerGoals, balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, true));
  // -- TEST DECK #2: MULTIPLE DEAD BROKERS.
  // Test: Single Goal.
  Map<ClusterProperty, Number> multipleDeadBrokers = new HashMap<>();
  multipleDeadBrokers.put(ClusterProperty.NUM_DEAD_BROKERS, 5);
  for (Map.Entry<Integer, String> entry : goalNameByPriority.entrySet()) {
    p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(entry.getKey(), entry.getValue()), balancingConstraint, Collections.emptySet(), verifications, true));
    p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(entry.getKey(), entry.getValue()), balancingConstraint, Collections.singleton("T0"), verifications, true));
  }
  p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()), balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()), balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, true));
  // Test: All Goals.
  p.add(params(testId++, multipleDeadBrokers, goalNameByPriority, balancingConstraint, Collections.emptySet(), verifications, true));
  p.add(params(testId++, multipleDeadBrokers, goalNameByPriority, balancingConstraint, Collections.singleton("T0"), verifications, true));
  p.add(params(testId++, multipleDeadBrokers, kafkaAssignerGoals, balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, multipleDeadBrokers, kafkaAssignerGoals, balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, true));
  return p;
}
Example usage of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in the linkedin/cruise-control project, taken from the test() method of the KafkaAssignerDiskUsageDistributionGoalTest class.
@Test
public void test() {
  // Build a config with a tight replica cap (10 per broker) and a 1.05 disk balance threshold.
  Properties configProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  configProps.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(10L));
  configProps.setProperty(KafkaCruiseControlConfig.DISK_BALANCE_THRESHOLD_CONFIG, "1.05");
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(configProps);
  BalancingConstraint constraint = new BalancingConstraint(config);
  // Run the kafka-assigner disk-usage-distribution goal over the fixture cluster.
  KafkaAssignerDiskUsageDistributionGoal distributionGoal = new KafkaAssignerDiskUsageDistributionGoal(constraint);
  ClusterModel model = createClusterModel();
  distributionGoal.optimize(model, Collections.emptySet(), Collections.emptySet());
  // Smoke test: print the resulting per-broker disk utilization for manual inspection.
  for (Broker broker : model.brokers()) {
    System.out.println("Broker " + broker.id() + " = " + broker.load().expectedUtilizationFor(DISK));
  }
}
Example usage of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in the linkedin/cruise-control project, taken from the testSwapReplicas() method of the KafkaAssignerDiskUsageDistributionGoalTest class.
@Test
public void testSwapReplicas() {
  // Build a config with a tight replica cap (10 per broker) and a 1.05 disk balance threshold.
  Properties configProps = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  configProps.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(10L));
  configProps.setProperty(KafkaCruiseControlConfig.DISK_BALANCE_THRESHOLD_CONFIG, "1.05");
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(configProps);
  BalancingConstraint constraint = new BalancingConstraint(config);
  KafkaAssignerDiskUsageDistributionGoal distributionGoal = new KafkaAssignerDiskUsageDistributionGoal(constraint);
  ClusterModel model = createClusterModel();
  // Cluster-wide mean disk usage is the target utilization for each broker pair.
  double meanDiskUsage = model.load().expectedUtilizationFor(DISK) / model.capacityFor(DISK);
  // Brokers 0<->1 and 2<->3 have swaps that move both closer to the mean; 0<->2 does not.
  assertTrue(distributionGoal.swapReplicas(model.broker(0), model.broker(1), meanDiskUsage, model, Collections.emptySet()));
  assertFalse(distributionGoal.swapReplicas(model.broker(0), model.broker(2), meanDiskUsage, model, Collections.emptySet()));
  assertTrue(distributionGoal.swapReplicas(model.broker(2), model.broker(3), meanDiskUsage, model, Collections.emptySet()));
}
Example usage of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in the linkedin/cruise-control project, taken from the data() method of the DeterministicClusterTest class.
/**
 * Populate parameters for the {@link OptimizationVerifier}. All brokers are alive.
 *
 * @return Parameters for the {@link OptimizationVerifier}.
 */
@Parameterized.Parameters
public static Collection<Object[]> data() {
  Collection<Object[]> p = new ArrayList<>();
  // Default goals in priority order: lower key = higher priority. Hard goals first.
  List<String> goalsSortedByPriority = Arrays.asList(RackAwareGoal.class.getName(),
                                                     ReplicaCapacityGoal.class.getName(),
                                                     DiskCapacityGoal.class.getName(),
                                                     NetworkInboundCapacityGoal.class.getName(),
                                                     NetworkOutboundCapacityGoal.class.getName(),
                                                     CpuCapacityGoal.class.getName(),
                                                     ReplicaDistributionGoal.class.getName(),
                                                     PotentialNwOutGoal.class.getName(),
                                                     DiskUsageDistributionGoal.class.getName(),
                                                     NetworkInboundUsageDistributionGoal.class.getName(),
                                                     NetworkOutboundUsageDistributionGoal.class.getName(),
                                                     CpuUsageDistributionGoal.class.getName(),
                                                     TopicReplicaDistributionGoal.class.getName(),
                                                     PreferredLeaderElectionGoal.class.getName(),
                                                     LeaderBytesInDistributionGoal.class.getName());
  Map<Integer, String> goalNameByPriority = new HashMap<>();
  for (int i = 0; i < goalsSortedByPriority.size(); i++) {
    goalNameByPriority.put(i + 1, goalsSortedByPriority.get(i));
  }
  Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(6L));
  // NOTE(review): the same mutable BalancingConstraint instance is shared by every parameter set
  // added below, and it keeps being mutated after p.add(...) calls. This is only correct if
  // params(...) snapshots the relevant settings -- confirm against params' implementation.
  BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  List<OptimizationVerifier.Verification> verifications = Arrays.asList(NEW_BROKERS, DEAD_BROKERS, REGRESSION);
  // ----------##TEST: BALANCE PERCENTAGES.
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  List<Double> balancePercentages = Arrays.asList(TestConstants.HIGH_BALANCE_PERCENTAGE,
                                                  TestConstants.MEDIUM_BALANCE_PERCENTAGE,
                                                  TestConstants.LOW_BALANCE_PERCENTAGE);
  // -- TEST DECK #1: SMALL CLUSTER.
  for (Double balancePercentage : balancePercentages) {
    balancingConstraint.setResourceBalancePercentage(balancePercentage);
    p.add(params(balancingConstraint, DeterministicCluster.smallClusterModel(TestConstants.BROKER_CAPACITY), goalNameByPriority, verifications, null));
  }
  // -- TEST DECK #2: MEDIUM CLUSTER.
  for (Double balancePercentage : balancePercentages) {
    balancingConstraint.setResourceBalancePercentage(balancePercentage);
    p.add(params(balancingConstraint, DeterministicCluster.mediumClusterModel(TestConstants.BROKER_CAPACITY), goalNameByPriority, verifications, null));
  }
  // ----------##TEST: CAPACITY THRESHOLD.
  balancingConstraint.setResourceBalancePercentage(TestConstants.MEDIUM_BALANCE_PERCENTAGE);
  List<Double> capacityThresholds = Arrays.asList(TestConstants.HIGH_CAPACITY_THRESHOLD,
                                                  TestConstants.MEDIUM_CAPACITY_THRESHOLD,
                                                  TestConstants.LOW_CAPACITY_THRESHOLD);
  // -- TEST DECK #3: SMALL CLUSTER.
  for (Double capacityThreshold : capacityThresholds) {
    balancingConstraint.setCapacityThreshold(capacityThreshold);
    p.add(params(balancingConstraint, DeterministicCluster.smallClusterModel(TestConstants.BROKER_CAPACITY), goalNameByPriority, verifications, null));
  }
  // -- TEST DECK #4: MEDIUM CLUSTER.
  for (Double capacityThreshold : capacityThresholds) {
    balancingConstraint.setCapacityThreshold(capacityThreshold);
    p.add(params(balancingConstraint, DeterministicCluster.mediumClusterModel(TestConstants.BROKER_CAPACITY), goalNameByPriority, verifications, null));
  }
  // ----------##TEST: BROKER CAPACITY.
  List<Double> brokerCapacities = Arrays.asList(TestConstants.LARGE_BROKER_CAPACITY,
                                                TestConstants.MEDIUM_BROKER_CAPACITY,
                                                TestConstants.SMALL_BROKER_CAPACITY);
  // -- TEST DECK #5: SMALL AND MEDIUM CLUSTERS.
  for (Double capacity : brokerCapacities) {
    // Give every resource the same capacity so only total capacity varies between decks.
    Map<Resource, Double> testBrokerCapacity = new HashMap<>();
    testBrokerCapacity.put(Resource.CPU, capacity);
    testBrokerCapacity.put(Resource.DISK, capacity);
    testBrokerCapacity.put(Resource.NW_IN, capacity);
    testBrokerCapacity.put(Resource.NW_OUT, capacity);
    p.add(params(balancingConstraint, DeterministicCluster.smallClusterModel(testBrokerCapacity), goalNameByPriority, verifications, null));
    p.add(params(balancingConstraint, DeterministicCluster.mediumClusterModel(testBrokerCapacity), goalNameByPriority, verifications, null));
  }
  // Kafka-assigner mode goals and their (weaker) verification set.
  Map<Integer, String> kafkaAssignerGoals = new HashMap<>();
  kafkaAssignerGoals.put(0, KafkaAssignerEvenRackAwareGoal.class.getName());
  kafkaAssignerGoals.put(1, KafkaAssignerDiskUsageDistributionGoal.class.getName());
  List<OptimizationVerifier.Verification> kafkaAssignerVerifications = Arrays.asList(DEAD_BROKERS, REGRESSION);
  // Small cluster.
  p.add(params(balancingConstraint, DeterministicCluster.smallClusterModel(TestConstants.BROKER_CAPACITY), kafkaAssignerGoals, kafkaAssignerVerifications, null));
  // Medium cluster.
  p.add(params(balancingConstraint, DeterministicCluster.mediumClusterModel(TestConstants.BROKER_CAPACITY), kafkaAssignerGoals, kafkaAssignerVerifications, null));
  // Rack-aware satisfiable.
  p.add(params(balancingConstraint, DeterministicCluster.rackAwareSatisfiable(), kafkaAssignerGoals, kafkaAssignerVerifications, null));
  // Rack-aware unsatisfiable: optimization is expected to fail with OptimizationFailureException.
  p.add(params(balancingConstraint, DeterministicCluster.rackAwareUnsatisfiable(), kafkaAssignerGoals, kafkaAssignerVerifications, OptimizationFailureException.class));
  return p;
}
Aggregations