Use of com.linkedin.kafka.cruisecontrol.common.ClusterProperty in project cruise-control by linkedin.
The class RandomClusterTest, method data.
/**
 * Populate parameters for the {@link OptimizationVerifier}. All brokers are alive.
 *
 * @return Parameters for the {@link OptimizationVerifier}.
 */
public static Collection<Object[]> data(TestConstants.Distribution distribution) {
  Collection<Object[]> p = new ArrayList<>();
  // Goals to verify, ordered by priority.
  Map<Integer, String> goalNameByPriority = new HashMap<>();
  goalNameByPriority.put(1, RackAwareGoal.class.getName());
  goalNameByPriority.put(2, ReplicaCapacityGoal.class.getName());
  goalNameByPriority.put(3, DiskCapacityGoal.class.getName());
  goalNameByPriority.put(4, NetworkInboundCapacityGoal.class.getName());
  goalNameByPriority.put(5, NetworkOutboundCapacityGoal.class.getName());
  goalNameByPriority.put(6, CpuCapacityGoal.class.getName());
  goalNameByPriority.put(7, ReplicaDistributionGoal.class.getName());
  goalNameByPriority.put(8, PotentialNwOutGoal.class.getName());
  goalNameByPriority.put(9, DiskUsageDistributionGoal.class.getName());
  goalNameByPriority.put(10, NetworkInboundUsageDistributionGoal.class.getName());
  goalNameByPriority.put(11, NetworkOutboundUsageDistributionGoal.class.getName());
  goalNameByPriority.put(12, CpuUsageDistributionGoal.class.getName());
  goalNameByPriority.put(13, TopicReplicaDistributionGoal.class.getName());
  goalNameByPriority.put(14, PreferredLeaderElectionGoal.class.getName());
  goalNameByPriority.put(15, LeaderBytesInDistributionGoal.class.getName());
  Map<Integer, String> kafkaAssignerGoals = new HashMap<>();
  kafkaAssignerGoals.put(1, KafkaAssignerEvenRackAwareGoal.class.getName());
  kafkaAssignerGoals.put(2, KafkaAssignerDiskUsageDistributionGoal.class.getName());
  List<OptimizationVerifier.Verification> verifications = Arrays.asList(NEW_BROKERS, DEAD_BROKERS, REGRESSION);
  List<OptimizationVerifier.Verification> kafkaAssignerVerifications = Arrays.asList(GOAL_VIOLATION, DEAD_BROKERS, REGRESSION);
  Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(1500L));
  BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  Map<ClusterProperty, Number> modifiedProperties;
  // Test: Increase Broker Count (40 to 140 brokers, in steps of 20).
  for (int i = 1; i <= 6; i++) {
    modifiedProperties = new HashMap<>();
    modifiedProperties.put(ClusterProperty.NUM_BROKERS, 20 + i * 20);
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  // Test: Increase Replica Count (50001 to 75006 replicas, in steps of 5001); the per-broker
  // replica cap is raised accordingly before building the constraint.
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(3000L));
  balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  for (int i = 7; i <= 12; i++) {
    modifiedProperties = new HashMap<>();
    modifiedProperties.put(ClusterProperty.NUM_REPLICAS, 50001 + (i - 7) * 5001);
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  // Test: Increase Topic Count (3000 to 8000 topics, in steps of 1000).
  for (int i = 13; i <= 18; i++) {
    modifiedProperties = new HashMap<>();
    modifiedProperties.put(ClusterProperty.NUM_TOPICS, 3000 + (i - 13) * 1000);
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  // Test: Increase Replication Factor (3 to 8), keeping the replica count divisible by it.
  for (int i = 19; i <= 24; i++) {
    modifiedProperties = new HashMap<>();
    modifiedProperties.put(ClusterProperty.NUM_REPLICAS, 50000 - (50000 % (i - 16)));
    modifiedProperties.put(ClusterProperty.MIN_REPLICATION, (i - 16));
    modifiedProperties.put(ClusterProperty.MAX_REPLICATION, (i - 16));
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  return p;
}
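Each call above funnels through a private params(...) helper that boxes one scenario into an Object[] row for the parameterized runner. The helper itself is not shown on this page; the following is a sketch consistent with the call sites, with the signature inferred rather than verbatim.

private static Object[] params(Map<ClusterProperty, Number> modifiedProperties,
                               Map<Integer, String> goalNameByPriority,
                               TestConstants.Distribution distribution,
                               BalancingConstraint balancingConstraint,
                               List<OptimizationVerifier.Verification> verifications) {
  // One row of the test matrix; the runner injects these elements positionally.
  return new Object[]{modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications};
}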
Use of com.linkedin.kafka.cruisecontrol.common.ClusterProperty in project cruise-control by linkedin.
The class RandomSelfHealingTest, method data.
/**
 * Populate parameters for the {@link OptimizationVerifier}. The test decks cover clusters with a
 * single dead broker and with multiple dead brokers.
 *
 * @return Parameters for the {@link OptimizationVerifier}.
 */
@Parameters(name = "{1}-{0}")
public static Collection<Object[]> data() {
  Collection<Object[]> p = new ArrayList<>();
  Map<Integer, String> goalNameByPriority = new HashMap<>();
  goalNameByPriority.put(1, RackAwareGoal.class.getName());
  goalNameByPriority.put(2, ReplicaCapacityGoal.class.getName());
  goalNameByPriority.put(3, DiskCapacityGoal.class.getName());
  goalNameByPriority.put(4, NetworkInboundCapacityGoal.class.getName());
  goalNameByPriority.put(5, NetworkOutboundCapacityGoal.class.getName());
  goalNameByPriority.put(6, CpuCapacityGoal.class.getName());
  goalNameByPriority.put(7, ReplicaDistributionGoal.class.getName());
  goalNameByPriority.put(8, PotentialNwOutGoal.class.getName());
  goalNameByPriority.put(9, DiskUsageDistributionGoal.class.getName());
  goalNameByPriority.put(10, NetworkInboundUsageDistributionGoal.class.getName());
  goalNameByPriority.put(11, NetworkOutboundUsageDistributionGoal.class.getName());
  goalNameByPriority.put(12, CpuUsageDistributionGoal.class.getName());
  goalNameByPriority.put(13, TopicReplicaDistributionGoal.class.getName());
  Map<Integer, String> kafkaAssignerGoals = new HashMap<>();
  kafkaAssignerGoals.put(0, KafkaAssignerEvenRackAwareGoal.class.getName());
  kafkaAssignerGoals.put(1, KafkaAssignerDiskUsageDistributionGoal.class.getName());
  Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(2000L));
  BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  List<OptimizationVerifier.Verification> verifications = Arrays.asList(NEW_BROKERS, DEAD_BROKERS, REGRESSION);
  List<OptimizationVerifier.Verification> kafkaAssignerVerifications = Arrays.asList(DEAD_BROKERS, REGRESSION, GOAL_VIOLATION);
  // -- TEST DECK #1: SINGLE DEAD BROKER.
  // Test: Single Goal.
  Map<ClusterProperty, Number> singleDeadBroker = new HashMap<>();
  singleDeadBroker.put(ClusterProperty.NUM_DEAD_BROKERS, 1);
  int testId = 0;
  for (Map.Entry<Integer, String> entry : goalNameByPriority.entrySet()) {
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()),
                 balancingConstraint, Collections.emptySet(), verifications, true));
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()),
                 balancingConstraint, Collections.emptySet(), verifications, false));
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()),
                 balancingConstraint, Collections.singleton("T0"), verifications, true));
    p.add(params(testId++, singleDeadBroker, Collections.singletonMap(entry.getKey(), entry.getValue()),
                 balancingConstraint, Collections.singleton("T0"), verifications, false));
  }
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()),
               balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()),
               balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, false));
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()),
               balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, true));
  p.add(params(testId++, singleDeadBroker, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()),
               balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, false));
  // Raise the per-broker replica cap before running all goals together.
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(5100L));
  balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  // Test: All Goals.
  p.add(params(testId++, singleDeadBroker, goalNameByPriority, balancingConstraint,
               Collections.emptySet(), verifications, true));
  p.add(params(testId++, singleDeadBroker, goalNameByPriority, balancingConstraint,
               Collections.singleton("T0"), verifications, true));
  p.add(params(testId++, singleDeadBroker, kafkaAssignerGoals, balancingConstraint,
               Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, singleDeadBroker, kafkaAssignerGoals, balancingConstraint,
               Collections.singleton("T0"), kafkaAssignerVerifications, true));
  // -- TEST DECK #2: MULTIPLE DEAD BROKERS.
  // Test: Single Goal.
  Map<ClusterProperty, Number> multipleDeadBrokers = new HashMap<>();
  multipleDeadBrokers.put(ClusterProperty.NUM_DEAD_BROKERS, 5);
  for (Map.Entry<Integer, String> entry : goalNameByPriority.entrySet()) {
    p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(entry.getKey(), entry.getValue()),
                 balancingConstraint, Collections.emptySet(), verifications, true));
    p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(entry.getKey(), entry.getValue()),
                 balancingConstraint, Collections.singleton("T0"), verifications, true));
  }
  p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()),
               balancingConstraint, Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, multipleDeadBrokers, Collections.singletonMap(0, KafkaAssignerEvenRackAwareGoal.class.getName()),
               balancingConstraint, Collections.singleton("T0"), kafkaAssignerVerifications, true));
  // Test: All Goals.
  p.add(params(testId++, multipleDeadBrokers, goalNameByPriority, balancingConstraint,
               Collections.emptySet(), verifications, true));
  p.add(params(testId++, multipleDeadBrokers, goalNameByPriority, balancingConstraint,
               Collections.singleton("T0"), verifications, true));
  p.add(params(testId++, multipleDeadBrokers, kafkaAssignerGoals, balancingConstraint,
               Collections.emptySet(), kafkaAssignerVerifications, true));
  p.add(params(testId++, multipleDeadBrokers, kafkaAssignerGoals, balancingConstraint,
               Collections.singleton("T0"), kafkaAssignerVerifications, true));
  return p;
}
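The @Parameters(name = "{1}-{0}") annotation labels each case with the second and first elements of its Object[] row, i.e. the modified cluster properties and the test id. JUnit 4's Parameterized runner then injects each row positionally into the test constructor. A sketch of that constructor, consistent with the fields the test() method below uses; the exact signature is an assumption:

// Assumed constructor; field names mirror the test() method shown further below.
public RandomSelfHealingTest(int testId,
                             Map<ClusterProperty, Number> modifiedProperties,
                             Map<Integer, String> goalNameByPriority,
                             BalancingConstraint balancingConstraint,
                             Collection<String> excludedTopics,
                             List<OptimizationVerifier.Verification> verifications,
                             boolean leaderInFirstPosition) {
  _modifiedProperties = modifiedProperties;
  _goalNameByPriority = goalNameByPriority;
  _balancingConstraint = balancingConstraint;
  _excludedTopics = excludedTopics;
  _verifications = verifications;
  _leaderInFirstPosition = leaderInFirstPosition;
}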
Use of com.linkedin.kafka.cruisecontrol.common.ClusterProperty in project cruise-control by linkedin.
The class RandomClusterTest, method rebalance.
private ClusterModel rebalance() throws Exception {
  // Create cluster properties by applying modified properties to base properties.
  Map<ClusterProperty, Number> clusterProperties = new HashMap<>(TestConstants.BASE_PROPERTIES);
  clusterProperties.putAll(_modifiedProperties);
  LOG.debug("Replica distribution: {}.", _replicaDistribution);
  ClusterModel clusterModel = RandomCluster.generate(clusterProperties);
  RandomCluster.populate(clusterModel, clusterProperties, _replicaDistribution);
  assertTrue("Random Cluster Test failed to improve the existing state.",
             OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterModel, _goalNameByPriority, _verifications));
  return clusterModel;
}
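rebalance() is a helper rather than a test itself. A plausible JUnit 4 caller follows; this is hypothetical, and the real test class may assert more on the returned model:

// Hypothetical caller; generate, populate, and verify all happen inside rebalance().
@Test
public void testRebalance() throws Exception {
  ClusterModel balancedModel = rebalance();
  assertNotNull(balancedModel);
}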
Use of com.linkedin.kafka.cruisecontrol.common.ClusterProperty in project cruise-control by linkedin.
The class RandomGoalTest, method test.
@Test
public void test() throws Exception {
  // Create cluster properties by applying modified properties to base properties.
  Map<ClusterProperty, Number> clusterProperties = new HashMap<>(TestConstants.BASE_PROPERTIES);
  clusterProperties.putAll(_modifiedProperties);
  LOG.debug("Replica distribution: {} || Goals: {}.", TestConstants.Distribution.EXPONENTIAL, _goalNameByPriority);
  ClusterModel clusterModel = RandomCluster.generate(clusterProperties);
  RandomCluster.populate(clusterModel, clusterProperties, TestConstants.Distribution.EXPONENTIAL);
  assertTrue("Random Goal Test failed to improve the existing state.",
             OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterModel, _goalNameByPriority, _verifications));
}
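The LOG calls in these tests assume a class-level SLF4J logger, declared along these lines; the declaration itself is not shown on this page:

// Assumed logger declaration; cruise-control uses SLF4J throughout.
private static final Logger LOG = LoggerFactory.getLogger(RandomGoalTest.class);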
Use of com.linkedin.kafka.cruisecontrol.common.ClusterProperty in project cruise-control by linkedin.
The class RandomSelfHealingTest, method test.
@Test
public void test() throws Exception {
  // Create cluster properties by applying modified properties to base properties.
  Map<ClusterProperty, Number> clusterProperties = new HashMap<>(TestConstants.BASE_PROPERTIES);
  clusterProperties.putAll(_modifiedProperties);
  LOG.debug("Replica distribution: {}.", TestConstants.Distribution.UNIFORM);
  ClusterModel clusterModel = RandomCluster.generate(clusterProperties);
  RandomCluster.populate(clusterModel, clusterProperties, TestConstants.Distribution.UNIFORM, true,
                         _leaderInFirstPosition, _excludedTopics);
  assertTrue("Self Healing Test failed to improve the existing state.",
             OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterModel, _goalNameByPriority,
                                                  _excludedTopics, _verifications));
}
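Note the overload difference across the three test bodies: RandomClusterTest and RandomGoalTest call a four-argument executeGoalsFor, while the self-healing test passes _excludedTopics as a fifth argument so that replicas of excluded topics are left alone during healing. How the two overloads relate is a guess; a plausible delegation, assumed rather than taken from the source:

// Sketch only: the 4-arg overload plausibly forwards with no excluded topics.
static boolean executeGoalsFor(BalancingConstraint constraint,
                               ClusterModel clusterModel,
                               Map<Integer, String> goalNameByPriority,
                               List<Verification> verifications) throws Exception {
  return executeGoalsFor(constraint, clusterModel, goalNameByPriority, Collections.emptySet(), verifications);
}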