
Example 26 with ClusterModel

Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn.

The class RandomClusterTest, method rebalance.

private ClusterModel rebalance() throws Exception {
    // Create cluster properties by applying modified properties to base properties.
    Map<ClusterProperty, Number> clusterProperties = new HashMap<>(TestConstants.BASE_PROPERTIES);
    clusterProperties.putAll(_modifiedProperties);
    LOG.debug("Replica distribution: {}.", _replicaDistribution);
    ClusterModel clusterModel = RandomCluster.generate(clusterProperties);
    RandomCluster.populate(clusterModel, clusterProperties, _replicaDistribution);
    assertTrue("Random Cluster Test failed to improve the existing state.", OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterModel, _goalNameByPriority, _verifications));
    return clusterModel;
}
Also used : ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) HashMap(java.util.HashMap) ClusterProperty(com.linkedin.kafka.cruisecontrol.common.ClusterProperty)
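
The same setup pattern recurs in Examples 28 and 29 below: layer test-specific overrides over TestConstants.BASE_PROPERTIES, generate a random ClusterModel, and populate it with a chosen replica distribution. The following standalone helper is a minimal sketch of that pattern; the package locations of the test helpers RandomCluster and TestConstants are assumptions, since only ClusterProperty and ClusterModel packages are confirmed by the listing above.

import java.util.HashMap;
import java.util.Map;

import com.linkedin.kafka.cruisecontrol.common.ClusterProperty;
import com.linkedin.kafka.cruisecontrol.common.RandomCluster;   // assumed test-helper package
import com.linkedin.kafka.cruisecontrol.common.TestConstants;   // assumed test-helper package
import com.linkedin.kafka.cruisecontrol.model.ClusterModel;

public class RandomClusterSetupSketch {
    /**
     * Builds a random cluster the same way the tests above do: base properties,
     * overridden by the caller's modified properties, then generate and populate.
     */
    public static ClusterModel buildRandomCluster(Map<ClusterProperty, Number> modifiedProperties) {
        Map<ClusterProperty, Number> clusterProperties = new HashMap<>(TestConstants.BASE_PROPERTIES);
        clusterProperties.putAll(modifiedProperties);
        ClusterModel clusterModel = RandomCluster.generate(clusterProperties);
        RandomCluster.populate(clusterModel, clusterProperties, TestConstants.Distribution.UNIFORM);
        return clusterModel;
    }
}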

Example 27 with ClusterModel

Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn.

The class RandomClusterTest, method testNewBrokers.

/**
 * This test first creates a random cluster and balances it, then adds two new brokers and balances the cluster again.
 */
public void testNewBrokers() throws Exception {
    ClusterModel clusterModel = rebalance();
    ClusterModel clusterWithNewBroker = new ClusterModel(new ModelGeneration(0, 0L), 1.0);
    for (Broker b : clusterModel.brokers()) {
        clusterWithNewBroker.createRack(b.rack().id());
        Map<Resource, Double> brokerCapacity = new HashMap<>();
        for (Resource r : Resource.cachedValues()) {
            brokerCapacity.put(r, b.capacityFor(r));
        }
        clusterWithNewBroker.createBroker(b.rack().id(), Integer.toString(b.id()), b.id(), brokerCapacity);
    }
    for (Map.Entry<String, List<Partition>> entry : clusterModel.getPartitionsByTopic().entrySet()) {
        for (Partition p : entry.getValue()) {
            int index = 0;
            for (Replica r : p.replicas()) {
                clusterWithNewBroker.createReplica(r.broker().rack().id(), r.broker().id(), p.topicPartition(), index++, r.isLeader());
            }
        }
    }
    for (Broker b : clusterModel.brokers()) {
        for (Replica replica : b.replicas()) {
            AggregatedMetricValues aggregatedMetricValues = clusterModel.broker(b.id()).replica(replica.topicPartition()).load().loadByWindows();
            clusterWithNewBroker.setReplicaLoad(b.rack().id(), b.id(), replica.topicPartition(), aggregatedMetricValues, clusterModel.load().windows());
        }
    }
    for (int i = 1; i < 3; i++) {
        clusterWithNewBroker.createBroker(Integer.toString(i), Integer.toString(i + clusterModel.brokers().size() - 1), i + clusterModel.brokers().size() - 1, TestConstants.BROKER_CAPACITY);
        clusterWithNewBroker.setBrokerState(i + clusterModel.brokers().size() - 1, Broker.State.NEW);
    }
    assertTrue("Random Cluster Test failed to improve the existing state with new brokers.", OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterWithNewBroker, _goalNameByPriority, _verifications));
}
Also used : Partition(com.linkedin.kafka.cruisecontrol.model.Partition) Broker(com.linkedin.kafka.cruisecontrol.model.Broker) HashMap(java.util.HashMap) Resource(com.linkedin.kafka.cruisecontrol.common.Resource) AggregatedMetricValues(com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues) Replica(com.linkedin.kafka.cruisecontrol.model.Replica) ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) ModelGeneration(com.linkedin.kafka.cruisecontrol.monitor.ModelGeneration) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map)
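
The last loop in testNewBrokers() derives the ids of the two added brokers from the size of the original cluster. A minimal standalone sketch of that arithmetic, assuming a hypothetical original cluster of 10 brokers (ids 0..9):

public class NewBrokerIdSketch {
    public static void main(String[] args) {
        int originalBrokerCount = 10; // hypothetical clusterModel.brokers().size()
        for (int i = 1; i < 3; i++) {
            int newBrokerId = i + originalBrokerCount - 1; // yields 10, then 11
            String rackId = Integer.toString(i);           // rack "1", then rack "2"
            System.out.printf("createBroker(rack=%s, id=%d), state=NEW%n", rackId, newBrokerId);
        }
    }
}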

Example 28 with ClusterModel

Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn.

The class RandomGoalTest, method test.

@Test
public void test() throws Exception {
    // Create cluster properties by applying modified properties to base properties.
    Map<ClusterProperty, Number> clusterProperties = new HashMap<>(TestConstants.BASE_PROPERTIES);
    clusterProperties.putAll(_modifiedProperties);
    LOG.debug("Replica distribution: {} || Goals: {}.", TestConstants.Distribution.EXPONENTIAL, _goalNameByPriority);
    ClusterModel clusterModel = RandomCluster.generate(clusterProperties);
    RandomCluster.populate(clusterModel, clusterProperties, TestConstants.Distribution.EXPONENTIAL);
    assertTrue("Random Goal Test failed to improve the existing state.", OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterModel, _goalNameByPriority, _verifications));
}
Also used : ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) HashMap(java.util.HashMap) ClusterProperty(com.linkedin.kafka.cruisecontrol.common.ClusterProperty) Test(org.junit.Test)

Example 29 with ClusterModel

Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn.

The class RandomSelfHealingTest, method test.

@Test
public void test() throws Exception {
    // Create cluster properties by applying modified properties to base properties.
    Map<ClusterProperty, Number> clusterProperties = new HashMap<>(TestConstants.BASE_PROPERTIES);
    clusterProperties.putAll(_modifiedProperties);
    LOG.debug("Replica distribution: {}.", TestConstants.Distribution.UNIFORM);
    ClusterModel clusterModel = RandomCluster.generate(clusterProperties);
    RandomCluster.populate(clusterModel, clusterProperties, TestConstants.Distribution.UNIFORM, true, _leaderInFirstPosition, _excludedTopics);
    assertTrue("Self Healing Test failed to improve the existing state.", OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterModel, _goalNameByPriority, _excludedTopics, _verifications));
}
Also used : ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) HashMap(java.util.HashMap) ClusterProperty(com.linkedin.kafka.cruisecontrol.common.ClusterProperty) Test(org.junit.Test)
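
Unlike the previous examples, RandomSelfHealingTest passes an extra _excludedTopics argument to both populate() and executeGoalsFor(). A minimal sketch of how such a set might be built, assuming _excludedTopics is a Set<String> of topic names (its declaration is not shown in this excerpt, and the topic name below is hypothetical):

import java.util.Collections;
import java.util.Set;

public class ExcludedTopicsSketch {
    public static Set<String> excludedTopics() {
        // Hypothetical topic name; topics listed here are left untouched by the goals.
        return Collections.singleton("__consumer_offsets");
    }
}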

Example 30 with ClusterModel

Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn.

The class KafkaAssignerDiskUsageDistributionGoalTest, method createClusterModel.

/**
 * The replica distribution is as below.
 *
 * L - Leader
 * F - Follower
 * S - Secondary Follower
 *
 *         r0             r1         r2          r3
 *        /  \             |          |           |
 *      b0    b1          b2         b3          b4
 *   T0P0(L)  T0P1(L)   T0P2(L)    T1P0(L)     T1P1(L)
 *   T1P2(L)  T2P0(L)   T2P1(L)    T2P2(L)     T0P0(F)
 *   T0P2(F)  T1P0(F)   T0P1(F)    T1P1(F)     T1P2(F)
 *   T2P1(F)  T2P2(F)   T2P0(F)    T0P0(S)     T0P1(S)
 *   T1P1(S)            T1P0(S)    T0P2(S)     T2P0(S)
 *                      T1P2(S)    T2P1(S)     T2P2(S)
 * The sizes of each broker are:
 * b0: 190
 * b1: 260
 * b2: 360
 * b3: 250
 * b4: 290
 *
 * The average broker size should be: 270
 */
private ClusterModel createClusterModel() {
    Map<TopicPartition, Float> partitionSize = new HashMap<>();
    partitionSize.put(T0P0, 10f);
    partitionSize.put(T0P1, 90f);
    partitionSize.put(T0P2, 20f);
    partitionSize.put(T1P0, 80f);
    partitionSize.put(T1P1, 30f);
    partitionSize.put(T1P2, 70f);
    partitionSize.put(T2P0, 40f);
    partitionSize.put(T2P1, 60f);
    partitionSize.put(T2P2, 50f);
    final int numRacks = 4;
    ClusterModel clusterModel = new ClusterModel(new ModelGeneration(0, 0), 1.0);
    for (int i = 0; i < numRacks; i++) {
        clusterModel.createRack("r" + i);
    }
    int i = 0;
    for (; i < 2; i++) {
        clusterModel.createBroker("r0", "h" + i, i, TestConstants.BROKER_CAPACITY);
    }
    for (int j = 1; j < numRacks; j++, i++) {
        clusterModel.createBroker("r" + j, "h" + i, i, TestConstants.BROKER_CAPACITY);
    }
    clusterModel.createReplica("r0", 0, T0P0, 0, true);
    clusterModel.createReplica("r0", 0, T1P2, 0, true);
    clusterModel.createReplica("r0", 1, T0P1, 0, true);
    clusterModel.createReplica("r0", 1, T2P0, 0, true);
    clusterModel.createReplica("r1", 2, T0P2, 0, true);
    clusterModel.createReplica("r1", 2, T2P1, 0, true);
    clusterModel.createReplica("r2", 3, T1P0, 0, true);
    clusterModel.createReplica("r2", 3, T2P2, 0, true);
    clusterModel.createReplica("r3", 4, T1P1, 0, true);
    clusterModel.createReplica("r0", 0, T0P2, 1, false);
    clusterModel.createReplica("r0", 0, T2P1, 1, false);
    clusterModel.createReplica("r0", 1, T1P0, 1, false);
    clusterModel.createReplica("r0", 1, T2P2, 1, false);
    clusterModel.createReplica("r1", 2, T0P1, 1, false);
    clusterModel.createReplica("r1", 2, T2P0, 1, false);
    clusterModel.createReplica("r2", 3, T1P1, 1, false);
    clusterModel.createReplica("r3", 4, T0P0, 1, false);
    clusterModel.createReplica("r3", 4, T1P2, 1, false);
    clusterModel.createReplica("r0", 0, T1P1, 2, false);
    clusterModel.createReplica("r1", 2, T1P0, 2, false);
    clusterModel.createReplica("r1", 2, T1P2, 2, false);
    clusterModel.createReplica("r2", 3, T0P0, 2, false);
    clusterModel.createReplica("r2", 3, T0P2, 2, false);
    clusterModel.createReplica("r2", 3, T2P1, 2, false);
    clusterModel.createReplica("r3", 4, T0P1, 2, false);
    clusterModel.createReplica("r3", 4, T2P0, 2, false);
    clusterModel.createReplica("r3", 4, T2P2, 2, false);
    List<Long> windows = Collections.singletonList(1L);
    clusterModel.setReplicaLoad("r0", 0, T0P0, getAggregatedMetricValues(partitionSize.get(T0P0)), windows);
    clusterModel.setReplicaLoad("r0", 0, T1P2, getAggregatedMetricValues(partitionSize.get(T1P2)), windows);
    clusterModel.setReplicaLoad("r0", 0, T0P2, getAggregatedMetricValues(partitionSize.get(T0P2)), windows);
    clusterModel.setReplicaLoad("r0", 0, T2P1, getAggregatedMetricValues(partitionSize.get(T2P1)), windows);
    clusterModel.setReplicaLoad("r0", 0, T1P1, getAggregatedMetricValues(partitionSize.get(T1P1)), windows);
    clusterModel.setReplicaLoad("r0", 1, T0P1, getAggregatedMetricValues(partitionSize.get(T0P1)), windows);
    clusterModel.setReplicaLoad("r0", 1, T2P0, getAggregatedMetricValues(partitionSize.get(T2P0)), windows);
    clusterModel.setReplicaLoad("r0", 1, T1P0, getAggregatedMetricValues(partitionSize.get(T1P0)), windows);
    clusterModel.setReplicaLoad("r0", 1, T2P2, getAggregatedMetricValues(partitionSize.get(T2P2)), windows);
    clusterModel.setReplicaLoad("r1", 2, T0P2, getAggregatedMetricValues(partitionSize.get(T0P2)), windows);
    clusterModel.setReplicaLoad("r1", 2, T2P1, getAggregatedMetricValues(partitionSize.get(T2P1)), windows);
    clusterModel.setReplicaLoad("r1", 2, T0P1, getAggregatedMetricValues(partitionSize.get(T0P1)), windows);
    clusterModel.setReplicaLoad("r1", 2, T2P0, getAggregatedMetricValues(partitionSize.get(T2P0)), windows);
    clusterModel.setReplicaLoad("r1", 2, T1P0, getAggregatedMetricValues(partitionSize.get(T1P0)), windows);
    clusterModel.setReplicaLoad("r1", 2, T1P2, getAggregatedMetricValues(partitionSize.get(T1P2)), windows);
    clusterModel.setReplicaLoad("r2", 3, T1P0, getAggregatedMetricValues(partitionSize.get(T1P0)), windows);
    clusterModel.setReplicaLoad("r2", 3, T2P2, getAggregatedMetricValues(partitionSize.get(T2P2)), windows);
    clusterModel.setReplicaLoad("r2", 3, T1P1, getAggregatedMetricValues(partitionSize.get(T1P1)), windows);
    clusterModel.setReplicaLoad("r2", 3, T0P0, getAggregatedMetricValues(partitionSize.get(T0P0)), windows);
    clusterModel.setReplicaLoad("r2", 3, T0P2, getAggregatedMetricValues(partitionSize.get(T0P2)), windows);
    clusterModel.setReplicaLoad("r2", 3, T2P1, getAggregatedMetricValues(partitionSize.get(T2P1)), windows);
    clusterModel.setReplicaLoad("r3", 4, T1P1, getAggregatedMetricValues(partitionSize.get(T1P1)), windows);
    clusterModel.setReplicaLoad("r3", 4, T0P0, getAggregatedMetricValues(partitionSize.get(T0P0)), windows);
    clusterModel.setReplicaLoad("r3", 4, T1P2, getAggregatedMetricValues(partitionSize.get(T1P2)), windows);
    clusterModel.setReplicaLoad("r3", 4, T0P1, getAggregatedMetricValues(partitionSize.get(T0P1)), windows);
    clusterModel.setReplicaLoad("r3", 4, T2P0, getAggregatedMetricValues(partitionSize.get(T2P0)), windows);
    clusterModel.setReplicaLoad("r3", 4, T2P2, getAggregatedMetricValues(partitionSize.get(T2P2)), windows);
    return clusterModel;
}
Also used : ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) HashMap(java.util.HashMap) ModelGeneration(com.linkedin.kafka.cruisecontrol.monitor.ModelGeneration) TopicPartition(org.apache.kafka.common.TopicPartition) BalancingConstraint(com.linkedin.kafka.cruisecontrol.analyzer.BalancingConstraint)
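
The broker sizes quoted in the comment above follow directly from the per-partition sizes set in createClusterModel(). A standalone arithmetic check (plain Java, independent of any Cruise Control classes) that reproduces the 190/260/360/250/290 figures and the 270 average:

import java.util.Arrays;
import java.util.List;

public class DiskUsageArithmetic {
    public static void main(String[] args) {
        // Partition sizes from createClusterModel():
        // T0P0=10, T0P1=90, T0P2=20, T1P0=80, T1P1=30, T1P2=70, T2P0=40, T2P1=60, T2P2=50
        double t0p0 = 10, t0p1 = 90, t0p2 = 20, t1p0 = 80, t1p1 = 30, t1p2 = 70, t2p0 = 40, t2p1 = 60, t2p2 = 50;
        // Sum each broker's replicas exactly as laid out in the replica-distribution diagram.
        double b0 = t0p0 + t1p2 + t0p2 + t2p1 + t1p1;               // 190
        double b1 = t0p1 + t2p0 + t1p0 + t2p2;                      // 260
        double b2 = t0p2 + t2p1 + t0p1 + t2p0 + t1p0 + t1p2;        // 360
        double b3 = t1p0 + t2p2 + t1p1 + t0p0 + t0p2 + t2p1;        // 250
        double b4 = t1p1 + t0p0 + t1p2 + t0p1 + t2p0 + t2p2;        // 290
        List<Double> brokerSizes = Arrays.asList(b0, b1, b2, b3, b4);
        double average = brokerSizes.stream().mapToDouble(Double::doubleValue).average().orElse(0); // 270
        System.out.println("broker sizes = " + brokerSizes + ", average = " + average);
    }
}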

Aggregations

ClusterModel (com.linkedin.kafka.cruisecontrol.model.ClusterModel): 38
TopicPartition (org.apache.kafka.common.TopicPartition): 12
ModelCompletenessRequirements (com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements): 11
Test (org.junit.Test): 11
BalancingConstraint (com.linkedin.kafka.cruisecontrol.analyzer.BalancingConstraint): 10
Replica (com.linkedin.kafka.cruisecontrol.model.Replica): 10
Broker (com.linkedin.kafka.cruisecontrol.model.Broker): 9
ClusterModelStats (com.linkedin.kafka.cruisecontrol.model.ClusterModelStats): 9
List (java.util.List): 9
HashSet (java.util.HashSet): 8
Goal (com.linkedin.kafka.cruisecontrol.analyzer.goals.Goal): 7
Resource (com.linkedin.kafka.cruisecontrol.common.Resource): 7
Set (java.util.Set): 7
Logger (org.slf4j.Logger): 7
LoggerFactory (org.slf4j.LoggerFactory): 7
ActionAcceptance (com.linkedin.kafka.cruisecontrol.analyzer.ActionAcceptance): 6
BalancingAction (com.linkedin.kafka.cruisecontrol.analyzer.BalancingAction): 6
OperationProgress (com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress): 6
ArrayList (java.util.ArrayList): 6
Comparator (java.util.Comparator): 6