Example 1 with Partition

use of com.linkedin.kafka.cruisecontrol.model.Partition in project cruise-control by linkedin.

In the class KafkaAssignerEvenRackAwareGoal, method initGoalState:

/**
 * Sanity check: ensure a sufficient number of racks exists to achieve rack-awareness.
 * 1. Initialize partitions by topic.
 * 2. Initialize the number of excluded replicas by position for each broker.
 * 3. Initialize the healthy broker replica count by position.
 *
 * @param clusterModel The state of the cluster.
 * @param excludedTopics The topics that should be excluded from the optimization action.
 */
private void initGoalState(ClusterModel clusterModel, Set<String> excludedTopics) throws OptimizationFailureException {
    // Sanity check: Ensure that rack awareness is satisfiable.
    ensureRackAwareSatisfiable(clusterModel, excludedTopics);
    // 1. Initialize partitions by topic.
    _partitionsByTopic = clusterModel.getPartitionsByTopic();
    // 2. Initialize the number of excluded replicas by position for each broker.
    Map<Integer, Map<Integer, Integer>> numExcludedReplicasByPositionInBroker = new HashMap<>();
    clusterModel.brokers().forEach(broker -> numExcludedReplicasByPositionInBroker.put(broker.id(), new HashMap<>()));
    for (String excludedTopic : excludedTopics) {
        for (Partition partition : _partitionsByTopic.get(excludedTopic)) {
            // Add 1 to the number of excluded replicas in relevant position for the broker that the replica resides in.
            // Leader is at position 0.
            int position = 0;
            numExcludedReplicasByPositionInBroker.get(partition.leader().broker().id()).merge(position, 1, Integer::sum);
            // Followers are ordered in positions [1, numFollowers].
            for (Broker followerBroker : partition.followerBrokers()) {
                position++;
                numExcludedReplicasByPositionInBroker.get(followerBroker.id()).merge(position, 1, Integer::sum);
            }
        }
    }
    // 3. Initialize the healthy broker replica count by position.
    int maxReplicationFactor = clusterModel.maxReplicationFactor();
    for (int i = 0; i < maxReplicationFactor; i++) {
        SortedSet<BrokerReplicaCount> healthyBrokersByReplicaCount = new TreeSet<>();
        for (Broker broker : clusterModel.healthyBrokers()) {
            int numExcludedReplicasInPosition = numExcludedReplicasByPositionInBroker.get(broker.id()).getOrDefault(i, 0);
            BrokerReplicaCount brokerReplicaCount = new BrokerReplicaCount(broker, numExcludedReplicasInPosition);
            healthyBrokersByReplicaCount.add(brokerReplicaCount);
        }
        _healthyBrokerReplicaCountByPosition.put(i, healthyBrokersByReplicaCount);
    }
}
Also used : Partition(com.linkedin.kafka.cruisecontrol.model.Partition) Broker(com.linkedin.kafka.cruisecontrol.model.Broker) HashMap(java.util.HashMap) TreeSet(java.util.TreeSet) Map(java.util.Map)
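
The TreeSet in step 3 requires BrokerReplicaCount to define a total order. Below is a minimal, hypothetical sketch of such an ordering -- assuming brokers compare by replica count first, with broker id as a tie-breaker -- a stand-in for illustration, not cruise-control's actual class:

import java.util.SortedSet;
import java.util.TreeSet;

public class BrokerReplicaCountDemo {

    // Hypothetical stand-in for BrokerReplicaCount: replica count first,
    // broker id as tie-breaker (an assumption, not the project's code).
    static final class BrokerReplicaCount implements Comparable<BrokerReplicaCount> {
        final int brokerId;
        final int replicaCount;

        BrokerReplicaCount(int brokerId, int replicaCount) {
            this.brokerId = brokerId;
            this.replicaCount = replicaCount;
        }

        @Override
        public int compareTo(BrokerReplicaCount other) {
            int byCount = Integer.compare(replicaCount, other.replicaCount);
            return byCount != 0 ? byCount : Integer.compare(brokerId, other.brokerId);
        }

        @Override
        public String toString() {
            return "broker " + brokerId + " (" + replicaCount + " replicas)";
        }
    }

    public static void main(String[] args) {
        // Seeding counts with excluded replicas (step 2) makes brokers that already
        // host excluded replicas in this position start out "heavier".
        SortedSet<BrokerReplicaCount> byCount = new TreeSet<>();
        byCount.add(new BrokerReplicaCount(0, 3));
        byCount.add(new BrokerReplicaCount(1, 0));
        byCount.add(new BrokerReplicaCount(2, 3));
        System.out.println(byCount.first()); // broker 1 (0 replicas): best placement candidate
    }
}

Without an id tie-breaker, a TreeSet would treat two brokers with equal counts as duplicates and silently drop one, which is why a total order over brokers matters here.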

Example 2 with Partition

use of com.linkedin.kafka.cruisecontrol.model.Partition in project cruise-control by linkedin.

In the class KafkaCruiseControlServlet, method getPartitionLoad:

private boolean getPartitionLoad(HttpServletRequest request, HttpServletResponse response) throws Exception {
    Resource resource;
    Long startMs;
    Long endMs;
    boolean json = wantJSON(request);
    try {
        String resourceString = request.getParameter(RESOURCE_PARAM);
        try {
            if (resourceString == null) {
                resourceString = DEFAULT_PARTITION_LOAD_RESOURCE;
            }
            resource = Resource.valueOf(resourceString.toUpperCase());
        } catch (IllegalArgumentException iae) {
            String errorMsg = String.format("Invalid resource type %s. The resource type must be one of the following: CPU, DISK, NW_IN, NW_OUT", resourceString);
            StringWriter sw = new StringWriter();
            iae.printStackTrace(new PrintWriter(sw));
            setErrorResponse(response, sw.toString(), errorMsg, SC_BAD_REQUEST, json);
            // Close session
            return true;
        }
        String startMsString = request.getParameter(START_MS_PARAM);
        String endMsString = request.getParameter(END_MS_PARAM);
        startMs = startMsString == null ? -1L : Long.parseLong(startMsString);
        endMs = endMsString == null ? System.currentTimeMillis() : Long.parseLong(endMsString);
    } catch (Exception e) {
        StringWriter sw = new StringWriter();
        e.printStackTrace(new PrintWriter(sw));
        setErrorResponse(response, sw.toString(), e.getMessage(), SC_BAD_REQUEST, json);
        // Close session
        return true;
    }
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false);
    // Get cluster model asynchronously.
    ClusterModel clusterModel = getAndMaybeReturnProgress(request, response, () -> _asyncKafkaCruiseControl.clusterModel(startMs, endMs, requirements));
    if (clusterModel == null) {
        return false;
    }
    List<Partition> sortedPartitions = clusterModel.replicasSortedByUtilization(resource);
    OutputStream out = response.getOutputStream();
    String entriesString = request.getParameter(ENTRIES);
    Integer entries = entriesString == null ? Integer.MAX_VALUE : Integer.parseInt(entriesString);
    int numEntries = 0;
    if (!json) {
        int topicNameLength = clusterModel.topics().stream().mapToInt(String::length).max().orElse(20) + 5;
        setResponseCode(response, SC_OK);
        out.write(String.format("%" + topicNameLength + "s%10s%30s%20s%20s%20s%20s%n", "PARTITION", "LEADER", "FOLLOWERS", "CPU (%)", "DISK (MB)", "NW_IN (KB/s)", "NW_OUT (KB/s)").getBytes(StandardCharsets.UTF_8));
        for (Partition p : sortedPartitions) {
            if (++numEntries > entries) {
                break;
            }
            List<Integer> followers = p.followers().stream().map((replica) -> replica.broker().id()).collect(Collectors.toList());
            out.write(String.format("%" + topicNameLength + "s%10s%30s%19.6f%19.3f%19.3f%19.3f%n", p.leader().topicPartition(), p.leader().broker().id(), followers, p.leader().load().expectedUtilizationFor(Resource.CPU), p.leader().load().expectedUtilizationFor(Resource.DISK), p.leader().load().expectedUtilizationFor(Resource.NW_IN), p.leader().load().expectedUtilizationFor(Resource.NW_OUT)).getBytes(StandardCharsets.UTF_8));
        }
    } else {
        Map<String, Object> partitionMap = new HashMap<>();
        List<Object> partitionList = new ArrayList<>();
        List<String> header = new ArrayList<>(Arrays.asList("topic", "partition", "leader", "followers", "CPU", "DISK", "NW_IN", "NW_OUT"));
        partitionMap.put("version", JSON_VERSION);
        partitionMap.put("header", header);
        for (Partition p : sortedPartitions) {
            if (++numEntries > entries) {
                break;
            }
            List<Integer> followers = p.followers().stream().map((replica) -> replica.broker().id()).collect(Collectors.toList());
            List<Object> record = new ArrayList<>();
            record.add(p.leader().topicPartition().topic());
            record.add(p.leader().topicPartition().partition());
            record.add(p.leader().broker().id());
            record.add(followers);
            record.add(p.leader().load().expectedUtilizationFor(Resource.CPU));
            record.add(p.leader().load().expectedUtilizationFor(Resource.DISK));
            record.add(p.leader().load().expectedUtilizationFor(Resource.NW_IN));
            record.add(p.leader().load().expectedUtilizationFor(Resource.NW_OUT));
            partitionList.add(record);
        }
        partitionMap.put("records", partitionList);
        Gson gson = new Gson();
        String g = gson.toJson(partitionMap);
        // Measure the UTF-8 byte length of the payload; String.length() counts chars
        // and would understate Content-Length for non-ASCII topic names.
        byte[] payload = g.getBytes(StandardCharsets.UTF_8);
        setJSONResponseCode(response, SC_OK);
        response.setContentLength(payload.length);
        out.write(payload);
    }
    out.flush();
    return true;
}
Also used : Arrays(java.util.Arrays) SortedSet(java.util.SortedSet) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) GsonBuilder(com.google.gson.GsonBuilder) OperationFuture(com.linkedin.kafka.cruisecontrol.async.OperationFuture) Cluster(org.apache.kafka.common.Cluster) Gson(com.google.gson.Gson) Map(java.util.Map) ExecutorState(com.linkedin.kafka.cruisecontrol.executor.ExecutorState) KafkaClusterState(com.linkedin.kafka.cruisecontrol.KafkaClusterState) GoalOptimizer(com.linkedin.kafka.cruisecontrol.analyzer.GoalOptimizer) PrintWriter(java.io.PrintWriter) TopicPartition(org.apache.kafka.common.TopicPartition) HttpServlet(javax.servlet.http.HttpServlet) Time(org.apache.kafka.common.utils.Time) ExecutionTask(com.linkedin.kafka.cruisecontrol.executor.ExecutionTask) EndPoint(com.linkedin.kafka.cruisecontrol.servlet.KafkaCruiseControlServlet.EndPoint) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) KafkaAssignerEvenRackAwareGoal(com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerEvenRackAwareGoal) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) SC_NOT_FOUND(javax.servlet.http.HttpServletResponse.SC_NOT_FOUND) Goal(com.linkedin.kafka.cruisecontrol.analyzer.goals.Goal) List(java.util.List) AsyncKafkaCruiseControl(com.linkedin.kafka.cruisecontrol.async.AsyncKafkaCruiseControl) Resource(com.linkedin.kafka.cruisecontrol.common.Resource) ClusterModelStats(com.linkedin.kafka.cruisecontrol.model.ClusterModelStats) VALID_WINDOWS(com.linkedin.kafka.cruisecontrol.servlet.KafkaCruiseControlServlet.DataFrom.VALID_WINDOWS) SC_OK(javax.servlet.http.HttpServletResponse.SC_OK) Node(org.apache.kafka.common.Node) UnsupportedEncodingException(java.io.UnsupportedEncodingException) SortedMap(java.util.SortedMap) SC_INTERNAL_SERVER_ERROR(javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR) ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) HashMap(java.util.HashMap) Supplier(java.util.function.Supplier) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) HttpServletRequest(javax.servlet.http.HttpServletRequest) ServletOutputStream(javax.servlet.ServletOutputStream) OutputStreamWriter(java.io.OutputStreamWriter) Partition(com.linkedin.kafka.cruisecontrol.model.Partition) OutputStream(java.io.OutputStream) MetricRegistry(com.codahale.metrics.MetricRegistry) Logger(org.slf4j.Logger) KafkaCruiseControlState(com.linkedin.kafka.cruisecontrol.KafkaCruiseControlState) StringWriter(java.io.StringWriter) HttpServletResponse(javax.servlet.http.HttpServletResponse) IOException(java.io.IOException) SampleExtrapolation(com.linkedin.kafka.cruisecontrol.monitor.sampling.aggregator.SampleExtrapolation) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) URLEncoder(java.net.URLEncoder) TreeMap(java.util.TreeMap) StringJoiner(java.util.StringJoiner) SC_BAD_REQUEST(javax.servlet.http.HttpServletResponse.SC_BAD_REQUEST) Comparator(java.util.Comparator) Collections(java.util.Collections) KafkaAssignerDiskUsageDistributionGoal(com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerDiskUsageDistributionGoal) ModelCompletenessRequirements(com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements)
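
The try/catch around Resource.valueOf is the entire input validation for the resource parameter: a missing value falls back to DEFAULT_PARTITION_LOAD_RESOURCE, and an unknown value becomes a 400 response. A self-contained sketch of that fallback-then-validate pattern, using a stand-in enum and DISK as an assumed default:

public class ResourceParamDemo {

    // Stand-in for com.linkedin.kafka.cruisecontrol.common.Resource.
    enum Resource { CPU, DISK, NW_IN, NW_OUT }

    // Missing parameter -> default; unknown value -> IllegalArgumentException,
    // which the servlet maps to SC_BAD_REQUEST.
    static Resource parseResource(String resourceString, Resource defaultResource) {
        if (resourceString == null) {
            return defaultResource;
        }
        return Resource.valueOf(resourceString.toUpperCase());
    }

    public static void main(String[] args) {
        System.out.println(parseResource(null, Resource.DISK));    // DISK
        System.out.println(parseResource("nw_in", Resource.DISK)); // NW_IN
        try {
            parseResource("memory", Resource.DISK);
        } catch (IllegalArgumentException iae) {
            System.out.println("rejected: " + iae.getMessage());   // would become a 400
        }
    }
}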

Example 3 with Partition

use of com.linkedin.kafka.cruisecontrol.model.Partition in project cruise-control by linkedin.

In the class RandomCluster, method markDeadBrokers:

/**
 * Mark dead brokers: give priority to marking brokers containing excluded topic replicas. In particular, if the
 * leader is not in the first position of the partition replica list, give priority to brokers containing excluded
 * topic replicas in the leader's current position -- i.e. position 1.
 *
 * @param cluster The state of the cluster.
 * @param numDeadBrokers Number of dead brokers.
 * @param excludedTopics Excluded topics.
 * @param leaderInFirstPosition Whether the leader of each partition is in the first position.
 */
private static void markDeadBrokers(ClusterModel cluster, int numDeadBrokers, Set<String> excludedTopics, boolean leaderInFirstPosition) {
    if (numDeadBrokers > 0) {
        int markedBrokersContainingExcludedTopicReplicas = 0;
        // Find the brokers with high priority to mark as dead (if any). These brokers are sorted by their id.
        SortedMap<String, List<Partition>> partitionsByTopic = cluster.getPartitionsByTopic();
        SortedSet<Broker> brokersWithExcludedReplicas = new TreeSet<>();
        for (String excludedTopic : excludedTopics) {
            for (Partition excludedPartition : partitionsByTopic.get(excludedTopic)) {
                if (leaderInFirstPosition) {
                    brokersWithExcludedReplicas.addAll(excludedPartition.partitionBrokers());
                } else {
                    brokersWithExcludedReplicas.add(excludedPartition.replicas().get(1).broker());
                }
            }
        }
        // Mark the brokers with high priority as dead (if any).
        for (Broker brokerToMarkDead : brokersWithExcludedReplicas) {
            cluster.setBrokerState(brokerToMarkDead.id(), Broker.State.DEAD);
            if (++markedBrokersContainingExcludedTopicReplicas >= numDeadBrokers) {
                break;
            }
        }
        // Mark the remaining brokers as dead.
        int remainingDeadBrokerIndex = 0;
        while (numDeadBrokers - markedBrokersContainingExcludedTopicReplicas - remainingDeadBrokerIndex > 0) {
            if (cluster.broker(remainingDeadBrokerIndex).isAlive()) {
                cluster.setBrokerState(remainingDeadBrokerIndex, Broker.State.DEAD);
            }
            remainingDeadBrokerIndex++;
        }
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) Partition(com.linkedin.kafka.cruisecontrol.model.Partition) Broker(com.linkedin.kafka.cruisecontrol.model.Broker) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) List(java.util.List)
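
A compact sketch of the two-phase marking order, using plain integer broker ids in place of the cluster model (the ids are made up). Phase 1 walks the id-sorted priority set, phase 2 fills any remainder from broker 0 upward:

import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class MarkDeadBrokersDemo {

    public static void main(String[] args) {
        int numDeadBrokers = 3;
        // Brokers hosting excluded-topic replicas; TreeSet sorts them by id,
        // matching the iteration order in markDeadBrokers.
        SortedSet<Integer> brokersWithExcludedReplicas = new TreeSet<>(List.of(4, 7));

        List<Integer> dead = new ArrayList<>();
        // Phase 1: prioritize brokers with excluded replicas, capped at numDeadBrokers.
        int marked = 0;
        for (int brokerId : brokersWithExcludedReplicas) {
            dead.add(brokerId);
            if (++marked >= numDeadBrokers) {
                break;
            }
        }
        // Phase 2: mark remaining brokers from id 0 upward, skipping already-dead ones.
        int remainingDeadBrokerIndex = 0;
        while (numDeadBrokers - marked - remainingDeadBrokerIndex > 0) {
            if (!dead.contains(remainingDeadBrokerIndex)) {
                dead.add(remainingDeadBrokerIndex);
            }
            remainingDeadBrokerIndex++;
        }
        System.out.println(dead); // [4, 7, 0]
    }
}

Note that, like the original, phase 2 advances its index even when it skips an already-dead broker, so if a low-id broker was marked in phase 1, fewer than numDeadBrokers brokers can end up newly marked.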

Example 4 with Partition

use of com.linkedin.kafka.cruisecontrol.model.Partition in project cruise-control by linkedin.

In the class RandomClusterTest, method testNewBrokers:

/**
 * This test first creates a random cluster and balances it, then adds two new brokers and balances the cluster again.
 */
public void testNewBrokers() throws Exception {
    ClusterModel clusterModel = rebalance();
    ClusterModel clusterWithNewBroker = new ClusterModel(new ModelGeneration(0, 0L), 1.0);
    for (Broker b : clusterModel.brokers()) {
        clusterWithNewBroker.createRack(b.rack().id());
        Map<Resource, Double> brokerCapacity = new HashMap<>();
        for (Resource r : Resource.cachedValues()) {
            brokerCapacity.put(r, b.capacityFor(r));
        }
        clusterWithNewBroker.createBroker(b.rack().id(), Integer.toString(b.id()), b.id(), brokerCapacity);
    }
    for (Map.Entry<String, List<Partition>> entry : clusterModel.getPartitionsByTopic().entrySet()) {
        for (Partition p : entry.getValue()) {
            int index = 0;
            for (Replica r : p.replicas()) {
                clusterWithNewBroker.createReplica(r.broker().rack().id(), r.broker().id(), p.topicPartition(), index++, r.isLeader());
            }
        }
    }
    for (Broker b : clusterModel.brokers()) {
        for (Replica replica : b.replicas()) {
            AggregatedMetricValues aggregatedMetricValues = clusterModel.broker(b.id()).replica(replica.topicPartition()).load().loadByWindows();
            clusterWithNewBroker.setReplicaLoad(b.rack().id(), b.id(), replica.topicPartition(), aggregatedMetricValues, clusterModel.load().windows());
        }
    }
    for (int i = 1; i < 3; i++) {
        clusterWithNewBroker.createBroker(Integer.toString(i), Integer.toString(i + clusterModel.brokers().size() - 1), i + clusterModel.brokers().size() - 1, TestConstants.BROKER_CAPACITY);
        clusterWithNewBroker.setBrokerState(i + clusterModel.brokers().size() - 1, Broker.State.NEW);
    }
    assertTrue("Random Cluster Test failed to improve the existing state with new brokers.", OptimizationVerifier.executeGoalsFor(_balancingConstraint, clusterWithNewBroker, _goalNameByPriority, _verifications));
}
Also used : Partition(com.linkedin.kafka.cruisecontrol.model.Partition) Broker(com.linkedin.kafka.cruisecontrol.model.Broker) HashMap(java.util.HashMap) Resource(com.linkedin.kafka.cruisecontrol.common.Resource) AggregatedMetricValues(com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues) Replica(com.linkedin.kafka.cruisecontrol.model.Replica) ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) ModelGeneration(com.linkedin.kafka.cruisecontrol.monitor.ModelGeneration) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map)
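
The id arithmetic for the added brokers is easy to misread: with n existing brokers, i + n - 1 for i in {1, 2} yields ids n and n + 1, the next two ids after the existing range. A tiny sketch with a hypothetical five-broker cluster and placeholder capacities:

import java.util.EnumMap;
import java.util.Map;

public class NewBrokerIdDemo {

    // Stand-in for com.linkedin.kafka.cruisecontrol.common.Resource.
    enum Resource { CPU, DISK, NW_IN, NW_OUT }

    public static void main(String[] args) {
        int existingBrokers = 5; // stands in for clusterModel.brokers().size()

        // Per-resource capacity map, rebuilt per broker as in testNewBrokers;
        // 100.0 is a placeholder for b.capacityFor(r).
        Map<Resource, Double> brokerCapacity = new EnumMap<>(Resource.class);
        for (Resource r : Resource.values()) {
            brokerCapacity.put(r, 100.0);
        }

        // New brokers take the next ids after the existing range: 5 and 6.
        for (int i = 1; i < 3; i++) {
            int newBrokerId = i + existingBrokers - 1;
            System.out.println("create NEW broker id=" + newBrokerId + " capacity=" + brokerCapacity);
        }
    }
}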

Example 5 with Partition

use of com.linkedin.kafka.cruisecontrol.model.Partition in project cruise-control by linkedin.

In the class RandomCluster, method populate:

/**
 * Populate the given cluster with replicas having a certain load distribution, using the given properties and
 * replica distribution. The balancing constraint sets the resources that exist at each broker in the cluster.
 *
 * @param cluster             The state of the cluster.
 * @param properties          Representing the cluster properties as specified in {@link ClusterProperty}.
 * @param replicaDistribution The replica distribution showing the broker of each replica in the cluster.
 * @param rackAware           Whether the replicas should be rack aware or not.
 * @param leaderInFirstPosition Whether the leader of each partition is in the first position.
 * @param excludedTopics      Excluded topics.
 */
public static void populate(ClusterModel cluster, Map<ClusterProperty, Number> properties, TestConstants.Distribution replicaDistribution, boolean rackAware, boolean leaderInFirstPosition, Set<String> excludedTopics) {
    // Sanity checks.
    int numBrokers = cluster.brokers().size();
    if (properties.get(ClusterProperty.MEAN_NW_IN).doubleValue() < 0
            || properties.get(ClusterProperty.MEAN_NW_OUT).doubleValue() < 0
            || properties.get(ClusterProperty.MEAN_DISK).doubleValue() < 0
            || properties.get(ClusterProperty.MEAN_CPU).doubleValue() < 0
            || properties.get(ClusterProperty.NUM_DEAD_BROKERS).intValue() < 0
            || properties.get(ClusterProperty.NUM_TOPICS).intValue() <= 0
            || properties.get(ClusterProperty.MIN_REPLICATION).intValue() > properties.get(ClusterProperty.MAX_REPLICATION).intValue()
            || (leaderInFirstPosition && properties.get(ClusterProperty.MIN_REPLICATION).intValue() < 2)
            || properties.get(ClusterProperty.MAX_REPLICATION).intValue() > numBrokers
            || properties.get(ClusterProperty.NUM_TOPICS).intValue() > properties.get(ClusterProperty.NUM_REPLICAS).intValue()
            || (properties.get(ClusterProperty.MIN_REPLICATION).intValue() == properties.get(ClusterProperty.MAX_REPLICATION).intValue()
                && properties.get(ClusterProperty.NUM_REPLICAS).intValue() % properties.get(ClusterProperty.MIN_REPLICATION).intValue() != 0)) {
        throw new IllegalArgumentException("Random cluster population failed due to bad input.");
    }
    // Generate topic to number of brokers and replicas distribution.
    List<TopicMetadata> metadata = new ArrayList<>();
    for (int i = 0; i < properties.get(ClusterProperty.NUM_TOPICS).intValue(); i++) {
        metadata.add(new TopicMetadata(i));
    }
    // Increase the replication factor.
    for (int i = 0; i < properties.get(ClusterProperty.NUM_TOPICS).intValue(); i++) {
        int randomReplicationFactor = uniformlyRandom(properties.get(ClusterProperty.MIN_REPLICATION).intValue(), properties.get(ClusterProperty.MAX_REPLICATION).intValue(), TestConstants.REPLICATION_SEED + i);
        metadata.get(i).setReplicationFactor(randomReplicationFactor);
        if (totalTopicReplicas(metadata) > properties.get(ClusterProperty.NUM_REPLICAS).intValue()) {
            // Rollback to minimum replicationFactor.
            metadata.get(i).setReplicationFactor(properties.get(ClusterProperty.MIN_REPLICATION).intValue());
        }
    }
    // Increase the number of leaders.
    int maxRandomLeaders = properties.get(ClusterProperty.NUM_REPLICAS).intValue() / properties.get(ClusterProperty.NUM_TOPICS).intValue();
    for (int i = 0; i < properties.get(ClusterProperty.NUM_TOPICS).intValue(); i++) {
        int oldNumTopicLeaders = metadata.get(i).numTopicLeaders();
        int randomTopicLeaderCount = uniformlyRandom(2, maxRandomLeaders, TestConstants.LEADER_SEED + i);
        metadata.get(i).setNumTopicLeaders(randomTopicLeaderCount);
        if (totalTopicReplicas(metadata) > properties.get(ClusterProperty.NUM_REPLICAS).intValue()) {
            // Rollback to previous number of topic leaders.
            metadata.get(i).setNumTopicLeaders(oldNumTopicLeaders);
        }
    }
    int totalTopicReplicas = totalTopicReplicas(metadata);
    // Fill in the remaining empty spots.
    while (totalTopicReplicas < properties.get(ClusterProperty.NUM_REPLICAS).intValue()) {
        for (int i = 0; i < properties.get(ClusterProperty.NUM_TOPICS).intValue(); i++) {
            metadata.get(i).incrementNumTopicLeaders();
            totalTopicReplicas = totalTopicReplicas(metadata);
            if (totalTopicReplicas > properties.get(ClusterProperty.NUM_REPLICAS).intValue()) {
                // Rollback to previous number of topic leaders.
                metadata.get(i).decrementNumTopicLeaders();
                totalTopicReplicas = totalTopicReplicas(metadata);
            }
            if (totalTopicReplicas == properties.get(ClusterProperty.NUM_REPLICAS).intValue()) {
                break;
            }
        }
    }
    // Create replicas and set their distribution
    int replicaIndex = 0;
    Map<Resource, Random> randomByResource = new HashMap<>();
    for (Resource resource : Resource.cachedValues()) {
        long seed = TestConstants.UTILIZATION_SEED_BY_RESOURCE.get(resource);
        randomByResource.put(resource, new Random(seed));
    }
    Random randomForTopicPopularity = new Random(TestConstants.TOPIC_POPULARITY_SEED);
    for (TopicMetadata datum : metadata) {
        double topicPopularity = exponentialRandom(1.0, randomForTopicPopularity);
        String topic = datum.topic();
        for (int i = 1; i <= datum.numTopicLeaders(); i++) {
            Set<Integer> replicaBrokerIds = new HashSet<>();
            Set<String> replicaRacks = new HashSet<>();
            int brokerConflictResolver = 0;
            TopicPartition pInfo = new TopicPartition(topic, i - 1);
            for (int j = 1; j <= datum.replicationFactor(); j++) {
                int randomBrokerId;
                if (replicaDistribution.equals(TestConstants.Distribution.UNIFORM)) {
                    randomBrokerId = uniformlyRandom(0, numBrokers - 1, TestConstants.REPLICA_ASSIGNMENT_SEED + replicaIndex);
                    while (replicaBrokerIds.contains(randomBrokerId) || (rackAware && replicaRacks.contains(cluster.broker(randomBrokerId).rack().id()))) {
                        brokerConflictResolver++;
                        randomBrokerId = uniformlyRandom(0, numBrokers - 1, TestConstants.REPLICA_ASSIGNMENT_SEED + replicaIndex + brokerConflictResolver);
                    }
                } else if (replicaDistribution.equals(TestConstants.Distribution.LINEAR)) {
                    int binRange = (numBrokers * (numBrokers + 1)) / 2;
                    int randomBinValue = uniformlyRandom(1, binRange, TestConstants.REPLICA_ASSIGNMENT_SEED + replicaIndex);
                    randomBrokerId = 0;
                    for (int bin = 1; bin <= numBrokers; bin++) {
                        int binValue = (2 * randomBinValue);
                        if (binValue <= bin * (bin + 1) && binValue > (bin - 1) * bin) {
                            randomBrokerId = bin - 1;
                            break;
                        }
                    }
                    while (replicaBrokerIds.contains(randomBrokerId) || (rackAware && replicaRacks.contains(cluster.broker(randomBrokerId).rack().id()))) {
                        brokerConflictResolver++;
                        randomBinValue = uniformlyRandom(1, binRange, TestConstants.REPLICA_ASSIGNMENT_SEED + replicaIndex + brokerConflictResolver);
                        for (int bin = 1; bin <= numBrokers; bin++) {
                            int binValue = (2 * randomBinValue);
                            if (binValue <= bin * (bin + 1) && binValue > (bin - 1) * bin) {
                                randomBrokerId = bin - 1;
                                break;
                            }
                        }
                    }
                } else {
                    // Exponential.
                    int binRange = numBrokers * numBrokers;
                    int randomBinValue = uniformlyRandom(1, binRange, TestConstants.REPLICA_ASSIGNMENT_SEED + replicaIndex);
                    randomBrokerId = 0;
                    for (int bin = 1; bin <= numBrokers; bin++) {
                        if (randomBinValue <= bin * bin) {
                            randomBrokerId = bin - 1;
                            break;
                        }
                    }
                    while (replicaBrokerIds.contains(randomBrokerId) || (rackAware && replicaRacks.contains(cluster.broker(randomBrokerId).rack().id()))) {
                        brokerConflictResolver++;
                        randomBinValue = uniformlyRandom(1, binRange, TestConstants.REPLICA_ASSIGNMENT_SEED + replicaIndex + brokerConflictResolver);
                        for (int bin = 1; bin <= numBrokers; bin++) {
                            if (randomBinValue <= bin * bin) {
                                randomBrokerId = bin - 1;
                                break;
                            }
                        }
                    }
                }
                // Set leadership properties and replica load.
                AggregatedMetricValues aggregatedMetricValues = new AggregatedMetricValues();
                MetricValues metricValues = new MetricValues(1);
                metricValues.set(0, exponentialRandom(properties.get(ClusterProperty.MEAN_CPU).doubleValue() * topicPopularity, randomByResource.get(Resource.CPU)));
                aggregatedMetricValues.add(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.CPU), metricValues);
                metricValues = new MetricValues(1);
                metricValues.set(0, exponentialRandom(properties.get(ClusterProperty.MEAN_NW_IN).doubleValue() * topicPopularity, randomByResource.get(Resource.NW_IN)));
                aggregatedMetricValues.add(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.NW_IN), metricValues);
                metricValues = new MetricValues(1);
                metricValues.set(0, exponentialRandom(properties.get(ClusterProperty.MEAN_DISK).doubleValue() * topicPopularity, randomByResource.get(Resource.DISK)));
                aggregatedMetricValues.add(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.DISK), metricValues);
                if (j == 1) {
                    metricValues = new MetricValues(1);
                    metricValues.set(0, exponentialRandom(properties.get(ClusterProperty.MEAN_NW_OUT).doubleValue() * topicPopularity, randomByResource.get(Resource.NW_OUT)));
                    aggregatedMetricValues.add(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.NW_OUT), metricValues);
                    cluster.createReplica(cluster.broker(randomBrokerId).rack().id(), randomBrokerId, pInfo, j - 1, true);
                } else {
                    metricValues = new MetricValues(1);
                    metricValues.set(0, 0.0);
                    aggregatedMetricValues.add(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.NW_OUT), metricValues);
                    cluster.createReplica(cluster.broker(randomBrokerId).rack().id(), randomBrokerId, pInfo, j - 1, false);
                }
                cluster.setReplicaLoad(cluster.broker(randomBrokerId).rack().id(), randomBrokerId, pInfo, aggregatedMetricValues, Collections.singletonList(1L));
                // Update the set of replica locations.
                replicaBrokerIds.add(randomBrokerId);
                replicaRacks.add(cluster.broker(randomBrokerId).rack().id());
                // Update next replica index
                replicaIndex++;
            }
            // Move leader away from the first position if requested.
            if (!leaderInFirstPosition) {
                Partition partition = cluster.partition(pInfo);
                partition.swapReplicaPositions(1, partition.replicas().indexOf(partition.leader()));
            }
        }
    }
    // Mark dead brokers
    int numDeadBrokers = properties.get(ClusterProperty.NUM_DEAD_BROKERS).intValue();
    markDeadBrokers(cluster, numDeadBrokers, excludedTopics, leaderInFirstPosition);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) Partition(com.linkedin.kafka.cruisecontrol.model.Partition) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MetricValues(com.linkedin.cruisecontrol.monitor.sampling.aggregator.MetricValues) AggregatedMetricValues(com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues) Random(java.util.Random) HashSet(java.util.HashSet)
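
The LINEAR branch above maps a uniform draw onto brokers through triangular bins, so broker b is chosen with probability proportional to b + 1; the EXPONENTIAL branch is analogous with bin boundaries at bin * bin. A self-contained sketch of the linear mapping (broker count and seed are arbitrary):

import java.util.Random;

public class LinearBinDemo {

    // Mirrors the LINEAR branch of populate(): 0-based broker b owns the bin
    // (b*(b+1)/2, (b+1)*(b+2)/2] of a triangular range of size n*(n+1)/2.
    static int linearBroker(int numBrokers, Random random) {
        int binRange = numBrokers * (numBrokers + 1) / 2;
        int randomBinValue = 1 + random.nextInt(binRange);
        for (int bin = 1; bin <= numBrokers; bin++) {
            int binValue = 2 * randomBinValue;
            if (binValue <= bin * (bin + 1) && binValue > (bin - 1) * bin) {
                return bin - 1;
            }
        }
        throw new IllegalStateException("unreachable for valid inputs");
    }

    public static void main(String[] args) {
        int[] counts = new int[4];
        Random random = new Random(42);
        for (int i = 0; i < 100_000; i++) {
            counts[linearBroker(counts.length, random)]++;
        }
        // Expect roughly a 1:2:3:4 ratio across brokers 0..3.
        for (int b = 0; b < counts.length; b++) {
            System.out.println("broker " + b + ": " + counts[b]);
        }
    }
}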

Aggregations

Partition (com.linkedin.kafka.cruisecontrol.model.Partition): 6
HashMap (java.util.HashMap): 5
ArrayList (java.util.ArrayList): 4
List (java.util.List): 4
Map (java.util.Map): 4
Broker (com.linkedin.kafka.cruisecontrol.model.Broker): 3
TreeSet (java.util.TreeSet): 3
TopicPartition (org.apache.kafka.common.TopicPartition): 3
AggregatedMetricValues (com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues): 2
Resource (com.linkedin.kafka.cruisecontrol.common.Resource): 2
ClusterModel (com.linkedin.kafka.cruisecontrol.model.ClusterModel): 2
HashSet (java.util.HashSet): 2
MetricRegistry (com.codahale.metrics.MetricRegistry): 1
Gson (com.google.gson.Gson): 1
GsonBuilder (com.google.gson.GsonBuilder): 1
MetricValues (com.linkedin.cruisecontrol.monitor.sampling.aggregator.MetricValues): 1
KafkaClusterState (com.linkedin.kafka.cruisecontrol.KafkaClusterState): 1
KafkaCruiseControlState (com.linkedin.kafka.cruisecontrol.KafkaCruiseControlState): 1
GoalOptimizer (com.linkedin.kafka.cruisecontrol.analyzer.GoalOptimizer): 1
Goal (com.linkedin.kafka.cruisecontrol.analyzer.goals.Goal): 1