
Example 6 with ClusterModel

use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by linkedin.

the class RackAwareGoal method rackAwareEligibleBrokers.

/**
 * Get the set of rack-aware eligible brokers for the given replica in the given cluster. A broker is rack-aware
 * eligible for a given replica if no replica of the same partition, other than the given replica itself, resides
 * in the broker's rack.
 *
 * @param replica      Replica for which the set of rack-aware eligible brokers is requested.
 * @param clusterModel The state of the cluster.
 * @return A sorted set of rack-aware eligible brokers for the given replica in the given cluster.
 */
private SortedSet<Broker> rackAwareEligibleBrokers(Replica replica, ClusterModel clusterModel) {
    // Populate partition rack ids.
    List<String> partitionRackIds = clusterModel.partition(replica.topicPartition()).partitionBrokers().stream().map(partitionBroker -> partitionBroker.rack().id()).collect(Collectors.toList());
    // Remove the rack id of the given replica's broker; if another replica of the partition resides in the
    // same rack, that rack id remains in the list and keeps the rack ineligible.
    partitionRackIds.remove(replica.broker().rack().id());
    SortedSet<Broker> rackAwareEligibleBrokers = new TreeSet<>(Comparator.comparingInt(Broker::id));
    for (Broker broker : clusterModel.healthyBrokers()) {
        if (!partitionRackIds.contains(broker.rack().id())) {
            rackAwareEligibleBrokers.add(broker);
        }
    }
    // Return eligible brokers.
    return rackAwareEligibleBrokers;
}
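The eligibility test above reduces to simple set arithmetic over rack ids. The following is a minimal, self-contained sketch of the same filter using plain collections instead of the Cruise Control model classes; the broker-to-rack layout and the ids are hypothetical, invented for illustration.

import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Collectors;

public class RackEligibilitySketch {
    public static void main(String[] args) {
        // Hypothetical layout: broker id -> rack id.
        Map<Integer, String> brokerRacks = Map.of(0, "r1", 1, "r1", 2, "r2", 3, "r3");
        // Brokers currently hosting replicas of the partition.
        List<Integer> partitionBrokers = List.of(0, 2);
        // The replica being considered lives on broker 0.
        int replicaBroker = 0;

        // Racks used by the partition's replicas; a List, so duplicates survive.
        List<String> partitionRackIds = partitionBrokers.stream()
                .map(brokerRacks::get)
                .collect(Collectors.toList());
        // Drop one occurrence of the replica's own rack. If a second replica shared
        // that rack, another occurrence would remain and keep the rack ineligible.
        partitionRackIds.remove(brokerRacks.get(replicaBroker));

        // Eligible destinations: brokers whose rack hosts no other replica of the partition.
        SortedSet<Integer> eligible = brokerRacks.keySet().stream()
                .filter(id -> !partitionRackIds.contains(brokerRacks.get(id)))
                .collect(Collectors.toCollection(TreeSet::new));
        System.out.println(eligible); // Prints [0, 1, 3]; rack r2 is excluded.
    }
}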

Example 7 with ClusterModel

use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by linkedin.

the class KafkaCruiseControl method addBrokers.

/**
 * Add brokers to the cluster.
 *
 * @param brokerIds The ids of the brokers to add.
 * @param dryRun Whether this is a dry run; when true, proposals are computed but not executed.
 * @param throttleAddedBrokers Whether to throttle the brokers that are being added.
 * @param goals The goals to be met when adding the brokers. When empty, all goals will be used.
 * @param requirements The cluster model completeness requirements.
 * @param operationProgress The progress of the job to update.
 * @return The optimization result.
 * @throws KafkaCruiseControlException when any exception occurs during the broker addition.
 */
public GoalOptimizer.OptimizerResult addBrokers(Collection<Integer> brokerIds, boolean dryRun, boolean throttleAddedBrokers, List<String> goals, ModelCompletenessRequirements requirements, OperationProgress operationProgress) throws KafkaCruiseControlException {
    try (AutoCloseable ignored = _loadMonitor.acquireForModelGeneration(operationProgress)) {
        Map<Integer, Goal> goalsByPriority = goalsByPriority(goals);
        ModelCompletenessRequirements modelCompletenessRequirements = modelCompletenessRequirements(goalsByPriority.values()).weaker(requirements);
        ClusterModel clusterModel = _loadMonitor.clusterModel(_time.milliseconds(), modelCompletenessRequirements, operationProgress);
        brokerIds.forEach(id -> clusterModel.setBrokerState(id, Broker.State.NEW));
        GoalOptimizer.OptimizerResult result = getOptimizationProposals(clusterModel, goalsByPriority, operationProgress);
        if (!dryRun) {
            executeProposals(result.goalProposals(), throttleAddedBrokers ? Collections.emptyList() : brokerIds);
        }
        return result;
    } catch (KafkaCruiseControlException kcce) {
        throw kcce;
    } catch (Exception e) {
        throw new KafkaCruiseControlException(e);
    }
}
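A hedged usage sketch of this method follows. It assumes a fully wired KafkaCruiseControl instance (its construction is out of scope here, and its import path is an assumption), and the broker ids are hypothetical; the dry-run-first pattern itself is grounded in the dryRun flag above.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import com.linkedin.kafka.cruisecontrol.KafkaCruiseControl;
import com.linkedin.kafka.cruisecontrol.analyzer.GoalOptimizer;
import com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress;
import com.linkedin.kafka.cruisecontrol.exception.KafkaCruiseControlException;
import com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements;

class AddBrokersSketch {
    // Sketch only: assumes kafkaCruiseControl is already constructed and running.
    static GoalOptimizer.OptimizerResult previewAddition(KafkaCruiseControl kafkaCruiseControl)
            throws KafkaCruiseControlException {
        List<Integer> newBrokers = Arrays.asList(4, 5); // Hypothetical broker ids.
        ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
        // dryRun = true: compute the proposals without executing them. An empty
        // goal list means all configured goals are used, per the Javadoc above.
        return kafkaCruiseControl.addBrokers(newBrokers, true, true,
                Collections.emptyList(), requirements, new OperationProgress());
    }
}

Inspecting the returned goalProposals() before a second call with dryRun = false keeps the execution step explicit.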

Example 8 with ClusterModel

use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by linkedin.

the class KafkaCruiseControl method decommissionBrokers.

/**
 * Decommission brokers.
 *
 * @param brokerIds The ids of the brokers to decommission.
 * @param dryRun Whether this is a dry run; when true, proposals are computed but not executed.
 * @param throttleDecommissionedBroker Whether to throttle the brokers that are being decommissioned.
 * @param goals The goals to be met when decommissioning the brokers. When empty, all goals will be used.
 * @param requirements The cluster model completeness requirements.
 * @param operationProgress The progress to report.
 * @return The optimization result.
 *
 * @throws KafkaCruiseControlException when any exception occurs during the decommission process.
 */
public GoalOptimizer.OptimizerResult decommissionBrokers(Collection<Integer> brokerIds, boolean dryRun, boolean throttleDecommissionedBroker, List<String> goals, ModelCompletenessRequirements requirements, OperationProgress operationProgress) throws KafkaCruiseControlException {
    Map<Integer, Goal> goalsByPriority = goalsByPriority(goals);
    ModelCompletenessRequirements modelCompletenessRequirements = modelCompletenessRequirements(goalsByPriority.values()).weaker(requirements);
    try (AutoCloseable ignored = _loadMonitor.acquireForModelGeneration(operationProgress)) {
        ClusterModel clusterModel = _loadMonitor.clusterModel(_time.milliseconds(), modelCompletenessRequirements, operationProgress);
        brokerIds.forEach(id -> clusterModel.setBrokerState(id, Broker.State.DEAD));
        GoalOptimizer.OptimizerResult result = getOptimizationProposals(clusterModel, goalsByPriority, operationProgress);
        if (!dryRun) {
            executeProposals(result.goalProposals(), throttleDecommissionedBroker ? Collections.emptyList() : brokerIds);
        }
        return result;
    } catch (KafkaCruiseControlException kcce) {
        throw kcce;
    } catch (Exception e) {
        throw new KafkaCruiseControlException(e);
    }
}
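Note how closely decommissionBrokers mirrors addBrokers: the only substantive difference is the state stamped onto the cluster model before optimization, Broker.State.DEAD here versus Broker.State.NEW above. The goal logic, not this method, then determines whether replicas flow onto or off of the affected brokers, and the same dry-run and throttling switches apply.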

Example 9 with ClusterModel

use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by linkedin.

the class KafkaCruiseControlServlet method getPartitionLoad.

private boolean getPartitionLoad(HttpServletRequest request, HttpServletResponse response) throws Exception {
    Resource resource;
    Long startMs;
    Long endMs;
    boolean json = wantJSON(request);
    try {
        String resourceString = request.getParameter(RESOURCE_PARAM);
        try {
            if (resourceString == null) {
                resourceString = DEFAULT_PARTITION_LOAD_RESOURCE;
            }
            resource = Resource.valueOf(resourceString.toUpperCase());
        } catch (IllegalArgumentException iae) {
            String errorMsg = String.format("Invalid resource type %s. The resource type must be one of the following: " + "CPU, DISK, NW_IN, NW_OUT", resourceString);
            StringWriter sw = new StringWriter();
            iae.printStackTrace(new PrintWriter(sw));
            setErrorResponse(response, sw.toString(), errorMsg, SC_BAD_REQUEST, json);
            // Close session
            return true;
        }
        String startMsString = request.getParameter(START_MS_PARAM);
        String endMsString = request.getParameter(END_MS_PARAM);
        startMs = startMsString == null ? -1L : Long.parseLong(startMsString);
        endMs = endMsString == null ? System.currentTimeMillis() : Long.parseLong(endMsString);
    } catch (Exception e) {
        StringWriter sw = new StringWriter();
        e.printStackTrace(new PrintWriter(sw));
        setErrorResponse(response, sw.toString(), e.getMessage(), SC_BAD_REQUEST, json);
        // Close session
        return true;
    }
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false);
    // Get cluster model asynchronously.
    ClusterModel clusterModel = getAndMaybeReturnProgress(request, response, () -> _asyncKafkaCruiseControl.clusterModel(startMs, endMs, requirements));
    if (clusterModel == null) {
        return false;
    }
    List<Partition> sortedPartitions = clusterModel.replicasSortedByUtilization(resource);
    OutputStream out = response.getOutputStream();
    String entriesString = request.getParameter(ENTRIES);
    Integer entries = entriesString == null ? Integer.MAX_VALUE : Integer.parseInt(entriesString);
    int numEntries = 0;
    if (!json) {
        int topicNameLength = clusterModel.topics().stream().mapToInt(String::length).max().orElse(20) + 5;
        setResponseCode(response, SC_OK);
        out.write(String.format("%" + topicNameLength + "s%10s%30s%20s%20s%20s%20s%n", "PARTITION", "LEADER", "FOLLOWERS", "CPU (%)", "DISK (MB)", "NW_IN (KB/s)", "NW_OUT (KB/s)").getBytes(StandardCharsets.UTF_8));
        for (Partition p : sortedPartitions) {
            if (++numEntries > entries) {
                break;
            }
            List<Integer> followers = p.followers().stream().map((replica) -> replica.broker().id()).collect(Collectors.toList());
            out.write(String.format("%" + topicNameLength + "s%10s%30s%19.6f%19.3f%19.3f%19.3f%n", p.leader().topicPartition(), p.leader().broker().id(), followers, p.leader().load().expectedUtilizationFor(Resource.CPU), p.leader().load().expectedUtilizationFor(Resource.DISK), p.leader().load().expectedUtilizationFor(Resource.NW_IN), p.leader().load().expectedUtilizationFor(Resource.NW_OUT)).getBytes(StandardCharsets.UTF_8));
        }
    } else {
        Map<String, Object> partitionMap = new HashMap<>();
        List<Object> partitionList = new ArrayList<>();
        List<String> header = new ArrayList<>(Arrays.asList("topic", "partition", "leader", "followers", "CPU", "DISK", "NW_IN", "NW_OUT"));
        partitionMap.put("version", JSON_VERSION);
        partitionMap.put("header", header);
        for (Partition p : sortedPartitions) {
            if (++numEntries > entries) {
                break;
            }
            List<Integer> followers = p.followers().stream().map((replica) -> replica.broker().id()).collect(Collectors.toList());
            List<Object> record = new ArrayList<>();
            record.add(p.leader().topicPartition().topic());
            record.add(p.leader().topicPartition().partition());
            record.add(p.leader().broker().id());
            record.add(followers);
            record.add(p.leader().load().expectedUtilizationFor(Resource.CPU));
            record.add(p.leader().load().expectedUtilizationFor(Resource.DISK));
            record.add(p.leader().load().expectedUtilizationFor(Resource.NW_IN));
            record.add(p.leader().load().expectedUtilizationFor(Resource.NW_OUT));
            partitionList.add(record);
        }
        partitionMap.put("records", partitionList);
        Gson gson = new Gson();
        String g = gson.toJson(partitionMap);
        setJSONResponseCode(response, SC_OK);
        // Content length must be the encoded byte count, not the character count,
        // so non-ASCII topic names do not truncate the response.
        byte[] payload = g.getBytes(StandardCharsets.UTF_8);
        response.setContentLength(payload.length);
        out.write(payload);
    }
    out.flush();
    return true;
}
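In the JSON branch above, the response is a fixed envelope: a version field, a header array naming the columns, and a records array holding one row per partition in header order. A hedged deserialization sketch with Gson follows; the PartitionLoadPayload holder type is hypothetical, invented for illustration.

import java.util.List;
import com.google.gson.Gson;

class PartitionLoadPayload {
    // Field names mirror the keys written into partitionMap above.
    int version;
    List<String> header;        // "topic", "partition", "leader", "followers", ...
    List<List<Object>> records; // One row per partition, in header order.
}

class PartitionLoadParserSketch {
    static PartitionLoadPayload parse(String json) {
        return new Gson().fromJson(json, PartitionLoadPayload.class);
    }
}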

Example 10 with ClusterModel

use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by linkedin.

the class KafkaCruiseControlServlet method getClusterLoad.

private boolean getClusterLoad(HttpServletRequest request, HttpServletResponse response) throws Exception {
    long time;
    String granularity;
    boolean json = wantJSON(request);
    try {
        String timeString = request.getParameter(TIME_PARAM);
        time = (timeString == null || "NOW".equalsIgnoreCase(timeString)) ? System.currentTimeMillis() : Long.parseLong(timeString);
        granularity = request.getParameter(GRANULARITY_PARAM);
    } catch (Exception e) {
        StringWriter sw = new StringWriter();
        e.printStackTrace(new PrintWriter(sw));
        setErrorResponse(response, sw.toString(), e.getMessage(), SC_BAD_REQUEST, json);
        // Close session
        return true;
    }
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
    if (granularity == null || granularity.equalsIgnoreCase(GRANULARITY_BROKER)) {
        ClusterModel.BrokerStats brokerStats = _asyncKafkaCruiseControl.cachedBrokerLoadStats();
        if (brokerStats == null) {
            // No cached stats; get the broker stats asynchronously.
            brokerStats = getAndMaybeReturnProgress(request, response, () -> _asyncKafkaCruiseControl.getBrokerStats(time, requirements));
            if (brokerStats == null) {
                return false;
            }
        }
        String brokerLoad = json ? brokerStats.getJSONString(JSON_VERSION) : brokerStats.toString();
        if (json) {
            setJSONResponseCode(response, SC_OK);
        } else {
            setResponseCode(response, SC_OK);
        }
        // Content length must be the encoded byte count, not the character count.
        byte[] payload = brokerLoad.getBytes(StandardCharsets.UTF_8);
        response.setContentLength(payload.length);
        response.getOutputStream().write(payload);
    } else if (granularity.equalsIgnoreCase(GRANULARITY_REPLICA)) {
        // Get the cluster model asynchronously
        ClusterModel clusterModel = getAndMaybeReturnProgress(request, response, () -> _asyncKafkaCruiseControl.clusterModel(time, requirements));
        if (clusterModel == null) {
            return false;
        }
        if (json) {
            String data = clusterModel.getJSONString(JSON_VERSION);
            setJSONResponseCode(response, SC_OK);
            // Content length must be the encoded byte count, not the character count.
            byte[] payload = data.getBytes(StandardCharsets.UTF_8);
            response.setContentLength(payload.length);
            response.getOutputStream().write(payload);
        } else {
            setResponseCode(response, SC_OK);
            // Write to stream to avoid expensive toString() call.
            clusterModel.writeTo(response.getOutputStream());
        }
    } else {
        String errorMsg = String.format("Unknown granularity %s", granularity);
        setErrorResponse(response, "", errorMsg, SC_BAD_REQUEST, json);
        // Close session
        return true;
    }
    response.getOutputStream().flush();
    return true;
}
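A hedged client-side sketch for exercising this handler over HTTP. The /kafkacruisecontrol/load path, the default port, and the literal parameter names time, granularity, and json are assumptions inferred from the parameter constants above, not confirmed by this excerpt.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ClusterLoadClientSketch {
    public static void main(String[] args) throws Exception {
        // granularity=replica exercises the full cluster-model branch above;
        // omit it (or pass granularity=broker) for the cheaper cached broker stats.
        URL url = new URL("http://localhost:9090/kafkacruisecontrol/load?time=NOW&granularity=replica&json=true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            in.lines().forEach(System.out::println);
        }
    }
}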

Aggregations

ClusterModel (com.linkedin.kafka.cruisecontrol.model.ClusterModel): 38
TopicPartition (org.apache.kafka.common.TopicPartition): 12
ModelCompletenessRequirements (com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements): 11
Test (org.junit.Test): 11
BalancingConstraint (com.linkedin.kafka.cruisecontrol.analyzer.BalancingConstraint): 10
Replica (com.linkedin.kafka.cruisecontrol.model.Replica): 10
Broker (com.linkedin.kafka.cruisecontrol.model.Broker): 9
ClusterModelStats (com.linkedin.kafka.cruisecontrol.model.ClusterModelStats): 9
List (java.util.List): 9
HashSet (java.util.HashSet): 8
Goal (com.linkedin.kafka.cruisecontrol.analyzer.goals.Goal): 7
Resource (com.linkedin.kafka.cruisecontrol.common.Resource): 7
Set (java.util.Set): 7
Logger (org.slf4j.Logger): 7
LoggerFactory (org.slf4j.LoggerFactory): 7
ActionAcceptance (com.linkedin.kafka.cruisecontrol.analyzer.ActionAcceptance): 6
BalancingAction (com.linkedin.kafka.cruisecontrol.analyzer.BalancingAction): 6
OperationProgress (com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress): 6
ArrayList (java.util.ArrayList): 6
Comparator (java.util.Comparator): 6