Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn:
the class RackAwareGoal, method rackAwareEligibleBrokers.
/**
 * Get the set of rack-aware eligible brokers for the given replica in the given cluster. A broker is rack-aware
 * eligible for a given replica if no other broker in its rack hosts a replica of the same partition.
 *
 * @param replica Replica for which the set of rack-aware eligible brokers is requested.
 * @param clusterModel The state of the cluster.
 * @return A sorted set of rack-aware eligible brokers for the given replica in the given cluster.
 */
private SortedSet<Broker> rackAwareEligibleBrokers(Replica replica, ClusterModel clusterModel) {
  // Populate the rack ids of all brokers currently hosting a replica of this partition.
  List<String> partitionRackIds = clusterModel.partition(replica.topicPartition())
      .partitionBrokers().stream()
      .map(partitionBroker -> partitionBroker.rack().id())
      .collect(Collectors.toList());
  // Remove the rack id of the given replica's broker. remove() deletes only a single occurrence,
  // so if another replica of the partition resides in the same rack, its rack id stays in the list.
  partitionRackIds.remove(replica.broker().rack().id());
  SortedSet<Broker> rackAwareEligibleBrokers = new TreeSet<>(Comparator.comparingInt(Broker::id));
  for (Broker broker : clusterModel.healthyBrokers()) {
    if (!partitionRackIds.contains(broker.rack().id())) {
      rackAwareEligibleBrokers.add(broker);
    }
  }
  return rackAwareEligibleBrokers;
}
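To make the eligibility rule concrete, here is a minimal, self-contained sketch of the same multiset-style rack check. Everything in it (the isEligible helper, the rack ids, the RackAwareSketch class) is hypothetical and introduced only for illustration; it is not part of cruise-control's API. Note why the rack ids are kept in a List rather than a Set: when two sibling replicas share a rack, removing one id must still leave that rack excluded.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RackAwareSketch {
  // A candidate broker is eligible only if no other replica of the partition
  // already resides in the candidate's rack.
  static boolean isEligible(String candidateRack, List<String> otherReplicaRacks) {
    return !otherReplicaRacks.contains(candidateRack);
  }

  public static void main(String[] args) {
    // Replicas of a partition live in racks r1, r1 and r2; we consider moving the replica in r1.
    List<String> partitionRackIds = new ArrayList<>(Arrays.asList("r1", "r1", "r2"));
    partitionRackIds.remove("r1"); // drop the moving replica's rack; one "r1" remains
    System.out.println(isEligible("r1", partitionRackIds)); // false: a sibling replica is still in r1
    System.out.println(isEligible("r3", partitionRackIds)); // true: r3 hosts no replica of this partition
  }
}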
Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn:
the class KafkaCruiseControl, method addBrokers.
/**
 * Add brokers to the cluster.
 *
 * @param brokerIds The ids of the brokers to add.
 * @param dryRun Whether this is a dry run or not.
 * @param throttleAddedBrokers Whether to throttle the brokers that are being added.
 * @param goals The goals to be met when adding the brokers. When empty, all goals will be used.
 * @param requirements The cluster model completeness requirements.
 * @param operationProgress The progress of the job to update.
 * @return The optimization result.
 * @throws KafkaCruiseControlException if any exception occurs during the broker addition.
 */
public GoalOptimizer.OptimizerResult addBrokers(Collection<Integer> brokerIds,
                                                boolean dryRun,
                                                boolean throttleAddedBrokers,
                                                List<String> goals,
                                                ModelCompletenessRequirements requirements,
                                                OperationProgress operationProgress) throws KafkaCruiseControlException {
  try (AutoCloseable ignored = _loadMonitor.acquireForModelGeneration(operationProgress)) {
    Map<Integer, Goal> goalsByPriority = goalsByPriority(goals);
    ModelCompletenessRequirements modelCompletenessRequirements =
        modelCompletenessRequirements(goalsByPriority.values()).weaker(requirements);
    ClusterModel clusterModel = _loadMonitor.clusterModel(_time.milliseconds(), modelCompletenessRequirements, operationProgress);
    // Mark the added brokers as NEW so the optimizer moves replicas onto them.
    brokerIds.forEach(id -> clusterModel.setBrokerState(id, Broker.State.NEW));
    GoalOptimizer.OptimizerResult result = getOptimizationProposals(clusterModel, goalsByPriority, operationProgress);
    if (!dryRun) {
      // The second argument is the set of unthrottled brokers; pass an empty list when throttling is requested.
      executeProposals(result.goalProposals(), throttleAddedBrokers ? Collections.emptyList() : brokerIds);
    }
    return result;
  } catch (KafkaCruiseControlException kcce) {
    throw kcce;
  } catch (Exception e) {
    throw new KafkaCruiseControlException(e);
  }
}
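A minimal usage sketch, assuming a KafkaCruiseControl instance that has been built and started elsewhere. The helper name previewThenAddBrokers and the broker ids are hypothetical; the requirements constructor mirrors the one used by the servlet handlers below, and the dry-run-first pattern simply exercises the dryRun flag documented above (assuming goalProposals() returns a collection).

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import com.linkedin.kafka.cruisecontrol.KafkaCruiseControl;
import com.linkedin.kafka.cruisecontrol.analyzer.GoalOptimizer;
import com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress;
import com.linkedin.kafka.cruisecontrol.exception.KafkaCruiseControlException;
import com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements;

class AddBrokersSketch {
  // Hypothetical helper: preview the proposals with a dry run, then apply them.
  static GoalOptimizer.OptimizerResult previewThenAddBrokers(KafkaCruiseControl kafkaCruiseControl)
      throws KafkaCruiseControlException {
    Collection<Integer> newBrokers = Arrays.asList(4, 5);
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
    // Dry run: compute proposals with all goals (empty goal list) but execute nothing.
    GoalOptimizer.OptimizerResult preview = kafkaCruiseControl.addBrokers(
        newBrokers, true, true, Collections.emptyList(), requirements, new OperationProgress());
    System.out.println(preview.goalProposals().size() + " proposals generated for the new brokers.");
    // Apply for real, throttling replica movement into the added brokers.
    return kafkaCruiseControl.addBrokers(
        newBrokers, false, true, Collections.emptyList(), requirements, new OperationProgress());
  }
}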
Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn:
the class KafkaCruiseControl, method decommissionBrokers.
/**
 * Decommission brokers.
 *
 * @param brokerIds The ids of the brokers to decommission.
 * @param dryRun Whether this is a dry run or not.
 * @param throttleDecommissionedBroker Whether to throttle the brokers that are being decommissioned.
 * @param goals The goals to be met when decommissioning the brokers. When empty, all goals will be used.
 * @param requirements The cluster model completeness requirements.
 * @param operationProgress The progress to report.
 * @return The optimization result.
 * @throws KafkaCruiseControlException if any exception occurs during the decommission process.
 */
public GoalOptimizer.OptimizerResult decommissionBrokers(Collection<Integer> brokerIds,
                                                         boolean dryRun,
                                                         boolean throttleDecommissionedBroker,
                                                         List<String> goals,
                                                         ModelCompletenessRequirements requirements,
                                                         OperationProgress operationProgress) throws KafkaCruiseControlException {
  Map<Integer, Goal> goalsByPriority = goalsByPriority(goals);
  ModelCompletenessRequirements modelCompletenessRequirements =
      modelCompletenessRequirements(goalsByPriority.values()).weaker(requirements);
  try (AutoCloseable ignored = _loadMonitor.acquireForModelGeneration(operationProgress)) {
    ClusterModel clusterModel = _loadMonitor.clusterModel(_time.milliseconds(), modelCompletenessRequirements, operationProgress);
    // Mark the brokers as DEAD so the optimizer moves their replicas off them.
    brokerIds.forEach(id -> clusterModel.setBrokerState(id, Broker.State.DEAD));
    GoalOptimizer.OptimizerResult result = getOptimizationProposals(clusterModel, goalsByPriority, operationProgress);
    if (!dryRun) {
      executeProposals(result.goalProposals(), throttleDecommissionedBroker ? Collections.emptyList() : brokerIds);
    }
    return result;
  } catch (KafkaCruiseControlException kcce) {
    throw kcce;
  } catch (Exception e) {
    throw new KafkaCruiseControlException(e);
  }
}
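The same dry-run-first pattern applies when draining brokers. A short sketch follows, with the same imports as the addBrokers sketch above; the helper name drainBroker, the broker id, and the goal names in the list are illustrative assumptions (cruise-control resolves goal names against its configured goals, so any names must match your deployment's configuration).

class DecommissionSketch {
  // Hypothetical helper: drain one broker, honoring only the named goals.
  static void drainBroker(KafkaCruiseControl kafkaCruiseControl) throws KafkaCruiseControlException {
    Collection<Integer> deadBrokers = Collections.singletonList(3);
    // Illustrative goal names; when the list is empty, all configured goals are used instead.
    List<String> goals = Arrays.asList("RackAwareGoal", "DiskCapacityGoal");
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
    // Dry run first: inspect the proposals without moving anything.
    GoalOptimizer.OptimizerResult preview = kafkaCruiseControl.decommissionBrokers(
        deadBrokers, true, true, goals, requirements, new OperationProgress());
    System.out.println("Previewed " + preview.goalProposals().size() + " proposals.");
    // Then execute, throttling the decommissioned broker.
    kafkaCruiseControl.decommissionBrokers(deadBrokers, false, true, goals, requirements, new OperationProgress());
  }
}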
Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn:
the class KafkaCruiseControlServlet, method getPartitionLoad.
private boolean getPartitionLoad(HttpServletRequest request, HttpServletResponse response) throws Exception {
  Resource resource;
  Long startMs;
  Long endMs;
  boolean json = wantJSON(request);
  try {
    String resourceString = request.getParameter(RESOURCE_PARAM);
    try {
      if (resourceString == null) {
        resourceString = DEFAULT_PARTITION_LOAD_RESOURCE;
      }
      resource = Resource.valueOf(resourceString.toUpperCase());
    } catch (IllegalArgumentException iae) {
      String errorMsg = String.format("Invalid resource type %s. The resource type must be one of the following: "
          + "CPU, DISK, NW_IN, NW_OUT", resourceString);
      StringWriter sw = new StringWriter();
      iae.printStackTrace(new PrintWriter(sw));
      setErrorResponse(response, sw.toString(), errorMsg, SC_BAD_REQUEST, json);
      // Close session
      return true;
    }
    String startMsString = request.getParameter(START_MS_PARAM);
    String endMsString = request.getParameter(END_MS_PARAM);
    startMs = startMsString == null ? -1L : Long.parseLong(startMsString);
    endMs = endMsString == null ? System.currentTimeMillis() : Long.parseLong(endMsString);
  } catch (Exception e) {
    StringWriter sw = new StringWriter();
    e.printStackTrace(new PrintWriter(sw));
    setErrorResponse(response, sw.toString(), e.getMessage(), SC_BAD_REQUEST, json);
    // Close session
    return true;
  }
  ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false);
  // Get the cluster model asynchronously.
  ClusterModel clusterModel = getAndMaybeReturnProgress(request, response,
      () -> _asyncKafkaCruiseControl.clusterModel(startMs, endMs, requirements));
  if (clusterModel == null) {
    return false;
  }
  List<Partition> sortedPartitions = clusterModel.replicasSortedByUtilization(resource);
  OutputStream out = response.getOutputStream();
  String entriesString = request.getParameter(ENTRIES);
  int entries = entriesString == null ? Integer.MAX_VALUE : Integer.parseInt(entriesString);
  int numEntries = 0;
  if (!json) {
    int topicNameLength = clusterModel.topics().stream().mapToInt(String::length).max().orElse(20) + 5;
    setResponseCode(response, SC_OK);
    out.write(String.format("%" + topicNameLength + "s%10s%30s%20s%20s%20s%20s%n",
        "PARTITION", "LEADER", "FOLLOWERS", "CPU (%)", "DISK (MB)", "NW_IN (KB/s)", "NW_OUT (KB/s)")
        .getBytes(StandardCharsets.UTF_8));
    for (Partition p : sortedPartitions) {
      if (++numEntries > entries) {
        break;
      }
      List<Integer> followers = p.followers().stream().map(replica -> replica.broker().id()).collect(Collectors.toList());
      out.write(String.format("%" + topicNameLength + "s%10s%30s%19.6f%19.3f%19.3f%19.3f%n",
          p.leader().topicPartition(),
          p.leader().broker().id(),
          followers,
          p.leader().load().expectedUtilizationFor(Resource.CPU),
          p.leader().load().expectedUtilizationFor(Resource.DISK),
          p.leader().load().expectedUtilizationFor(Resource.NW_IN),
          p.leader().load().expectedUtilizationFor(Resource.NW_OUT))
          .getBytes(StandardCharsets.UTF_8));
    }
  } else {
    Map<String, Object> partitionMap = new HashMap<>();
    List<Object> partitionList = new ArrayList<>();
    List<String> header = new ArrayList<>(
        Arrays.asList("topic", "partition", "leader", "followers", "CPU", "DISK", "NW_IN", "NW_OUT"));
    partitionMap.put("version", JSON_VERSION);
    partitionMap.put("header", header);
    for (Partition p : sortedPartitions) {
      if (++numEntries > entries) {
        break;
      }
      List<Integer> followers = p.followers().stream().map(replica -> replica.broker().id()).collect(Collectors.toList());
      List<Object> record = new ArrayList<>();
      record.add(p.leader().topicPartition().topic());
      record.add(p.leader().topicPartition().partition());
      record.add(p.leader().broker().id());
      record.add(followers);
      record.add(p.leader().load().expectedUtilizationFor(Resource.CPU));
      record.add(p.leader().load().expectedUtilizationFor(Resource.DISK));
      record.add(p.leader().load().expectedUtilizationFor(Resource.NW_IN));
      record.add(p.leader().load().expectedUtilizationFor(Resource.NW_OUT));
      partitionList.add(record);
    }
    partitionMap.put("records", partitionList);
    Gson gson = new Gson();
    String g = gson.toJson(partitionMap);
    setJSONResponseCode(response, SC_OK);
    response.setContentLength(g.length());
    out.write(g.getBytes(StandardCharsets.UTF_8));
  }
  out.flush();
  return true;
}
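For context, a sketch of how a client might call this handler over HTTP. The endpoint path /kafkacruisecontrol/partition_load, the port, and the query parameter names (resource, entries, json, matching the constants the handler reads) are assumptions about a typical cruise-control deployment; adjust them to yours.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PartitionLoadClient {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint and parameters: top 10 partitions by DISK utilization, as JSON.
    URI uri = URI.create("http://localhost:9090/kafkacruisecontrol/partition_load"
        + "?resource=DISK&entries=10&json=true");
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder(uri).GET().build();
    HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode());
    System.out.println(response.body()); // e.g. {"version":..., "header":[...], "records":[...]}
  }
}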
Use of com.linkedin.kafka.cruisecontrol.model.ClusterModel in project cruise-control by LinkedIn:
the class KafkaCruiseControlServlet, method getClusterLoad.
private boolean getClusterLoad(HttpServletRequest request, HttpServletResponse response) throws Exception {
  long time;
  String granularity;
  boolean json = wantJSON(request);
  try {
    String timeString = request.getParameter(TIME_PARAM);
    time = (timeString == null || "NOW".equalsIgnoreCase(timeString))
        ? System.currentTimeMillis() : Long.parseLong(timeString);
    granularity = request.getParameter(GRANULARITY_PARAM);
  } catch (Exception e) {
    StringWriter sw = new StringWriter();
    e.printStackTrace(new PrintWriter(sw));
    setErrorResponse(response, sw.toString(), e.getMessage(), SC_BAD_REQUEST, json);
    // Close session
    return true;
  }
  ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
  if (granularity == null || granularity.equalsIgnoreCase(GRANULARITY_BROKER)) {
    // Broker granularity: serve cached broker stats if available, otherwise compute them.
    ClusterModel.BrokerStats brokerStats = _asyncKafkaCruiseControl.cachedBrokerLoadStats();
    String brokerLoad;
    if (brokerStats != null) {
      brokerLoad = json ? brokerStats.getJSONString(JSON_VERSION) : brokerStats.toString();
    } else {
      // Get the broker stats asynchronously.
      brokerStats = getAndMaybeReturnProgress(request, response,
          () -> _asyncKafkaCruiseControl.getBrokerStats(time, requirements));
      if (brokerStats == null) {
        return false;
      }
      brokerLoad = json ? brokerStats.getJSONString(JSON_VERSION) : brokerStats.toString();
    }
    if (json) {
      setJSONResponseCode(response, SC_OK);
    } else {
      setResponseCode(response, SC_OK);
    }
    response.setContentLength(brokerLoad.length());
    response.getOutputStream().write(brokerLoad.getBytes(StandardCharsets.UTF_8));
  } else if (granularity.equalsIgnoreCase(GRANULARITY_REPLICA)) {
    // Replica granularity requires the full cluster model; get it asynchronously.
    ClusterModel clusterModel = getAndMaybeReturnProgress(request, response,
        () -> _asyncKafkaCruiseControl.clusterModel(time, requirements));
    if (clusterModel == null) {
      return false;
    }
    if (json) {
      String data = clusterModel.getJSONString(JSON_VERSION);
      setJSONResponseCode(response, SC_OK);
      response.setContentLength(data.length());
      ServletOutputStream os = response.getOutputStream();
      OutputStreamWriter writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
      writer.write(data);
      writer.flush();
    } else {
      setResponseCode(response, SC_OK);
      // Write to the stream to avoid an expensive toString() call.
      clusterModel.writeTo(response.getOutputStream());
    }
  } else {
    String errorMsg = String.format("Unknown granularity %s", granularity);
    setErrorResponse(response, "", errorMsg, SC_BAD_REQUEST, json);
    // Close session
    return true;
  }
  response.getOutputStream().flush();
  return true;
}
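And a matching client-side sketch for the two granularities this handler accepts. As with the previous sketch, the path /kafkacruisecontrol/load, the port, and the parameter names (time, granularity, json) are assumptions about a typical deployment, not confirmed by the snippet itself.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ClusterLoadClient {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    // Broker-level load (the default when granularity is omitted), using the current time.
    URI brokerLevel = URI.create("http://localhost:9090/kafkacruisecontrol/load?time=NOW&granularity=broker");
    // Replica-level load as JSON; this forces a full cluster model computation.
    URI replicaLevel = URI.create("http://localhost:9090/kafkacruisecontrol/load?granularity=replica&json=true");
    for (URI uri : new URI[]{brokerLevel, replicaLevel}) {
      HttpResponse<String> response =
          client.send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());
      System.out.println(uri + " -> " + response.statusCode());
    }
  }
}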