Use of com.linkedin.kafka.cruisecontrol.analyzer.OptimizerResult in project cruise-control by LinkedIn.
The class DemoteBrokerRunnable, method workWithClusterModel: validates the demotion request, builds a cluster model, computes demotion proposals, and executes them unless the request is a dry run.
@Override
protected OptimizerResult workWithClusterModel()
    throws KafkaCruiseControlException, TimeoutException, NotEnoughValidWindowsException {
  // A broker may not be demoted at both the broker level and the logdir level in the same request.
  ensureDisjoint(_brokerIds, _brokerIdAndLogdirs.keySet(),
                 "Attempt to demote the broker and its disk in the same request is not allowed.");
  // Verify that every broker referenced in the request is present in the cluster.
  Set<Integer> brokersToCheckPresence = new HashSet<>(_brokerIds);
  brokersToCheckPresence.addAll(_brokerIdAndLogdirs.keySet());
  _kafkaCruiseControl.sanityCheckBrokerPresence(brokersToCheckPresence);
  // Demoting specific log dirs requires a cluster model that includes per-disk replica placement.
  ClusterModel clusterModel = _brokerIdAndLogdirs.isEmpty()
      ? _kafkaCruiseControl.clusterModel(_combinedCompletenessRequirements, _allowCapacityEstimation, _operationProgress)
      : _kafkaCruiseControl.clusterModel(DEFAULT_START_TIME_FOR_CLUSTER_MODEL, _kafkaCruiseControl.timeMs(),
                                         _combinedCompletenessRequirements, true, _allowCapacityEstimation, _operationProgress);
  setDemoteState(clusterModel);
  OptimizationOptions optimizationOptions =
      computeOptimizationOptions(clusterModel, false, _kafkaCruiseControl, Collections.emptySet(), _dryRun,
                                 _excludeRecentlyDemotedBrokers, _excludeRecentlyRemovedBrokers, _excludedTopics,
                                 Collections.emptySet(), false, _fastMode);
  OptimizerResult result = _kafkaCruiseControl.optimizations(clusterModel, _goalsByPriority, _operationProgress, null, optimizationOptions);
  if (!_dryRun) {
    _kafkaCruiseControl.executeDemotion(result.goalProposals(), _brokerIds, _concurrentLeaderMovements,
                                        clusterModel.brokers().size(), _executionProgressCheckIntervalMs,
                                        _replicaMovementStrategy, _replicationThrottle, _isTriggeredByUserRequest, _uuid);
  }
  return result;
}
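The ensureDisjoint call above rejects requests that try to demote a broker and one of its disks at the same time. A minimal standalone sketch of that kind of check; the helper class below is hypothetical, not Cruise Control's implementation, which may throw a different exception type:

import java.util.Collections;
import java.util.Set;

final class RequestChecks {
  // Hypothetical helper: fail fast when the broker-level and logdir-level demotion targets overlap.
  static void ensureDisjoint(Set<Integer> brokerIds, Set<Integer> logdirBrokerIds, String message) {
    if (!Collections.disjoint(brokerIds, logdirBrokerIds)) {
      throw new IllegalArgumentException(message);
    }
  }
}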
Use of com.linkedin.kafka.cruisecontrol.analyzer.OptimizerResult in project cruise-control by LinkedIn.
The class UpdateTopicConfigurationRunnable, method workWithClusterModel: changes topic replication factors by first creating or deleting replicas and then optimizing the placement of the newly added replicas, executing the proposals unless the request is a dry run.
@Override
protected OptimizerResult workWithClusterModel()
    throws KafkaCruiseControlException, TimeoutException, NotEnoughValidWindowsException {
  Map<String, List<Integer>> brokersByRack = new HashMap<>();
  Map<Integer, String> rackByBroker = new HashMap<>();
  ClusterModel clusterModel = _kafkaCruiseControl.clusterModel(_combinedCompletenessRequirements, _allowCapacityEstimation, _operationProgress);
  if (!clusterModel.isClusterAlive()) {
    throw new IllegalArgumentException("All brokers are dead in the cluster.");
  }
  OptimizationOptions optimizationOptions =
      computeOptimizationOptions(clusterModel, false, _kafkaCruiseControl, Collections.emptySet(), _dryRun,
                                 _excludeRecentlyDemotedBrokers, _excludeRecentlyRemovedBrokers, _excludedTopics,
                                 Collections.emptySet(), true, _fastMode);
  populateRackInfoForReplicationFactorChange(_topicsToChangeByReplicationFactor, _cluster, _skipRackAwarenessCheck,
                                             brokersByRack, rackByBroker);
  Map<TopicPartition, List<ReplicaPlacementInfo>> initReplicaDistribution = clusterModel.getReplicaDistribution();
  // First try to add and remove replicas to achieve the replication factor for topics of interest.
  clusterModel.createOrDeleteReplicas(_topicsToChangeByReplicationFactor, brokersByRack, rackByBroker, _cluster);
  // Then further optimize the location of newly added replicas based on goals. Here we restrict the replica movement to
  // only considering newly added replicas, in order to minimize the total bytes to move.
  OptimizerResult result = _kafkaCruiseControl.optimizations(clusterModel, _goalsByPriority, _operationProgress,
                                                             initReplicaDistribution, optimizationOptions);
  if (!_dryRun) {
    _kafkaCruiseControl.executeProposals(result.goalProposals(), Collections.emptySet(), false,
                                         _concurrentInterBrokerPartitionMovements, _maxInterBrokerPartitionMovements, 0,
                                         _concurrentLeaderMovements, _executionProgressCheckIntervalMs, _replicaMovementStrategy,
                                         _replicationThrottle, _isTriggeredByUserRequest, _uuid, SKIP_AUTO_REFRESHING_CONCURRENCY);
  }
  return result;
}
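The initReplicaDistribution snapshot taken before createOrDeleteReplicas is what allows the optimizer to restrict movement to the replicas that were just added. As an illustration only, not the project's logic, diffing the snapshot against a later call to getReplicaDistribution() would identify the partitions whose placement changed; the helper class and method below are hypothetical, and the sketch assumes placement entries compare by value:

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;

final class DistributionDiff {
  // Hypothetical helper: return the partitions whose replica placement differs between two snapshots
  // shaped like the result of ClusterModel#getReplicaDistribution().
  static <P> Set<TopicPartition> changedPartitions(Map<TopicPartition, List<P>> before,
                                                   Map<TopicPartition, List<P>> after) {
    Set<TopicPartition> changed = new HashSet<>();
    for (Map.Entry<TopicPartition, List<P>> entry : after.entrySet()) {
      // A partition counts as changed if it is new or its replica list no longer matches the earlier snapshot.
      if (!entry.getValue().equals(before.get(entry.getKey()))) {
        changed.add(entry.getKey());
      }
    }
    return changed;
  }
}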
Use of com.linkedin.kafka.cruisecontrol.analyzer.OptimizerResult in project cruise-control by LinkedIn.
The class AddBrokersRunnable, method workWithClusterModel: marks the requested brokers as new in the cluster model, computes proposals that move replicas onto them, and executes the proposals unless the request is a dry run.
@Override
protected OptimizerResult workWithClusterModel()
    throws KafkaCruiseControlException, TimeoutException, NotEnoughValidWindowsException {
  _kafkaCruiseControl.sanityCheckBrokerPresence(_brokerIds);
  ClusterModel clusterModel = _kafkaCruiseControl.clusterModel(_combinedCompletenessRequirements, _allowCapacityEstimation, _operationProgress);
  sanityCheckBrokersHavingOfflineReplicasOnBadDisks(_goals, clusterModel);
  // Mark the brokers being added as new so that the goals may move replicas onto them.
  _brokerIds.forEach(id -> clusterModel.setBrokerState(id, Broker.State.NEW));
  if (!clusterModel.isClusterAlive()) {
    throw new IllegalArgumentException("All brokers are dead in the cluster.");
  }
  OptimizationOptions optimizationOptions =
      computeOptimizationOptions(clusterModel, false, _kafkaCruiseControl, _brokerIds, _dryRun,
                                 _excludeRecentlyDemotedBrokers, _excludeRecentlyRemovedBrokers, _excludedTopics,
                                 Collections.emptySet(), false, _fastMode);
  OptimizerResult result = _kafkaCruiseControl.optimizations(clusterModel, _goalsByPriority, _operationProgress, null, optimizationOptions);
  if (!_dryRun) {
    _kafkaCruiseControl.executeProposals(result.goalProposals(),
                                         _throttleAddedBrokers ? Collections.emptySet() : _brokerIds,
                                         isKafkaAssignerMode(_goals), _concurrentInterBrokerPartitionMovements,
                                         _maxInterBrokerPartitionMovements, null, _concurrentLeaderMovements,
                                         _executionProgressCheckIntervalMs, _replicaMovementStrategy, _replicationThrottle,
                                         _isTriggeredByUserRequest, _uuid, SKIP_AUTO_REFRESHING_CONCURRENCY);
  }
  return result;
}
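Both DemoteBrokerRunnable and AddBrokersRunnable call sanityCheckBrokerPresence before building a cluster model. A rough sketch of that kind of presence check against Kafka's Cluster metadata; the helper class and method name are hypothetical, not Cruise Control's implementation:

import java.util.HashSet;
import java.util.Set;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;

final class BrokerPresence {
  // Hypothetical helper: verify that every requested broker id is present in the cluster metadata.
  static void checkBrokersExist(Cluster cluster, Set<Integer> requestedBrokerIds) {
    Set<Integer> knownBrokerIds = new HashSet<>();
    for (Node node : cluster.nodes()) {
      knownBrokerIds.add(node.id());
    }
    Set<Integer> missing = new HashSet<>(requestedBrokerIds);
    missing.removeAll(knownBrokerIds);
    if (!missing.isEmpty()) {
      throw new IllegalArgumentException("Brokers " + missing + " are not present in the cluster.");
    }
  }
}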
Use of com.linkedin.kafka.cruisecontrol.analyzer.OptimizerResult in project cruise-control by LinkedIn.
The class FixOfflineReplicasRunnable, method workWithClusterModel: verifies that the generated cluster model contains offline replicas, computes proposals to relocate them, and executes the result unless the request is a dry run.
@Override
protected OptimizerResult workWithClusterModel()
    throws KafkaCruiseControlException, TimeoutException, NotEnoughValidWindowsException {
  ClusterModel clusterModel = _kafkaCruiseControl.clusterModel(_combinedCompletenessRequirements, _allowCapacityEstimation, _operationProgress);
  // Ensure that the generated cluster model contains offline replicas.
  sanityCheckOfflineReplicaPresence(clusterModel);
  if (!clusterModel.isClusterAlive()) {
    throw new IllegalArgumentException("All brokers are dead in the cluster.");
  }
  OptimizationOptions optimizationOptions =
      computeOptimizationOptions(clusterModel, false, _kafkaCruiseControl, Collections.emptySet(), _dryRun,
                                 _excludeRecentlyDemotedBrokers, _excludeRecentlyRemovedBrokers, _excludedTopics,
                                 Collections.emptySet(), false, _fastMode);
  OptimizerResult result = _kafkaCruiseControl.optimizations(clusterModel, _goalsByPriority, _operationProgress, null, optimizationOptions);
  if (!_dryRun) {
    _kafkaCruiseControl.executeProposals(result.goalProposals(), Collections.emptySet(), false,
                                         _concurrentInterBrokerPartitionMovements, _maxInterBrokerPartitionMovements, null,
                                         _concurrentLeaderMovements, _executionProgressCheckIntervalMs, _replicaMovementStrategy,
                                         _replicationThrottle, _isTriggeredByUserRequest, _uuid, SKIP_AUTO_REFRESHING_CONCURRENCY);
  }
  return result;
}
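Like the other runnables, this one returns the OptimizerResult even when _dryRun is set, so a caller can inspect the computed proposals before anything is executed. A small illustrative helper; only the goalProposals() accessor is taken from the snippets above, the class and method are hypothetical, and the sketch assumes goalProposals() returns a collection:

import com.linkedin.kafka.cruisecontrol.analyzer.OptimizerResult;

final class DryRunInspector {
  // Illustrative only: report whether a computed result actually proposes any movements.
  static boolean hasProposals(OptimizerResult result) {
    // goalProposals() is the same accessor the runnables above pass to the executor.
    return !result.goalProposals().isEmpty();
  }
}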
Use of com.linkedin.kafka.cruisecontrol.analyzer.OptimizerResult in project cruise-control by LinkedIn.
The class RebalanceRunnable, method workWithoutClusterModel: delegates proposal computation to a ProposalsRunnable and executes the resulting proposals unless the request is a dry run.
@Override
protected OptimizerResult workWithoutClusterModel() throws KafkaCruiseControlException {
  ProposalsRunnable proposalsRunnable =
      new ProposalsRunnable(_kafkaCruiseControl, _future, _goals, _modelCompletenessRequirements, _allowCapacityEstimation,
                            _excludedTopics, _excludeRecentlyDemotedBrokers, _excludeRecentlyRemovedBrokers,
                            _ignoreProposalCache, _destinationBrokerIds, _isRebalanceDiskMode, _skipHardGoalCheck,
                            !_isTriggeredByUserRequest, _fastMode);
  OptimizerResult result = proposalsRunnable.computeResult();
  if (!_dryRun) {
    _kafkaCruiseControl.executeProposals(result.goalProposals(), Collections.emptySet(), isKafkaAssignerMode(_goals),
                                         _concurrentInterBrokerPartitionMovements, _maxInterBrokerPartitionMovements,
                                         _concurrentIntraBrokerPartitionMovements, _concurrentLeaderMovements,
                                         _executionProgressCheckIntervalMs, _replicaMovementStrategy, _replicationThrottle,
                                         _isTriggeredByUserRequest, _uuid, SKIP_AUTO_REFRESHING_CONCURRENCY);
  }
  return result;
}
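Taken together, the five snippets share one control flow: compute an OptimizerResult, execute its proposals only when the request is not a dry run, and return the result to the caller. A stripped-down sketch of that shared shape; the abstract class below is illustrative and is not Cruise Control's class hierarchy:

import com.linkedin.kafka.cruisecontrol.analyzer.OptimizerResult;

// Illustrative template only: every runnable above computes a result, conditionally executes it, and returns it.
abstract class ResultThenMaybeExecute {
  protected final boolean _dryRun;

  protected ResultThenMaybeExecute(boolean dryRun) {
    _dryRun = dryRun;
  }

  // Compute proposals (e.g. via KafkaCruiseControl#optimizations or a delegate ProposalsRunnable).
  protected abstract OptimizerResult compute() throws Exception;

  // Hand result.goalProposals() to the executor (executeProposals / executeDemotion in the snippets above).
  protected abstract void execute(OptimizerResult result) throws Exception;

  final OptimizerResult run() throws Exception {
    OptimizerResult result = compute();
    if (!_dryRun) {
      execute(result);
    }
    return result;
  }
}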