Use of com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues in project cruise-control by linkedin.
The class PreferredLeaderElectionGoalTest, method createReplicaAndSetLoad:
private void createReplicaAndSetLoad(ClusterModel clusterModel, String rack, int brokerId,
                                     TopicPartition tp, int index, boolean isLeader) {
  clusterModel.createReplica(rack, brokerId, tp, index, isLeader);
  // Give every resource the same single-window metric value for this replica.
  MetricValues metricValues = new MetricValues(1);
  Map<Integer, MetricValues> metricValuesByResource = new HashMap<>();
  Resource.cachedValues()
          .forEach(r -> metricValuesByResource.put(KafkaCruiseControlMetricDef.resourceToMetricId(r), metricValues));
  clusterModel.setReplicaLoad(rack, brokerId, tp, new AggregatedMetricValues(metricValuesByResource), Collections.singletonList(1L));
}
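As a rough illustration of how such a helper might be exercised (this usage sketch is not taken from the test itself; the rack/broker layout and the topic name "T0" are assumptions borrowed from the DeterministicCluster fixture used elsewhere on this page):

ClusterModel clusterModel = DeterministicCluster.getHomogeneousDeterministicCluster(2, Arrays.asList(0, 0, 1), TestConstants.BROKER_CAPACITY);
TopicPartition t0p0 = new TopicPartition("T0", 0);
// Assumed layout: leader on broker 0 (rack "0"), follower on broker 2 (rack "1").
createReplicaAndSetLoad(clusterModel, "0", 0, t0p0, 0, true);
createReplicaAndSetLoad(clusterModel, "1", 2, t0p0, 1, false);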
Use of com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues in project cruise-control by linkedin.
The class LoadMonitor, method populateSnapshots:
private void populateSnapshots(Cluster kafkaCluster,
                               ClusterModel clusterModel,
                               TopicPartition tp,
                               ValuesAndExtrapolations valuesAndExtrapolations) {
  PartitionInfo partitionInfo = kafkaCluster.partition(tp);
  // If partition info does not exist, the topic may have been deleted.
  if (partitionInfo != null) {
    for (int index = 0; index < partitionInfo.replicas().length; index++) {
      Node replica = partitionInfo.replicas()[index];
      boolean isLeader;
      if (partitionInfo.leader() == null) {
        LOG.warn("Detected offline partition {}-{}, skipping", partitionInfo.topic(), partitionInfo.partition());
        continue;
      } else {
        isLeader = replica.id() == partitionInfo.leader().id();
      }
      String rack = getRackHandleNull(replica);
      // Note that we assume the capacity resolver can still return the broker capacity even if the broker
      // is dead. We need this to get the host resource capacity.
      Map<Resource, Double> brokerCapacity = _brokerCapacityConfigResolver.capacityForBroker(rack, replica.host(), replica.id());
      clusterModel.createReplicaHandleDeadBroker(rack, replica.id(), tp, index, isLeader, brokerCapacity);
      AggregatedMetricValues aggregatedMetricValues = valuesAndExtrapolations.metricValues();
      // The leader gets the aggregated load as-is; followers get a follower-adjusted view via MonitorUtils.
      clusterModel.setReplicaLoad(rack,
                                  replica.id(),
                                  tp,
                                  isLeader ? aggregatedMetricValues : MonitorUtils.toFollowerMetricValues(aggregatedMetricValues),
                                  valuesAndExtrapolations.windows());
    }
  }
}
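For context, a hedged sketch of the calling pattern: when building a ClusterModel, the monitor walks the partitions for which it has aggregated metrics and calls populateSnapshots once per partition. The map name partitionMetrics and the surrounding variables below are illustrative assumptions, not code from this page.

for (Map.Entry<TopicPartition, ValuesAndExtrapolations> entry : partitionMetrics.entrySet()) {
  populateSnapshots(kafkaCluster, clusterModel, entry.getKey(), entry.getValue());
}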
Use of com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues in project cruise-control by linkedin.
The class KafkaCruiseControlUnitTestUtils, method getAggregatedMetricValues:
/**
 * Get the aggregated metric values with the given resource usage.
 */
public static AggregatedMetricValues getAggregatedMetricValues(double cpuUsage,
                                                               double networkInBoundUsage,
                                                               double networkOutBoundUsage,
                                                               double diskUsage) {
  // CPU, NW_IN, NW_OUT and DISK are the statically imported Resource constants.
  double[] values = new double[Resource.cachedValues().size()];
  values[KafkaCruiseControlMetricDef.resourceToMetricId(CPU)] = cpuUsage;
  values[KafkaCruiseControlMetricDef.resourceToMetricId(NW_IN)] = networkInBoundUsage;
  values[KafkaCruiseControlMetricDef.resourceToMetricId(NW_OUT)] = networkOutBoundUsage;
  values[KafkaCruiseControlMetricDef.resourceToMetricId(DISK)] = diskUsage;
  AggregatedMetricValues aggregateMetricValues = new AggregatedMetricValues();
  for (Resource r : Resource.cachedValues()) {
    int metricId = KafkaCruiseControlMetricDef.resourceToMetricId(r);
    // Each resource contributes a single-window MetricValues holding its usage.
    MetricValues metricValues = new MetricValues(1);
    metricValues.set(0, values[metricId]);
    aggregateMetricValues.add(metricId, metricValues);
  }
  return aggregateMetricValues;
}
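For example, the ExcludedTopicsTest fixtures further down this page use this helper to give a replica a single-window load equal to half of each capacity constant, then push it into the cluster model:

AggregatedMetricValues load = KafkaCruiseControlUnitTestUtils.getAggregatedMetricValues(
    TestConstants.TYPICAL_CPU_CAPACITY / 2,
    TestConstants.LARGE_BROKER_CAPACITY / 2,
    TestConstants.MEDIUM_BROKER_CAPACITY / 2,
    TestConstants.LARGE_BROKER_CAPACITY / 2);
cluster.setReplicaLoad("0", 0, new TopicPartition("T1", 0), load, Collections.singletonList(1L));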
Use of com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues in project cruise-control by linkedin.
The class ExcludedTopicsTest, method unbalanced:
// Two racks, three brokers, two partitions, one replica each.
private static ClusterModel unbalanced() {
  List<Integer> orderedRackIdsOfBrokers = Arrays.asList(0, 0, 1);
  ClusterModel cluster = DeterministicCluster.getHomogeneousDeterministicCluster(2, orderedRackIdsOfBrokers, TestConstants.BROKER_CAPACITY);
  // Create the topic partitions.
  TopicPartition pInfoT10 = new TopicPartition("T1", 0);
  TopicPartition pInfoT20 = new TopicPartition("T2", 0);
  // Create a leader replica for each topic (T1 and T2), both on broker 0.
  cluster.createReplica("0", 0, pInfoT10, 0, true);
  cluster.createReplica("0", 0, pInfoT20, 0, true);
  AggregatedMetricValues aggregatedMetricValues =
      KafkaCruiseControlUnitTestUtils.getAggregatedMetricValues(TestConstants.TYPICAL_CPU_CAPACITY / 2,
                                                                TestConstants.LARGE_BROKER_CAPACITY / 2,
                                                                TestConstants.MEDIUM_BROKER_CAPACITY / 2,
                                                                TestConstants.LARGE_BROKER_CAPACITY / 2);
  // Create snapshots and push them to the cluster.
  cluster.setReplicaLoad("0", 0, pInfoT10, aggregatedMetricValues, Collections.singletonList(1L));
  cluster.setReplicaLoad("0", 0, pInfoT20, aggregatedMetricValues, Collections.singletonList(1L));
  return cluster;
}
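A hedged sketch of how a fixture like this is typically consumed in excluded-topics tests; the goal variable and the commented-out optimize call are illustrative assumptions, not code from this page:

ClusterModel clusterModel = unbalanced();
Set<String> excludedTopics = Collections.singleton("T2");
// Hypothetical: run a rebalancing goal with "T2" excluded and verify that its replica on
// broker 0 is left untouched while "T1" replicas are free to move.
// goal.optimize(clusterModel, Collections.emptySet(), excludedTopics);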
Use of com.linkedin.cruisecontrol.monitor.sampling.aggregator.AggregatedMetricValues in project cruise-control by linkedin.
The class ExcludedTopicsTest, method unbalanced2:
// Two racks, three brokers, six partitions, one replica each (builds on unbalanced()).
private static ClusterModel unbalanced2() {
  ClusterModel cluster = unbalanced();
  // Create the additional topic partitions.
  TopicPartition pInfoT30 = new TopicPartition("T1", 1);
  TopicPartition pInfoT40 = new TopicPartition("T2", 1);
  TopicPartition pInfoT50 = new TopicPartition("T1", 2);
  TopicPartition pInfoT60 = new TopicPartition("T2", 2);
  // Create a leader replica for each new partition; all but T1-1 land on broker 0.
  cluster.createReplica("0", 1, pInfoT30, 0, true);
  cluster.createReplica("0", 0, pInfoT40, 0, true);
  cluster.createReplica("0", 0, pInfoT50, 0, true);
  cluster.createReplica("0", 0, pInfoT60, 0, true);
  AggregatedMetricValues aggregatedMetricValues =
      KafkaCruiseControlUnitTestUtils.getAggregatedMetricValues(TestConstants.LARGE_BROKER_CAPACITY / 2,
                                                                TestConstants.LARGE_BROKER_CAPACITY / 2,
                                                                TestConstants.MEDIUM_BROKER_CAPACITY / 2,
                                                                TestConstants.LARGE_BROKER_CAPACITY / 2);
  // Create snapshots and push them to the cluster.
  cluster.setReplicaLoad("0", 1, pInfoT30, aggregatedMetricValues, Collections.singletonList(1L));
  cluster.setReplicaLoad("0", 0, pInfoT40, aggregatedMetricValues, Collections.singletonList(1L));
  cluster.setReplicaLoad("0", 0, pInfoT50, aggregatedMetricValues, Collections.singletonList(1L));
  cluster.setReplicaLoad("0", 0, pInfoT60, aggregatedMetricValues, Collections.singletonList(1L));
  return cluster;
}
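After this setup broker 0 carries five of the six leader replicas, broker 1 carries one, and broker 2 carries none, which is what makes the fixture unbalanced. A minimal sanity check might look like the following; the broker() and replicas() accessors are assumed here for illustration:

assertEquals(5, cluster.broker(0).replicas().size());
assertEquals(1, cluster.broker(1).replicas().size());
assertEquals(0, cluster.broker(2).replicas().size());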