use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.PartitionEntity in project cruise-control by linkedin.
the class LoadMonitor method clusterModel.
/**
 * Get the cluster load model for a time range.
 *
 * @param from Start of the time window.
 * @param to End of the time window.
 * @param requirements The load completeness requirements.
 * @param populateReplicaPlacementInfo Whether to populate replica placement information.
 * @param allowCapacityEstimation Whether to allow capacity estimation in the cluster model if the underlying live broker capacity is unavailable.
 * @param operationProgress The progress of the job to report.
 * @return A cluster model with the available snapshots whose timestamps are in the given window.
 * @throws NotEnoughValidWindowsException If there are not enough samples to generate the cluster model.
 * @throws TimeoutException If the broker capacity resolver is unable to resolve broker capacity in time.
 * @throws BrokerCapacityResolutionException If the broker capacity resolver fails to resolve broker capacity.
 */
public ClusterModel clusterModel(long from,
                                 long to,
                                 ModelCompletenessRequirements requirements,
                                 boolean populateReplicaPlacementInfo,
                                 boolean allowCapacityEstimation,
                                 OperationProgress operationProgress)
    throws NotEnoughValidWindowsException, TimeoutException, BrokerCapacityResolutionException {
  long startMs = _time.milliseconds();
  MetadataClient.ClusterAndGeneration clusterAndGeneration = refreshClusterAndGeneration();
  Cluster cluster = clusterAndGeneration.cluster();
  // Get the metric aggregation result.
  MetricSampleAggregationResult<String, PartitionEntity> partitionMetricSampleAggregationResult =
      _partitionMetricSampleAggregator.aggregate(cluster, from, to, requirements, operationProgress);
  Map<PartitionEntity, ValuesAndExtrapolations> partitionValuesAndExtrapolations =
      partitionMetricSampleAggregationResult.valuesAndExtrapolations();
  GeneratingClusterModel step = new GeneratingClusterModel(partitionValuesAndExtrapolations.size());
  operationProgress.addStep(step);
  // Create an empty cluster model first.
  long currentLoadGeneration = partitionMetricSampleAggregationResult.generation();
  ModelGeneration modelGeneration = new ModelGeneration(clusterAndGeneration.generation(), currentLoadGeneration);
  ClusterModel clusterModel = new ClusterModel(modelGeneration,
                                               partitionMetricSampleAggregationResult.validEntityRatioOfCompleteness());
  final Timer.Context ctx = _clusterModelCreationTimer.time();
  try {
    populateClusterCapacity(populateReplicaPlacementInfo, allowCapacityEstimation, clusterModel, cluster);
    // Populate replica placement information for the cluster model if requested.
    Map<TopicPartition, Map<Integer, String>> replicaPlacementInfo = null;
    if (populateReplicaPlacementInfo) {
      replicaPlacementInfo = getReplicaPlacementInfo(clusterModel, cluster, _adminClient, _config);
    }
    // Populate snapshots for the cluster model.
    for (Map.Entry<PartitionEntity, ValuesAndExtrapolations> entry : partitionValuesAndExtrapolations.entrySet()) {
      TopicPartition tp = entry.getKey().tp();
      ValuesAndExtrapolations leaderLoad = entry.getValue();
      populatePartitionLoad(cluster, clusterModel, tp, leaderLoad, replicaPlacementInfo,
                            _brokerCapacityConfigResolver, allowCapacityEstimation);
      step.incrementPopulatedNumPartitions();
    }
    // Set the state of bad brokers in clusterModel based on the Kafka cluster state.
    setBadBrokerState(clusterModel, cluster);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Generated cluster model in {} ms", _time.milliseconds() - startMs);
    }
  } finally {
    ctx.stop();
  }
  return clusterModel;
}
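For orientation, here is a minimal sketch of how a caller might invoke this method. The loadMonitor reference, the one-hour range, and the chosen flag values are illustrative assumptions; only the clusterModel(...) signature above and the ModelCompletenessRequirements / OperationProgress constructors used elsewhere on this page come from the source, and imports and surrounding class scaffolding are omitted as in the other snippets.
// Illustrative caller; assumes a LoadMonitor instance named loadMonitor is in scope.
ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
OperationProgress progress = new OperationProgress();
long nowMs = System.currentTimeMillis();
try {
  // Build a model over the last hour, populating replica placement info and allowing
  // capacity estimation when a live broker capacity cannot be resolved.
  ClusterModel model = loadMonitor.clusterModel(nowMs - 60 * 60 * 1000L, nowMs, requirements, true, true, progress);
  // The returned model can now be handed to whatever analysis consumes ClusterModel instances.
} catch (NotEnoughValidWindowsException | TimeoutException | BrokerCapacityResolutionException e) {
  // Not enough valid metric windows yet, or broker capacities could not be resolved in time.
  e.printStackTrace();
}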
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.PartitionEntity in project cruise-control by linkedin.
the class KafkaPartitionMetricSampleAggregatorTest method setupScenario4.
/**
 * 3 topics with 2 partitions each.
 * T0P1 has all the windows with AVG_AVAILABLE as extrapolations.
 * T1P1 misses windows 6000 (index=5) and 7000 (index=6).
 * All other partitions have full data.
 * @return Setup for scenario 4.
 */
private TestContext setupScenario4() {
  TopicPartition t0p1 = new TopicPartition(TOPIC0, 1);
  TopicPartition t1p0 = new TopicPartition("TOPIC1", 0);
  TopicPartition t1p1 = new TopicPartition("TOPIC1", 1);
  TopicPartition t2p0 = new TopicPartition("TOPIC2", 0);
  TopicPartition t2p1 = new TopicPartition("TOPIC2", 1);
  List<TopicPartition> allPartitions = Arrays.asList(TP, t0p1, t1p0, t1p1, t2p0, t2p1);
  Properties props = getLoadMonitorProperties();
  props.setProperty(MAX_ALLOWED_EXTRAPOLATIONS_PER_PARTITION_CONFIG, "0");
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(props);
  Metadata metadata = getMetadata(allPartitions);
  KafkaPartitionMetricSampleAggregator aggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);
  for (TopicPartition tp : Arrays.asList(TP, t1p0, t2p0, t2p1)) {
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, tp);
  }
  // Let t0p1 have too many extrapolations.
  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW - 1, aggregator, t0p1);
  // Let t1p1 miss windows 6000 (index=5) and 7000 (index=6): populate the first five windows here,
  // then resume from window index 7 below.
  populateSampleAggregator(5, MIN_SAMPLES_PER_WINDOW, aggregator, t1p1);
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 6, MIN_SAMPLES_PER_WINDOW, aggregator,
                                                      new PartitionEntity(t1p1), 7, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  return new TestContext(metadata, aggregator);
}
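To spell out the window bookkeeping above: the populateSampleAggregator(5, ...) call fills the five windows with indexes 0 through 4 for t1p1, and the CruiseControlUnitTestUtils call resumes at start index 7 for the remaining NUM_WINDOWS - 6 windows, so the windows with indexes 5 and 6 (labelled 6000 and 7000 in the Javadoc, assuming WINDOW_MS = 1000) end up with no samples for t1p1. Meanwhile t0p1 gets only MIN_SAMPLES_PER_WINDOW - 1 samples in every window, so each of its windows needs an AVG_AVAILABLE extrapolation; with MAX_ALLOWED_EXTRAPOLATIONS_PER_PARTITION_CONFIG set to "0", that is more extrapolations than allowed, which is the condition this scenario is meant to exercise.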
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.PartitionEntity in project cruise-control by linkedin.
the class KafkaPartitionMetricSampleAggregatorTest method testExcludeInvalidMetricSample.
@Test
public void testExcludeInvalidMetricSample() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);
  MetricDef metricDef = KafkaMetricDef.commonMetricDef();
  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  // Set the leader to be node 1, which is different from the leader in the metadata.
  PartitionMetricSample sampleWithDifferentLeader = new PartitionMetricSample(1, TP);
  sampleWithDifferentLeader.record(metricDef.metricInfo(DISK_USAGE.name()), 10000);
  sampleWithDifferentLeader.record(metricDef.metricInfo(CPU_USAGE.name()), 10000);
  sampleWithDifferentLeader.record(metricDef.metricInfo(LEADER_BYTES_IN.name()), 10000);
  sampleWithDifferentLeader.record(metricDef.metricInfo(LEADER_BYTES_OUT.name()), 10000);
  sampleWithDifferentLeader.close(0);
  // Only populate the CPU metric.
  PartitionMetricSample incompletePartitionMetricSample = new PartitionMetricSample(0, TP);
  incompletePartitionMetricSample.record(metricDef.metricInfo(CPU_USAGE.name()), 10000);
  incompletePartitionMetricSample.close(0);
  metricSampleAggregator.addSample(sampleWithDifferentLeader);
  metricSampleAggregator.addSample(incompletePartitionMetricSample);
  // Check the window value and make sure the metric samples above are excluded.
  Map<PartitionEntity, ValuesAndExtrapolations> valuesAndExtrapolations =
      metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress())
                            .valuesAndExtrapolations();
  ValuesAndExtrapolations partitionValuesAndExtrapolations = valuesAndExtrapolations.get(PE);
  for (Resource resource : Resource.cachedValues()) {
    Collection<Short> metricIds = KafkaMetricDef.resourceToMetricIds(resource);
    double expectedValue = (resource == Resource.DISK ? MIN_SAMPLES_PER_WINDOW - 1 : (MIN_SAMPLES_PER_WINDOW - 1) / 2.0)
                           / (resource == Resource.CPU ? UNIT_INTERVAL_TO_PERCENTAGE : 1.0)
                           * metricIds.size();
    assertEquals("The utilization for " + resource + " should be " + expectedValue,
                 expectedValue,
                 partitionValuesAndExtrapolations.metricValues()
                                                 .valuesForGroup(resource.name(), KafkaMetricDef.commonMetricDef(), true)
                                                 .get(NUM_WINDOWS - 1),
                 0.01);
  }
}
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.PartitionEntity in project cruise-control by linkedin.
the class KafkaPartitionMetricSampleAggregatorTest method testAggregateWithUpdatedCluster.
@Test
public void testAggregateWithUpdatedCluster() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);
  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  TopicPartition tp1 = new TopicPartition(TOPIC0 + "1", 0);
  Cluster cluster = getCluster(Arrays.asList(TP, tp1));
  List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(2);
  topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0, false,
      Collections.singletonList(new MetadataResponse.PartitionMetadata(Errors.NONE, PARTITION, NODE_0,
          Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH), Arrays.asList(nodes()), Arrays.asList(nodes()),
          Collections.emptyList()))));
  topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0 + "1", false,
      Collections.singletonList(new MetadataResponse.PartitionMetadata(Errors.NONE, 0, NODE_0,
          Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH), Arrays.asList(nodes()), Arrays.asList(nodes()),
          Collections.emptyList()))));
  MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(),
                                                                                      cluster.clusterResource().clusterId(),
                                                                                      MetadataResponse.NO_CONTROLLER_ID,
                                                                                      topicMetadata);
  metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 1);
  Map<PartitionEntity, ValuesAndExtrapolations> aggregateResult =
      metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress()).valuesAndExtrapolations();
// Partition "topic-0" should be valid in all NUM_WINDOW windows and Partition "topic1-0" should not since
// there is no sample for it.
  assertEquals(1, aggregateResult.size());
  assertEquals(NUM_WINDOWS, aggregateResult.get(PE).windows().size());
  ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(cluster, -1, Long.MAX_VALUE, requirements, new OperationProgress());
  aggregateResult = result.valuesAndExtrapolations();
  assertNotNull("tp1 should be included because includeAllTopics is set to true",
                aggregateResult.get(new PartitionEntity(tp1)));
  Map<Integer, Extrapolation> extrapolations = aggregateResult.get(new PartitionEntity(tp1)).extrapolations();
  assertEquals(NUM_WINDOWS, extrapolations.size());
  for (int i = 0; i < NUM_WINDOWS; i++) {
    assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i));
  }
}
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.PartitionEntity in project cruise-control by linkedin.
the class KafkaPartitionMetricSampleAggregatorTest method testFallbackToAvgAdjacent.
@Test
public void testFallbackToAvgAdjacent() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  TopicPartition anotherTopicPartition = new TopicPartition("AnotherTopic", 1);
  PartitionEntity anotherPartitionEntity = new PartitionEntity(anotherTopicPartition);
  Metadata metadata = getMetadata(Arrays.asList(TP, anotherTopicPartition));
  KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);
  // Only give one sample to the aggregator for previous period.
  populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  // Let the (NUM_WINDOWS + 1)-th window (start index NUM_WINDOWS) have enough samples.
  CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE,
                                                      NUM_WINDOWS, WINDOW_MS, KafkaMetricDef.commonMetricDef());
  // Let the next window exist but contain no samples for partition 0.
  CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator,
                                                      anotherPartitionEntity, NUM_WINDOWS + 1, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  // Let the rest of the windows have enough samples.
  CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE,
                                                      NUM_WINDOWS + 2, WINDOW_MS, KafkaMetricDef.commonMetricDef());
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS * 2, new OperationProgress());
  int numWindows = result.valuesAndExtrapolations().get(PE).metricValues().length();
  assertEquals(NUM_WINDOWS, numWindows);
  int numExtrapolations = 0;
  for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
    assertEquals(Extrapolation.AVG_ADJACENT, entry.getValue());
    numExtrapolations++;
  }
  assertEquals(1, numExtrapolations);
}
Aggregations