Use of com.linkedin.kafka.cruisecontrol.metricsreporter.metric.PartitionMetric in project cruise-control by linkedin: class CruiseControlMetricsProcessorTest, method testMissingPartitionSizeMetric.
@Test
public void testMissingPartitionSizeMetric() throws TimeoutException, BrokerCapacityResolutionException {
    CruiseControlMetricsProcessor processor = new CruiseControlMetricsProcessor(mockBrokerCapacityConfigResolver(), false);
    Set<CruiseControlMetric> metrics = getCruiseControlMetrics();
    for (CruiseControlMetric metric : metrics) {
        boolean shouldAdd = true;
        if (metric.rawMetricType() == RawMetricType.PARTITION_SIZE) {
            PartitionMetric pm = (PartitionMetric) metric;
            if (pm.topic().equals(TOPIC1) && pm.partition() == P0) {
                shouldAdd = false;
            }
        }
        if (shouldAdd) {
            processor.addMetric(metric);
        }
    }
    Cluster cluster = getCluster();
    MetricSampler.Samples samples = processor.process(cluster, TEST_PARTITIONS, MetricSampler.SamplingMode.ALL);
    assertEquals("Should have ignored partition " + T1P0, 3, samples.partitionMetricSamples().size());
    assertEquals("Should have reported both brokers", 2, samples.brokerMetricSamples().size());
}
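For contrast, a minimal sketch (not part of the test class) of the unfiltered path, reusing the helpers shown above: when the PARTITION_SIZE metric for T1P0 is not dropped, the processor is expected to produce a sample for every partition in the fixture instead of skipping T1P0. The expected count of four is an assumption based on the fixture's four partitions.

    CruiseControlMetricsProcessor processor =
        new CruiseControlMetricsProcessor(mockBrokerCapacityConfigResolver(), false);
    // Add every metric, including the PARTITION_SIZE metric for T1P0 that the test above drops.
    for (CruiseControlMetric metric : getCruiseControlMetrics()) {
        processor.addMetric(metric);
    }
    MetricSampler.Samples samples =
        processor.process(getCluster(), TEST_PARTITIONS, MetricSampler.SamplingMode.ALL);
    // With no missing partition size, all four fixture partitions should yield samples.
    assertEquals(4, samples.partitionMetricSamples().size());
    assertEquals(2, samples.brokerMetricSamples().size());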
Use of com.linkedin.kafka.cruisecontrol.metricsreporter.metric.PartitionMetric in project cruise-control by linkedin: class BrokerLoad, method recordMetric.
/**
 * Record the given Cruise Control metric.
 *
 * @param ccm Cruise Control metric.
 */
public void recordMetric(CruiseControlMetric ccm) {
    RawMetricType rawMetricType = ccm.rawMetricType();
    switch (rawMetricType.metricScope()) {
        case BROKER:
            _brokerMetrics.recordCruiseControlMetric(ccm);
            break;
        case TOPIC:
            TopicMetric tm = (TopicMetric) ccm;
            _dotHandledTopicMetrics.computeIfAbsent(tm.topic(), t -> new RawMetricsHolder()).recordCruiseControlMetric(ccm);
            break;
        case PARTITION:
            PartitionMetric pm = (PartitionMetric) ccm;
            _dotHandledPartitionMetrics.computeIfAbsent(new TopicPartition(pm.topic(), pm.partition()), tp -> new RawMetricsHolder()).recordCruiseControlMetric(ccm);
            _dotHandledTopicsWithPartitionSizeReported.add(pm.topic());
            break;
        default:
            throw new IllegalStateException(String.format("Should never be here. Unrecognized metric scope %s", rawMetricType.metricScope()));
    }
}
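A minimal sketch, not from the source, of feeding a partition-scoped metric through this method. The PartitionMetric constructor argument order (type, time in ms, broker id, topic, partition, value) follows the other examples on this page; the BrokerLoad instance, topic name, and values are hypothetical.

    // Build a partition-scoped metric and let recordMetric route it to the PARTITION branch.
    CruiseControlMetric sizeMetric = new PartitionMetric(
        RawMetricType.PARTITION_SIZE,  // partition-scoped raw metric type
        System.currentTimeMillis(),    // metric timestamp in milliseconds
        0,                             // id of the broker hosting the partition
        "topicA",                      // hypothetical topic name
        0,                             // hypothetical partition number
        1024.0 * 1024.0);              // hypothetical partition size in bytes
    brokerLoad.recordMetric(sizeMetric);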
Use of com.linkedin.kafka.cruisecontrol.metricsreporter.metric.PartitionMetric in project cruise-control by linkedin: class PrometheusMetricSampler, method addPartitionMetrics.
private int addPartitionMetrics(Cluster cluster, RawMetricType metricType, PrometheusQueryResult queryResult) throws InvalidPrometheusResultException {
    int brokerId = getBrokerId(cluster, queryResult);
    String topic = getTopic(queryResult);
    int partition = getPartition(queryResult);
    int metricsAdded = 0;
    for (PrometheusValue value : queryResult.values()) {
        addMetricForProcessing(new PartitionMetric(metricType, value.epochSeconds() * SEC_TO_MS, brokerId, topic, partition, value.value()));
        metricsAdded++;
    }
    return metricsAdded;
}
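A minimal sketch, not from the source, of how a single Prometheus sample maps to a PartitionMetric in the loop above: Prometheus reports timestamps in epoch seconds, so SEC_TO_MS (assumed to be 1000) converts them to the epoch-millisecond timestamps that Cruise Control metrics carry. The broker, topic, partition, and value below are hypothetical.

    long epochSeconds = 1_700_000_000L;       // sample timestamp as reported by Prometheus
    long timestampMs = epochSeconds * 1000L;  // equivalent of value.epochSeconds() * SEC_TO_MS
    PartitionMetric metric = new PartitionMetric(
        RawMetricType.PARTITION_SIZE, timestampMs, 1, "topicA", 0, 42.0);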
Use of com.linkedin.kafka.cruisecontrol.metricsreporter.metric.PartitionMetric in project cruise-control by linkedin: class CruiseControlMetricsProcessorTest, method getCruiseControlMetrics.
/**
 * <ul>
 *   <li>T1P0(B0): NW_IN = {@link #B0_TOPIC1_BYTES_IN} KB, NW_OUT = {@link #B0_TOPIC1_BYTES_OUT} KB,
 *       size = {@link #T1P0_BYTES_SIZE} MB</li>
 *   <li>T1P1(B1): NW_IN = {@link #B1_TOPIC1_BYTES_IN} KB, NW_OUT = {@link #B1_TOPIC1_BYTES_OUT} KB,
 *       size = {@link #T1P1_BYTES_SIZE} MB</li>
 *   <li>T2P0(B0): NW_IN = est. {@link #B0_TOPIC2_BYTES_IN}/2 KB, NW_OUT = est. {@link #B0_TOPIC2_BYTES_OUT}/2 KB,
 *       size = {@link #T2P0_BYTES_SIZE} MB</li>
 *   <li>T2P1(B0): NW_IN = est. {@link #B0_TOPIC2_BYTES_IN}/2 KB, NW_OUT = est. {@link #B0_TOPIC2_BYTES_OUT}/2 KB,
 *       size = {@link #T2P1_BYTES_SIZE} MB</li>
 *   <li>B0: CPU = {@link #B0_CPU}%</li>
 *   <li>B1: CPU = {@link #B1_CPU}%</li>
 * </ul>
 * @return Cruise Control metrics.
 */
private Set<CruiseControlMetric> getCruiseControlMetrics() {
    Set<CruiseControlMetric> metrics = new HashSet<>();
    int i = 0;
    for (RawMetricType rawMetricType : RawMetricType.brokerMetricTypesDiffForVersion(BrokerMetricSample.MIN_SUPPORTED_VERSION)) {
        switch (rawMetricType) {
            case ALL_TOPIC_BYTES_IN:
                metrics.add(new BrokerMetric(RawMetricType.ALL_TOPIC_BYTES_IN, _time.milliseconds(), BROKER_ID_0, B0_ALL_TOPIC_BYTES_IN * BYTES_IN_KB));
                metrics.add(new BrokerMetric(RawMetricType.ALL_TOPIC_BYTES_IN, _time.milliseconds(), BROKER_ID_1, B1_ALL_TOPIC_BYTES_IN * BYTES_IN_KB));
                break;
            case ALL_TOPIC_BYTES_OUT:
                metrics.add(new BrokerMetric(RawMetricType.ALL_TOPIC_BYTES_OUT, _time.milliseconds(), BROKER_ID_0, B0_ALL_TOPIC_BYTES_OUT * BYTES_IN_KB));
                metrics.add(new BrokerMetric(RawMetricType.ALL_TOPIC_BYTES_OUT, _time.milliseconds(), BROKER_ID_1, B1_ALL_TOPIC_BYTES_OUT * BYTES_IN_KB));
                break;
            case BROKER_CPU_UTIL:
                metrics.add(new BrokerMetric(RawMetricType.BROKER_CPU_UTIL, _time.milliseconds(), BROKER_ID_0, B0_CPU));
                metrics.add(new BrokerMetric(RawMetricType.BROKER_CPU_UTIL, _time.milliseconds(), BROKER_ID_1, B1_CPU));
                break;
            default:
                metrics.add(new BrokerMetric(rawMetricType, _time.milliseconds(), BROKER_ID_0, i++ * BYTES_IN_MB));
                metrics.add(new BrokerMetric(rawMetricType, _time.milliseconds(), BROKER_ID_1, i++ * BYTES_IN_MB));
                break;
        }
    }
    for (RawMetricType rawMetricType : RawMetricType.topicMetricTypes()) {
        switch (rawMetricType) {
            case TOPIC_BYTES_IN:
                metrics.add(new TopicMetric(RawMetricType.TOPIC_BYTES_IN, _time.milliseconds() + 1, BROKER_ID_0, TOPIC1, B0_TOPIC1_BYTES_IN * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_BYTES_IN, _time.milliseconds() + 2, BROKER_ID_1, TOPIC1, B1_TOPIC1_BYTES_IN * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_BYTES_IN, _time.milliseconds(), BROKER_ID_0, TOPIC2, B0_TOPIC2_BYTES_IN * BYTES_IN_KB));
                break;
            case TOPIC_BYTES_OUT:
                metrics.add(new TopicMetric(RawMetricType.TOPIC_BYTES_OUT, _time.milliseconds(), BROKER_ID_0, TOPIC1, B0_TOPIC1_BYTES_OUT * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_BYTES_OUT, _time.milliseconds(), BROKER_ID_1, TOPIC1, B1_TOPIC1_BYTES_OUT * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_BYTES_OUT, _time.milliseconds(), BROKER_ID_0, TOPIC2, B0_TOPIC2_BYTES_OUT * BYTES_IN_KB));
                break;
            case TOPIC_REPLICATION_BYTES_IN:
                metrics.add(new TopicMetric(RawMetricType.TOPIC_REPLICATION_BYTES_IN, _time.milliseconds(), BROKER_ID_1, TOPIC1, B1_TOPIC1_REPLICATION_BYTES_IN * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_REPLICATION_BYTES_IN, _time.milliseconds(), BROKER_ID_0, TOPIC1, B0_TOPIC1_REPLICATION_BYTES_IN * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_REPLICATION_BYTES_IN, _time.milliseconds(), BROKER_ID_1, TOPIC2, B1_TOPIC2_REPLICATION_BYTES_IN * BYTES_IN_KB));
                break;
            case TOPIC_REPLICATION_BYTES_OUT:
                metrics.add(new TopicMetric(RawMetricType.TOPIC_REPLICATION_BYTES_OUT, _time.milliseconds(), BROKER_ID_0, TOPIC1, B0_TOPIC1_REPLICATION_BYTES_OUT * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_REPLICATION_BYTES_OUT, _time.milliseconds(), BROKER_ID_1, TOPIC1, B1_TOPIC1_REPLICATION_BYTES_OUT * BYTES_IN_KB));
                metrics.add(new TopicMetric(RawMetricType.TOPIC_REPLICATION_BYTES_OUT, _time.milliseconds(), BROKER_ID_0, TOPIC2, B0_TOPIC2_REPLICATION_BYTES_OUT * BYTES_IN_KB));
                break;
            default:
                metrics.add(new TopicMetric(rawMetricType, _time.milliseconds(), BROKER_ID_0, TOPIC1, i * BYTES_IN_MB));
                metrics.add(new TopicMetric(rawMetricType, _time.milliseconds(), BROKER_ID_1, TOPIC1, i * BYTES_IN_MB));
                metrics.add(new TopicMetric(rawMetricType, _time.milliseconds(), BROKER_ID_0, TOPIC2, i * BYTES_IN_MB));
                metrics.add(new TopicMetric(rawMetricType, _time.milliseconds(), BROKER_ID_1, TOPIC2, i * BYTES_IN_MB));
                break;
        }
    }
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_0, TOPIC1, P0, T1P0_BYTES_SIZE * BYTES_IN_MB));
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_0, TOPIC1, P1, T1P1_BYTES_SIZE * BYTES_IN_MB));
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_0, TOPIC2, P0, T2P0_BYTES_SIZE * BYTES_IN_MB));
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_0, TOPIC2, P1, T2P1_BYTES_SIZE * BYTES_IN_MB));
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_1, TOPIC1, P0, T1P0_BYTES_SIZE * BYTES_IN_MB));
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_1, TOPIC1, P1, T1P1_BYTES_SIZE * BYTES_IN_MB));
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_1, TOPIC2, P0, T2P0_BYTES_SIZE * BYTES_IN_MB));
    metrics.add(new PartitionMetric(RawMetricType.PARTITION_SIZE, _time.milliseconds(), BROKER_ID_1, TOPIC2, P1, T2P1_BYTES_SIZE * BYTES_IN_MB));
    return metrics;
}
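The javadoc above marks TOPIC2's per-partition network rates as estimates ({@link #B0_TOPIC2_BYTES_IN}/2 and {@link #B0_TOPIC2_BYTES_OUT}/2) because broker 0 leads both T2P0 and T2P1 while only a topic-level rate is reported for TOPIC2 on that broker. A minimal sketch of that split, with hypothetical values, assuming the topic-level rate is divided evenly across the two leader partitions as the /2 in the javadoc suggests:

    double b0Topic2BytesInKb = 500.0;    // hypothetical topic-level bytes-in rate for TOPIC2 on broker 0
    int topic2LeadersOnB0 = 2;           // T2P0 and T2P1 both lead on broker 0 in this fixture
    double estimatedPerPartitionKb = b0Topic2BytesInKb / topic2LeadersOnB0; // 250.0 KB attributed to each partition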