use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.BrokerMetricSample in project cruise-control by linkedin.
the class CruiseControlMetricsProcessorTest method testBasic.
@Test
public void testBasic() throws TimeoutException, BrokerCapacityResolutionException {
  CruiseControlMetricsProcessor processor = new CruiseControlMetricsProcessor(mockBrokerCapacityConfigResolver(), false);
  Set<CruiseControlMetric> metrics = getCruiseControlMetrics();
  Cluster cluster = getCluster();
  metrics.forEach(processor::addMetric);
  MetricSampler.Samples samples = processor.process(cluster, TEST_PARTITIONS, MetricSampler.SamplingMode.ALL);
  for (Node node : cluster.nodes()) {
    assertEquals(MOCK_NUM_CPU_CORES, (short) processor.cachedNumCoresByBroker().get(node.id()));
  }
  assertEquals(4, samples.partitionMetricSamples().size());
  assertEquals(2, samples.brokerMetricSamples().size());
  for (PartitionMetricSample sample : samples.partitionMetricSamples()) {
    if (sample.entity().tp().equals(T1P0)) {
      validatePartitionMetricSample(sample, _time.milliseconds() + 2, CPU_UTIL.get(T1P0),
                                    B0_TOPIC1_BYTES_IN, B0_TOPIC1_BYTES_OUT, T1P0_BYTES_SIZE);
    } else if (sample.entity().tp().equals(T1P1)) {
      validatePartitionMetricSample(sample, _time.milliseconds() + 2, CPU_UTIL.get(T1P1),
                                    B1_TOPIC1_BYTES_IN, B1_TOPIC1_BYTES_OUT, T1P1_BYTES_SIZE);
    } else if (sample.entity().tp().equals(T2P0)) {
      validatePartitionMetricSample(sample, _time.milliseconds() + 2, CPU_UTIL.get(T2P0),
                                    B0_TOPIC2_BYTES_IN / 2, B0_TOPIC2_BYTES_OUT / 2, T2P0_BYTES_SIZE);
    } else if (sample.entity().tp().equals(T2P1)) {
      validatePartitionMetricSample(sample, _time.milliseconds() + 2, CPU_UTIL.get(T2P1),
                                    B0_TOPIC2_BYTES_IN / 2, B0_TOPIC2_BYTES_OUT / 2, T2P1_BYTES_SIZE);
    } else {
      fail("Should never have partition " + sample.entity().tp());
    }
  }
  for (BrokerMetricSample sample : samples.brokerMetricSamples()) {
    if (sample.metricValue(CPU_USAGE) == B0_CPU) {
      assertEquals(B0_TOPIC1_REPLICATION_BYTES_IN, sample.metricValue(REPLICATION_BYTES_IN_RATE), DELTA);
    } else if (sample.metricValue(CPU_USAGE) == B1_CPU) {
      assertEquals(B1_TOPIC1_REPLICATION_BYTES_IN + B1_TOPIC2_REPLICATION_BYTES_IN,
                   sample.metricValue(REPLICATION_BYTES_IN_RATE), DELTA);
    } else {
      fail("Should never have broker cpu util " + sample.metricValue(CPU_USAGE));
    }
  }
  assertFalse(samples.partitionMetricSamples().isEmpty());
}
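A brief note on the expected values for T2P0 and T2P1: the test asserts B0_TOPIC2_BYTES_IN / 2 and B0_TOPIC2_BYTES_OUT / 2, which is consistent with the topic-level rate reported by broker 0 being split evenly across its leader partitions of TOPIC2 in this fixture. A minimal arithmetic sketch using the fixture names from the test; the local variable names below are illustrative only, not processor internals.

  // Illustrative only: broker 0 leads both partitions of TOPIC2 in this fixture,
  // so a topic-level rate reported by broker 0 is split between T2P0 and T2P1.
  double topicBytesInOnBroker0 = B0_TOPIC2_BYTES_IN; // topic-level bytes-in reported by broker 0
  int topic2LeaderPartitionsOnBroker0 = 2;           // T2P0 and T2P1
  double expectedPerPartitionBytesIn = topicBytesInOnBroker0 / topic2LeaderPartitionsOnBroker0;
  // expectedPerPartitionBytesIn == B0_TOPIC2_BYTES_IN / 2, matching the assertions above.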
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.BrokerMetricSample in project cruise-control by linkedin.
the class CruiseControlMetricsProcessor method addBrokerMetricSamples.
/**
* Add the broker metric samples to the provided set.
*
* @param cluster The Kafka cluster
* @param brokerMetricSamples The set to add the broker samples to.
* @return The number of skipped brokers.
*/
private int addBrokerMetricSamples(Cluster cluster, Set<BrokerMetricSample> brokerMetricSamples) {
  int skippedBroker = 0;
  for (Node node : cluster.nodes()) {
    try {
      BrokerMetricSample sample = buildBrokerMetricSample(node, _brokerLoad, _maxMetricTimestamp);
      if (sample != null) {
        LOG.trace("Added broker metric sample for broker {}.", node.id());
        brokerMetricSamples.add(sample);
      } else {
        skippedBroker++;
      }
    } catch (UnknownVersionException e) {
      LOG.error("Unrecognized serde version detected during broker metric sampling.", e);
      skippedBroker++;
    } catch (Exception e) {
      LOG.error("Error building broker metric sample for {}.", node.id(), e);
      skippedBroker++;
    }
  }
  return skippedBroker;
}
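The returned skip count lets the caller decide how to react when some brokers could not produce a sample. A minimal sketch of such a caller, assuming only the signature shown above; the warning and its wording are illustrative, not taken from CruiseControlMetricsProcessor.process().

  Set<BrokerMetricSample> brokerMetricSamples = new HashSet<>();
  int skippedBrokers = addBrokerMetricSamples(cluster, brokerMetricSamples);
  if (skippedBrokers > 0) {
    // Illustrative handling: surface the gap rather than failing the whole sampling round.
    LOG.warn("Skipped broker metric samples for {} of {} brokers.", skippedBrokers, cluster.nodes().size());
  }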
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.BrokerMetricSample in project cruise-control by linkedin.
the class KafkaSampleStore method storeSamples.
@Override
public void storeSamples(MetricSampler.Samples samples) {
  AtomicInteger metricSampleCount = storePartitionMetricSamples(samples, _producer, _partitionMetricSampleStoreTopic, LOG);
  final AtomicInteger brokerMetricSampleCount = new AtomicInteger(0);
  for (BrokerMetricSample sample : samples.brokerMetricSamples()) {
    _producer.send(new ProducerRecord<>(_brokerMetricSampleStoreTopic, sample.toBytes()), new Callback() {
      @Override
      public void onCompletion(RecordMetadata recordMetadata, Exception e) {
        if (e == null) {
          brokerMetricSampleCount.incrementAndGet();
        } else {
          LOG.error("Failed to produce model training sample due to exception", e);
        }
      }
    });
  }
  _producer.flush();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Stored {} partition metric samples and {} broker metric samples to Kafka",
              metricSampleCount.get(), brokerMetricSampleCount.get());
  }
}
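The per-record Callback plus a shared AtomicInteger is what makes the final debug line meaningful: flush() blocks until the outstanding sends complete, so the counter is settled by the time it is read. A stripped-down sketch of the same pattern with a plain KafkaProducer; the producer, topic, and payloads names are placeholders for this illustration, not KafkaSampleStore fields, and the producer is assumed to be configured with byte[] serializers.

  // Sketch only: count successful sends with the producer callback, then flush.
  AtomicInteger acked = new AtomicInteger(0);
  for (byte[] payload : payloads) {
    producer.send(new ProducerRecord<>(topic, payload), (metadata, exception) -> {
      if (exception == null) {
        acked.incrementAndGet();
      } else {
        LOG.error("Failed to produce sample.", exception);
      }
    });
  }
  producer.flush(); // Blocks until outstanding sends complete, so acked is stable afterwards.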
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.BrokerMetricSample in project cruise-control by linkedin.
the class SamplingUtils method buildBrokerMetricSample.
/**
* Create a {@link BrokerMetricSample}, record the relevant metrics for the given broker, and return the sample.
*
* @param node Node hosting the broker.
* @param brokerLoadById Load information for brokers by the broker id.
* @param maxMetricTimestamp Maximum timestamp of the sampled metric during the sampling process.
* @return Metric sample populated with broker metrics, or {@code null} if sample generation is skipped.
*/
static BrokerMetricSample buildBrokerMetricSample(Node node, Map<Integer, BrokerLoad> brokerLoadById, long maxMetricTimestamp)
    throws UnknownVersionException {
  BrokerLoad brokerLoad = brokerLoadById.get(node.id());
  if (skipBuildingBrokerMetricSample(brokerLoad, node.id())) {
    return null;
  }
  MetricDef brokerMetricDef = KafkaMetricDef.brokerMetricDef();
  BrokerMetricSample bms = new BrokerMetricSample(node.host(), node.id(), brokerLoad.brokerSampleDeserializationVersion());
  for (Map.Entry<Byte, Set<RawMetricType>> entry : RawMetricType.brokerMetricTypesDiffByVersion().entrySet()) {
    for (RawMetricType rawBrokerMetricType : entry.getValue()) {
      // We require the broker to report all the metric types (including nullable values). Otherwise we skip the broker.
      if (!brokerLoad.brokerMetricAvailable(rawBrokerMetricType)) {
        LOG.warn("{}broker {} because it does not have {} metrics (serde version {}) or the metrics are inconsistent.",
                 SKIP_BUILDING_SAMPLE_PREFIX, node.id(), rawBrokerMetricType, entry.getKey());
        return null;
      } else {
        MetricInfo metricInfo = brokerMetricDef.metricInfo(KafkaMetricDef.forRawMetricType(rawBrokerMetricType).name());
        double metricValue = brokerLoad.brokerMetric(rawBrokerMetricType);
        bms.record(metricInfo, metricValue);
      }
    }
  }
  // Disk usage is not one of the broker raw metric types.
  bms.record(brokerMetricDef.metricInfo(KafkaMetricDef.DISK_USAGE.name()), brokerLoad.diskUsage());
  bms.close(maxMetricTimestamp);
  return bms;
}
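The guard at the top, skipBuildingBrokerMetricSample(...), is not shown in this snippet. Below is a hedged sketch of the minimal check it would need under the contract above, namely that a broker with no recorded load cannot yield a sample; the minRequiredBrokerMetricsAvailable() call and the warning text are assumptions for this sketch, not confirmed details of BrokerLoad or SamplingUtils.

  // Sketch under stated assumptions: skip the broker when its load is missing or
  // its required metrics are not all available. SKIP_BUILDING_SAMPLE_PREFIX is the
  // same prefix used in the warning above; minRequiredBrokerMetricsAvailable() is assumed.
  static boolean skipBuildingBrokerMetricSample(BrokerLoad brokerLoad, int brokerId) {
    if (brokerLoad == null || !brokerLoad.minRequiredBrokerMetricsAvailable()) {
      LOG.warn("{}broker {} because its load information is missing or incomplete.", SKIP_BUILDING_SAMPLE_PREFIX, brokerId);
      return true;
    }
    return false;
  }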
use of com.linkedin.kafka.cruisecontrol.monitor.sampling.holder.BrokerMetricSample in project cruise-control by linkedin.
the class SamplingFetcher method useBrokerMetricSamples.
@Override
protected void useBrokerMetricSamples(Set<BrokerMetricSample> brokerMetricSamples) {
  Set<Integer> returnedBrokerIds = new HashSet<>();
  if (brokerMetricSamples != null) {
    int discarded = 0;
    Iterator<BrokerMetricSample> iter = brokerMetricSamples.iterator();
    while (iter.hasNext()) {
      BrokerMetricSample brokerMetricSample = iter.next();
      // Close the broker metric sample in case the user forgot to close it.
      brokerMetricSample.close(_endTimeMs);
      if (_brokerMetricSampleAggregator.addSample(brokerMetricSample)) {
        LOG.trace("Enqueued broker metric sample {}", brokerMetricSample);
      } else {
        iter.remove();
        discarded++;
        LOG.trace("Failed to add broker metric sample {}", brokerMetricSample);
      }
      returnedBrokerIds.add(brokerMetricSample.brokerId());
    }
    LOG.info("Collected {}{} broker metric samples for {} brokers.", brokerMetricSamples.size(),
             discarded > 0 ? String.format("(%d discarded)", discarded) : "", returnedBrokerIds.size());
    // Add the broker metric samples to the observation.
    ModelParameters.addMetricObservation(brokerMetricSamples);
  } else {
    LOG.warn("Failed to collect broker metric samples.");
  }
}
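One subtlety in the final log line: iter.remove() takes rejected samples out of brokerMetricSamples before its size() is logged, so the first placeholder is the retained count, not the number originally returned by the fetcher, and only the retained samples reach ModelParameters.addMetricObservation(...). A small worked example with made-up numbers:

  // Illustrative numbers only: 10 samples returned by the sampler, 3 rejected by the aggregator.
  int returned = 10;
  int discarded = 3;
  int retained = returned - discarded; // what brokerMetricSamples.size() reports after the loop
  // The log line above would then read: "Collected 7(3 discarded) broker metric samples for ... brokers."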