
Example 96 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.

From class KafkaMetricSampleAggregatorTest, method testFallbackToAvgAdjacent.

@Test
public void testFallbackToAvgAdjacent() throws NotEnoughValidWindowsException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    TopicPartition anotherTopicPartition = new TopicPartition("AnotherTopic", 1);
    PartitionEntity anotherPartitionEntity = new PartitionEntity(anotherTopicPartition);
    Metadata metadata = getMetadata(Arrays.asList(TP, anotherTopicPartition));
    KafkaMetricSampleAggregator metricSampleAggregator = new KafkaMetricSampleAggregator(config, metadata);
    // Give the aggregator enough samples for the first NUM_WINDOWS windows.
    populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
    // Let the (NUM_WINDOWS + 1)th window have enough samples as well.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE, NUM_WINDOWS, WINDOW_MS, KafkaCruiseControlMetricDef.metricDef());
    // Let the next window exist but contain no samples for partition 0.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, anotherPartitionEntity, NUM_WINDOWS + 1, WINDOW_MS, KafkaCruiseControlMetricDef.metricDef());
    // Let the remaining windows have enough samples.
    CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE, NUM_WINDOWS + 2, WINDOW_MS, KafkaCruiseControlMetricDef.metricDef());
    MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(clusterAndGeneration(metadata.fetch()), NUM_WINDOWS * WINDOW_MS * 2, new OperationProgress());
    int numSnapshots = result.valuesAndExtrapolations().get(PE).metricValues().length();
    assertEquals(NUM_WINDOWS, numSnapshots);
    int numExtrapolations = 0;
    for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
        assertEquals(Extrapolation.AVG_ADJACENT, entry.getValue());
        numExtrapolations++;
    }
    assertEquals(1, numExtrapolations);
}
Also used : OperationProgress(com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress) PartitionEntity(com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionEntity) Metadata(org.apache.kafka.clients.Metadata) Extrapolation(com.linkedin.cruisecontrol.monitor.sampling.aggregator.Extrapolation) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaCruiseControlConfig(com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig) Map(java.util.Map) Test(org.junit.Test)

Example 97 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.

From class KafkaMetricSampleAggregatorTest, method testSnapshotWithPartitionExtrapolations.

@Test
public void testSnapshotWithPartitionExtrapolations() throws NotEnoughValidWindowsException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = getMetadata(Collections.singleton(TP));
    KafkaMetricSampleAggregator metricSampleAggregator = new KafkaMetricSampleAggregator(config, metadata);
    TopicPartition tp1 = new TopicPartition(TOPIC, 1);
    Cluster cluster = getCluster(Arrays.asList(TP, tp1));
    PartitionEntity pe1 = new PartitionEntity(tp1);
    metadata.update(cluster, Collections.emptySet(), 1);
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
    // Populate partition 1 but leave a hole at window index NUM_WINDOWS - 2.
    CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, pe1, 0, WINDOW_MS, KafkaCruiseControlMetricDef.metricDef());
    CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, pe1, NUM_WINDOWS - 1, WINDOW_MS, KafkaCruiseControlMetricDef.metricDef());
    MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(clusterAndGeneration(cluster), Long.MAX_VALUE, new OperationProgress());
    assertEquals(2, result.valuesAndExtrapolations().size());
    assertTrue(result.valuesAndExtrapolations().get(PE).extrapolations().isEmpty());
    assertEquals(1, result.valuesAndExtrapolations().get(pe1).extrapolations().size());
    assertTrue(result.valuesAndExtrapolations().get(pe1).extrapolations().containsKey(1));
    assertEquals((NUM_WINDOWS - 1) * WINDOW_MS, result.valuesAndExtrapolations().get(pe1).window(1));
    assertEquals(Extrapolation.AVG_ADJACENT, result.valuesAndExtrapolations().get(pe1).extrapolations().get(1));
}
Also used : OperationProgress(com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionEntity(com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionEntity) Metadata(org.apache.kafka.clients.Metadata) KafkaCruiseControlConfig(com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig) Cluster(org.apache.kafka.common.Cluster) Test(org.junit.Test)

Example 98 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.

From class KafkaMetricSampleAggregatorTest, method testSnapshotWithUpdatedCluster.

@Test
public void testSnapshotWithUpdatedCluster() throws NotEnoughValidWindowsException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = getMetadata(Collections.singleton(TP));
    KafkaMetricSampleAggregator metricSampleAggregator = new KafkaMetricSampleAggregator(config, metadata);
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
    TopicPartition tp1 = new TopicPartition(TOPIC, 1);
    Cluster cluster = getCluster(Arrays.asList(TP, tp1));
    metadata.update(cluster, Collections.emptySet(), 1);
    Map<PartitionEntity, ValuesAndExtrapolations> snapshotsForPartition = metricSampleAggregator.aggregate(clusterAndGeneration(cluster), Long.MAX_VALUE, new OperationProgress()).valuesAndExtrapolations();
    assertTrue("tp1 should not be included because recent snapshot does not include all topics", snapshotsForPartition.isEmpty());
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
    MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(clusterAndGeneration(cluster), -1, Long.MAX_VALUE, requirements, new OperationProgress());
    snapshotsForPartition = result.valuesAndExtrapolations();
    assertNotNull("tp1 should be included because includeAllTopics is set to true", snapshotsForPartition.get(new PartitionEntity(tp1)));
    Map<Integer, Extrapolation> extrapolations = snapshotsForPartition.get(new PartitionEntity(tp1)).extrapolations();
    assertEquals(NUM_WINDOWS, extrapolations.size());
    for (int i = 0; i < NUM_WINDOWS; i++) {
        assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i));
    }
}
Also used : OperationProgress(com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress) PartitionEntity(com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionEntity) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) Extrapolation(com.linkedin.cruisecontrol.monitor.sampling.aggregator.Extrapolation) ValuesAndExtrapolations(com.linkedin.cruisecontrol.monitor.sampling.aggregator.ValuesAndExtrapolations) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaCruiseControlConfig(com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig) ModelCompletenessRequirements(com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements) Test(org.junit.Test)

Example 99 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.

From class LoadMonitorTaskRunnerTest, method testSimpleFetch.

@Test
public void testSimpleFetch() throws InterruptedException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = new Metadata(10, 10, false);
    MetadataClient metadataClient = new MetadataClient(config, metadata, -1L, TIME);
    MockMetricSampleAggregator mockMetricSampleAggregator = new MockMetricSampleAggregator(config, metadata);
    List<MetricSampler> samplers = new ArrayList<>();
    MetricRegistry dropwizardMetricRegistry = new MetricRegistry();
    for (int i = 0; i < NUM_METRIC_FETCHERS; i++) {
        samplers.add(new MockSampler(0));
    }
    MetricFetcherManager fetcherManager = new MetricFetcherManager(config, mockMetricSampleAggregator, metadataClient, METRIC_DEF, TIME, dropwizardMetricRegistry, samplers);
    LoadMonitorTaskRunner loadMonitorTaskRunner = new LoadMonitorTaskRunner(config, fetcherManager, mockMetricSampleAggregator, metadataClient, TIME);
    while (metadata.fetch().topics().size() < NUM_TOPICS) {
        Thread.sleep(10);
        metadataClient.refreshMetadata();
    }
    loadMonitorTaskRunner.start(true);
    Set<TopicPartition> partitionsToSample = new HashSet<>();
    for (int i = 0; i < NUM_TOPICS; i++) {
        for (int j = 0; j < NUM_PARTITIONS; j++) {
            partitionsToSample.add(new TopicPartition("topic-" + i, j));
        }
    }
    long startMs = System.currentTimeMillis();
    BlockingQueue<PartitionMetricSample> sampleQueue = mockMetricSampleAggregator.metricSampleQueue();
    while (!partitionsToSample.isEmpty() && System.currentTimeMillis() < startMs + 10000) {
        PartitionMetricSample sample = sampleQueue.poll();
        if (sample != null) {
            assertTrue("The topic partition should have been sampled and sampled only once.", partitionsToSample.contains(sample.entity().tp()));
            partitionsToSample.remove(sample.entity().tp());
        }
    }
    assertTrue("Did not see sample for partitions " + Arrays.toString(partitionsToSample.toArray()), partitionsToSample.isEmpty());
    fetcherManager.shutdown();
    assertTrue(sampleQueue.isEmpty());
}
Also used : MetricFetcherManager(com.linkedin.kafka.cruisecontrol.monitor.sampling.MetricFetcherManager) MetricSampler(com.linkedin.kafka.cruisecontrol.monitor.sampling.MetricSampler) MetricRegistry(com.codahale.metrics.MetricRegistry) Metadata(org.apache.kafka.clients.Metadata) ArrayList(java.util.ArrayList) PartitionMetricSample(com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionMetricSample) MetadataClient(com.linkedin.kafka.cruisecontrol.common.MetadataClient) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaCruiseControlConfig(com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 100 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.

From class DeterministicCluster, method smallClusterModel.

/**
 * Generates a small scale cluster.
 * <p>
 * <li>Number of Partitions: 5.</li>
 * <li>Topics: T1, T2</li>
 * <li>Replication factor/Topic: T1: 2, T2: 2</li>
 * <li>Partitions/Topic: T1: 2, T2: 3</li>
 * <p>
 * <h3>Replica Distribution</h3>
 * <li>B0: T1_P1_leader, T1_P2_follower, T2_P3_leader, T2_P2_leader</li>
 * <li>B1: T1_P2_leader, T2_P1_leader, T2_P3_follower</li>
 * <li>B2: T2_P2_follower, T1_P1_follower, T2_P1_follower</li>
 * <p>
 * <h3>Load on Partitions</h3>
 * <p>
 * <ul>
 * <li>T1_P1_leader:</li>
 * <ul>
 * <li>CPU: 100.0</li>
 * <li>DISK: 75.0</li>
 * <li>INBOUND NW: 100.0</li>
 * <li>OUTBOUND NW: 130.0</li>
 * </ul>
 * <li>T1_P1_follower:</li>
 * <ul>
 * <li>CPU: 5.0</li>
 * <li>DISK: 75.0</li>
 * <li>INBOUND NW: 100.0</li>
 * <li>OUTBOUND NW: 0.0</li>
 * </ul>
 * <li>T1_P2_leader:</li>
 * <ul>
 * <li>CPU: 40.5</li>
 * <li>DISK: 55.0</li>
 * <li>INBOUND NW: 90.0</li>
 * <li>OUTBOUND NW: 110.0</li>
 * </ul>
 * <li>T1_P2_follower:</li>
 * <ul>
 * <li>CPU: 80.5</li>
 * <li>DISK: 55.0</li>
 * <li>INBOUND NW: 90.0</li>
 * <li>OUTBOUND NW: 0.0</li>
 * </ul>
 * <li>T2_P1_leader:</li>
 * <ul>
 * <li>CPU: 5.0</li>
 * <li>DISK: 5.0</li>
 * <li>INBOUND NW: 5.0</li>
 * <li>OUTBOUND NW: 6.0</li>
 * </ul>
 * <li>T2_P1_follower:</li>
 * <ul>
 * <li>CPU: 4.0</li>
 * <li>DISK: 5.0</li>
 * <li>INBOUND NW: 5.0</li>
 * <li>OUTBOUND NW: 0.0</li>
 * </ul>
 * <li>T2_P2_leader:</li>
 * <ul>
 * <li>CPU: 100.0</li>
 * <li>DISK: 55.0</li>
 * <li>INBOUND NW: 25.0</li>
 * <li>OUTBOUND NW: 45.0</li>
 * </ul>
 * <li>T2_P2_follower:</li>
 * <ul>
 * <li>CPU: 20.5</li>
 * <li>DISK: 55.0</li>
 * <li>INBOUND NW: 25.0</li>
 * <li>OUTBOUND NW: 0.0</li>
 * </ul>
 * <li>T2_P3_leader:</li>
 * <ul>
 * <li>CPU: 85.0</li>
 * <li>DISK: 95.0</li>
 * <li>INBOUND NW: 45.0</li>
 * <li>OUTBOUND NW: 120.0</li>
 * </ul>
 * <li>T2_P3_follower:</li>
 * <ul>
 * <li>CPU: 55.0</li>
 * <li>DISK: 95.0</li>
 * <li>INBOUND NW: 45.0</li>
 * <li>OUTBOUND NW: 0.0</li>
 * </ul>
 * </ul>
 *
 * @return Small scale cluster.
 */
public static ClusterModel smallClusterModel(Map<Resource, Double> brokerCapacity) {
    List<Integer> orderedRackIdsOfBrokers = Arrays.asList(0, 0, 1);
    ClusterModel cluster = getHomogeneousDeterministicCluster(2, orderedRackIdsOfBrokers, brokerCapacity);
    // Create topic partitions.
    TopicPartition pInfoT10 = new TopicPartition("T1", 0);
    TopicPartition pInfoT11 = new TopicPartition("T1", 1);
    TopicPartition pInfoT20 = new TopicPartition("T2", 0);
    TopicPartition pInfoT21 = new TopicPartition("T2", 1);
    TopicPartition pInfoT22 = new TopicPartition("T2", 2);
    // Create replicas for topic: T1.
    cluster.createReplica("0", 0, pInfoT10, 0, true);
    cluster.createReplica("1", 2, pInfoT10, 1, false);
    cluster.createReplica("0", 1, pInfoT11, 0, true);
    cluster.createReplica("0", 0, pInfoT11, 1, false);
    // Create replicas for topic: T2.
    cluster.createReplica("0", 1, pInfoT20, 0, true);
    cluster.createReplica("1", 2, pInfoT20, 1, false);
    cluster.createReplica("0", 0, pInfoT21, 0, true);
    cluster.createReplica("1", 2, pInfoT21, 1, false);
    cluster.createReplica("0", 0, pInfoT22, 0, true);
    cluster.createReplica("0", 1, pInfoT22, 1, false);
    // Create snapshots and push them to the cluster.
    List<Long> windows = Collections.singletonList(1L);
    cluster.setReplicaLoad("0", 0, pInfoT10, createLoad(100.0, 100.0, 130.0, 75.0), windows);
    cluster.setReplicaLoad("1", 2, pInfoT10, createLoad(5.0, 100.0, 0.0, 75.0), windows);
    cluster.setReplicaLoad("0", 1, pInfoT11, createLoad(40.5, 90.0, 110.0, 55.0), windows);
    cluster.setReplicaLoad("0", 0, pInfoT11, createLoad(80.5, 90.0, 0.0, 55.0), windows);
    cluster.setReplicaLoad("0", 1, pInfoT20, createLoad(5.0, 5.0, 6.0, 5.0), windows);
    cluster.setReplicaLoad("1", 2, pInfoT20, createLoad(4.0, 5.0, 0.0, 5.0), windows);
    cluster.setReplicaLoad("0", 0, pInfoT21, createLoad(100.0, 25.0, 45.0, 55.0), windows);
    cluster.setReplicaLoad("1", 2, pInfoT21, createLoad(20.5, 25.0, 0.0, 55.0), windows);
    cluster.setReplicaLoad("0", 0, pInfoT22, createLoad(85.0, 45.0, 120.0, 95.0), windows);
    cluster.setReplicaLoad("0", 1, pInfoT22, createLoad(55.0, 45.0, 0.0, 95.0), windows);
    return cluster;
}
Also used : ClusterModel(com.linkedin.kafka.cruisecontrol.model.ClusterModel) TopicPartition(org.apache.kafka.common.TopicPartition)

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition): 1729
HashMap (java.util.HashMap): 744
Test (org.junit.Test): 519
ArrayList (java.util.ArrayList): 416
Map (java.util.Map): 361
Test (org.junit.jupiter.api.Test): 347
HashSet (java.util.HashSet): 281
List (java.util.List): 260
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 246
Set (java.util.Set): 189
LinkedHashMap (java.util.LinkedHashMap): 180
PartitionInfo (org.apache.kafka.common.PartitionInfo): 170
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 155
TaskId (org.apache.kafka.streams.processor.TaskId): 145
Node (org.apache.kafka.common.Node): 140
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 109
KafkaException (org.apache.kafka.common.KafkaException): 105
Errors (org.apache.kafka.common.protocol.Errors): 105
ByteBuffer (java.nio.ByteBuffer): 99
Properties (java.util.Properties): 93
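
The counts above show TopicPartition appearing most often alongside HashMap and OffsetAndMetadata. As a quick orientation, here is a minimal, self-contained sketch of that recurring pattern: TopicPartition is an immutable (topic, partition) pair with value-based equals() and hashCode(), so it can serve directly as a map key, for example in the offsets map that KafkaConsumer.commitSync(Map) accepts. This sketch is not taken from cruise-control; the topic name, offsets, and class name are made up for illustration.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class TopicPartitionAsKeyExample {

    public static void main(String[] args) {
        // Hypothetical topic and offsets, used only to illustrate the map-key pattern.
        TopicPartition tp0 = new TopicPartition("my-topic", 0);
        TopicPartition tp1 = new TopicPartition("my-topic", 1);
        // The offsets-to-commit map shape expected by KafkaConsumer.commitSync(Map).
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(tp0, new OffsetAndMetadata(42L));
        offsets.put(tp1, new OffsetAndMetadata(7L, "checkpoint after batch 3"));
        // Lookups work by value: an equal (topic, partition) pair finds the same entry.
        OffsetAndMetadata committed = offsets.get(new TopicPartition("my-topic", 0));
        System.out.println(tp0.topic() + "-" + tp0.partition() + " -> " + committed.offset());
    }
}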