Example 6 with KafkaCruiseControlConfig

Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.

From class KafkaMetricSampleAggregatorTest, method testSnapshotWithUpdatedCluster:

@Test
public void testSnapshotWithUpdatedCluster() throws NotEnoughValidWindowsException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = getMetadata(Collections.singleton(TP));
    KafkaMetricSampleAggregator metricSampleAggregator = new KafkaMetricSampleAggregator(config, metadata);
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
    TopicPartition tp1 = new TopicPartition(TOPIC, 1);
    Cluster cluster = getCluster(Arrays.asList(TP, tp1));
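    // Update the metadata with a second partition (tp1) that has no samples yet.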
    metadata.update(cluster, Collections.emptySet(), 1);
    Map<PartitionEntity, ValuesAndExtrapolations> snapshotsForPartition =
            metricSampleAggregator.aggregate(clusterAndGeneration(cluster), Long.MAX_VALUE, new OperationProgress())
                                  .valuesAndExtrapolations();
    assertTrue("tp1 should not be included because recent snapshot does not include all topics", snapshotsForPartition.isEmpty());
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
    MetricSampleAggregationResult<String, PartitionEntity> result =
            metricSampleAggregator.aggregate(clusterAndGeneration(cluster), -1, Long.MAX_VALUE, requirements, new OperationProgress());
    snapshotsForPartition = result.valuesAndExtrapolations();
    assertNotNull("tp1 should be included because includeAllTopics is set to true", snapshotsForPartition.get(new PartitionEntity(tp1)));
    Map<Integer, Extrapolation> extrapolations = snapshotsForPartition.get(new PartitionEntity(tp1)).extrapolations();
    assertEquals(NUM_WINDOWS, extrapolations.size());
    for (int i = 0; i < NUM_WINDOWS; i++) {
        assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i));
    }
}
Also used: OperationProgress (com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress), PartitionEntity (com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionEntity), Metadata (org.apache.kafka.clients.Metadata), Cluster (org.apache.kafka.common.Cluster), Extrapolation (com.linkedin.cruisecontrol.monitor.sampling.aggregator.Extrapolation), ValuesAndExtrapolations (com.linkedin.cruisecontrol.monitor.sampling.aggregator.ValuesAndExtrapolations), TopicPartition (org.apache.kafka.common.TopicPartition), KafkaCruiseControlConfig (com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig), ModelCompletenessRequirements (com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements), Test (org.junit.Test)
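The key difference between the two aggregate(...) calls above is the ModelCompletenessRequirements argument. A minimal sketch of that construction follows; the meaning of each positional argument is inferred from the test's assertion messages, not from project documentation:

import com.linkedin.kafka.cruisecontrol.monitor.ModelCompletenessRequirements;

// 1    -> minimum number of valid windows required (inferred)
// 0.0  -> minimum monitored-partition percentage (inferred)
// true -> includeAllTopics, which is why tp1 shows up in the second result above
ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);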

Example 7 with KafkaCruiseControlConfig

Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.

From class KafkaMetricSampleAggregatorTest, method testFallbackToAvgAvailable:

@Test
public void testFallbackToAvgAvailable() throws NotEnoughValidWindowsException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = getMetadata(Collections.singleton(TP));
    KafkaMetricSampleAggregator metricSampleAggregator = new KafkaMetricSampleAggregator(config, metadata);
    // Populate NUM_WINDOWS - 1 windows, each with enough samples.
    CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE, 2, WINDOW_MS, KafkaCruiseControlMetricDef.metricDef());
    MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(clusterAndGeneration(metadata.fetch()), NUM_WINDOWS * WINDOW_MS, new OperationProgress());
    assertTrue(result.valuesAndExtrapolations().isEmpty());
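    // Now add two windows with too few samples each; those windows should be extrapolated from the average of the available samples (AVG_AVAILABLE).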
    populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW - 2, metricSampleAggregator);
    result = metricSampleAggregator.aggregate(clusterAndGeneration(metadata.fetch()), NUM_WINDOWS * WINDOW_MS, new OperationProgress());
    int numSnapshots = result.valuesAndExtrapolations().get(PE).metricValues().length();
    assertEquals(NUM_WINDOWS, numSnapshots);
    int numExtrapolations = 0;
    for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
        assertEquals(Extrapolation.AVG_AVAILABLE, entry.getValue());
        numExtrapolations++;
    }
    assertEquals(2, numExtrapolations);
}
Also used: Extrapolation (com.linkedin.cruisecontrol.monitor.sampling.aggregator.Extrapolation), OperationProgress (com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress), PartitionEntity (com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionEntity), Metadata (org.apache.kafka.clients.Metadata), KafkaCruiseControlConfig (com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig), Map (java.util.Map), Test (org.junit.Test)

Example 8 with KafkaCruiseControlConfig

Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.

From class LoadMonitorTaskRunnerTest, method testSimpleFetch:

@Test
public void testSimpleFetch() throws InterruptedException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = new Metadata(10, 10, false);
    MetadataClient metadataClient = new MetadataClient(config, metadata, -1L, TIME);
    MockMetricSampleAggregator mockMetricSampleAggregator = new MockMetricSampleAggregator(config, metadata);
    List<MetricSampler> samplers = new ArrayList<>();
    MetricRegistry dropwizardMetricRegistry = new MetricRegistry();
    for (int i = 0; i < NUM_METRIC_FETCHERS; i++) {
        samplers.add(new MockSampler(0));
    }
    MetricFetcherManager fetcherManager =
            new MetricFetcherManager(config, mockMetricSampleAggregator, metadataClient, METRIC_DEF, TIME, dropwizardMetricRegistry, samplers);
    LoadMonitorTaskRunner loadMonitorTaskRunner =
            new LoadMonitorTaskRunner(config, fetcherManager, mockMetricSampleAggregator, metadataClient, TIME);
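    // Wait until the metadata has picked up all of the test topics.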
    while (metadata.fetch().topics().size() < NUM_TOPICS) {
        Thread.sleep(10);
        metadataClient.refreshMetadata();
    }
    loadMonitorTaskRunner.start(true);
    Set<TopicPartition> partitionsToSample = new HashSet<>();
    for (int i = 0; i < NUM_TOPICS; i++) {
        for (int j = 0; j < NUM_PARTITIONS; j++) {
            partitionsToSample.add(new TopicPartition("topic-" + i, j));
        }
    }
    long startMs = System.currentTimeMillis();
    BlockingQueue<PartitionMetricSample> sampleQueue = mockMetricSampleAggregator.metricSampleQueue();
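    // Drain the sample queue until every partition has been sampled, or give up after 10 seconds.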
    while (!partitionsToSample.isEmpty() && System.currentTimeMillis() < startMs + 10000) {
        PartitionMetricSample sample = sampleQueue.poll();
        if (sample != null) {
            assertTrue("The topic partition should have been sampled and sampled only once.", partitionsToSample.contains(sample.entity().tp()));
            partitionsToSample.remove(sample.entity().tp());
        }
    }
    assertTrue("Did not see sample for partitions " + Arrays.toString(partitionsToSample.toArray()), partitionsToSample.isEmpty());
    fetcherManager.shutdown();
    assertTrue(sampleQueue.isEmpty());
}
Also used: MetricFetcherManager (com.linkedin.kafka.cruisecontrol.monitor.sampling.MetricFetcherManager), MetricSampler (com.linkedin.kafka.cruisecontrol.monitor.sampling.MetricSampler), MetricRegistry (com.codahale.metrics.MetricRegistry), Metadata (org.apache.kafka.clients.Metadata), ArrayList (java.util.ArrayList), PartitionMetricSample (com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionMetricSample), MetadataClient (com.linkedin.kafka.cruisecontrol.common.MetadataClient), TopicPartition (org.apache.kafka.common.TopicPartition), KafkaCruiseControlConfig (com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig), HashSet (java.util.HashSet), Test (org.junit.Test)

Example 9 with KafkaCruiseControlConfig

Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.

From class BrokerFailureDetectorTest, method createBrokerFailureDetector:

private BrokerFailureDetector createBrokerFailureDetector(Queue<Anomaly> anomalies, Time time) {
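    // Mock a LoadMonitor that reports brokers 0 and 1 as hosting partitions.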
    LoadMonitor mockLoadMonitor = EasyMock.mock(LoadMonitor.class);
    EasyMock.expect(mockLoadMonitor.brokersWithPartitions(anyLong())).andAnswer(() -> new HashSet<>(Arrays.asList(0, 1))).anyTimes();
    EasyMock.replay(mockLoadMonitor);
    Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
    props.setProperty(KafkaCruiseControlConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper().getConnectionString());
    KafkaCruiseControlConfig kafkaCruiseControlConfig = new KafkaCruiseControlConfig(props);
    return new BrokerFailureDetector(kafkaCruiseControlConfig, mockLoadMonitor, anomalies, time);
}
Also used: KafkaCruiseControlConfig (com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig), Properties (java.util.Properties), LoadMonitor (com.linkedin.kafka.cruisecontrol.monitor.LoadMonitor), HashSet (java.util.HashSet)
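The construction pattern in createBrokerFailureDetector is the one shared by most of these examples: start from the shared test Properties, override what the test needs, and wrap the result in a KafkaCruiseControlConfig. A minimal sketch of that pattern, assuming KafkaCruiseControlUnitTestUtils is the same test helper used above (its import is omitted because its package is not shown here) and using a caller-supplied zkConnect string in place of the embedded ZooKeeper's connection string:

import java.util.Properties;
import com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig;

private static KafkaCruiseControlConfig configWithZookeeper(String zkConnect) {
    // Start from the default test properties and override only the ZooKeeper connect string.
    Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
    props.setProperty(KafkaCruiseControlConfig.ZOOKEEPER_CONNECT_CONFIG, zkConnect);
    return new KafkaCruiseControlConfig(props);
}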

Example 10 with KafkaCruiseControlConfig

Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.

From class RandomClusterTest, method data:

/**
 * Populate parameters for the {@link OptimizationVerifier}. All brokers are alive.
 *
 * @return Parameters for the {@link OptimizationVerifier}.
 */
public static Collection<Object[]> data(TestConstants.Distribution distribution) {
    Collection<Object[]> p = new ArrayList<>();
    Map<Integer, String> goalNameByPriority = new HashMap<>();
    goalNameByPriority.put(1, RackAwareGoal.class.getName());
    goalNameByPriority.put(2, ReplicaCapacityGoal.class.getName());
    goalNameByPriority.put(3, DiskCapacityGoal.class.getName());
    goalNameByPriority.put(4, NetworkInboundCapacityGoal.class.getName());
    goalNameByPriority.put(5, NetworkOutboundCapacityGoal.class.getName());
    goalNameByPriority.put(6, CpuCapacityGoal.class.getName());
    goalNameByPriority.put(7, ReplicaDistributionGoal.class.getName());
    goalNameByPriority.put(8, PotentialNwOutGoal.class.getName());
    goalNameByPriority.put(9, DiskUsageDistributionGoal.class.getName());
    goalNameByPriority.put(10, NetworkInboundUsageDistributionGoal.class.getName());
    goalNameByPriority.put(11, NetworkOutboundUsageDistributionGoal.class.getName());
    goalNameByPriority.put(12, CpuUsageDistributionGoal.class.getName());
    goalNameByPriority.put(13, TopicReplicaDistributionGoal.class.getName());
    goalNameByPriority.put(14, PreferredLeaderElectionGoal.class.getName());
    goalNameByPriority.put(15, LeaderBytesInDistributionGoal.class.getName());
    Map<Integer, String> kafkaAssignerGoals = new HashMap<>();
    kafkaAssignerGoals.put(1, KafkaAssignerEvenRackAwareGoal.class.getName());
    kafkaAssignerGoals.put(2, KafkaAssignerDiskUsageDistributionGoal.class.getName());
    List<OptimizationVerifier.Verification> verifications = Arrays.asList(NEW_BROKERS, DEAD_BROKERS, REGRESSION);
    List<OptimizationVerifier.Verification> kafkaAssignerVerifications = Arrays.asList(GOAL_VIOLATION, DEAD_BROKERS, REGRESSION);
    Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
    props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(1500L));
    BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
    balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
    balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
    Map<ClusterProperty, Number> modifiedProperties;
    // Test: Increase Broker Count
    for (int i = 1; i <= 6; i++) {
        modifiedProperties = new HashMap<>();
        modifiedProperties.put(ClusterProperty.NUM_BROKERS, 20 + i * 20);
        p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
        p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
    }
    // Test: Increase Replica Count
    props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(3000L));
    balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
    balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
    balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
    for (int i = 7; i <= 12; i++) {
        modifiedProperties = new HashMap<>();
        modifiedProperties.put(ClusterProperty.NUM_REPLICAS, 50001 + (i - 7) * 5001);
        p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
        p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
    }
    // Test: Increase Topic Count
    for (int i = 13; i <= 18; i++) {
        modifiedProperties = new HashMap<>();
        modifiedProperties.put(ClusterProperty.NUM_TOPICS, 3000 + (i - 13) * 1000);
        p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
        p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
    }
    // Test: Increase Replication Count. The replication factor (i - 16) ranges from 3 to 8, and the replica count is rounded down to a multiple of it.
    for (int i = 19; i <= 24; i++) {
        modifiedProperties = new HashMap<>();
        modifiedProperties.put(ClusterProperty.NUM_REPLICAS, 50000 - (50000 % (i - 16)));
        modifiedProperties.put(ClusterProperty.MIN_REPLICATION, (i - 16));
        modifiedProperties.put(ClusterProperty.MAX_REPLICATION, (i - 16));
        p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
        p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
    }
    return p;
}
Also used: NetworkInboundUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundUsageDistributionGoal), HashMap (java.util.HashMap), PreferredLeaderElectionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.PreferredLeaderElectionGoal), CpuUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuUsageDistributionGoal), ArrayList (java.util.ArrayList), KafkaAssignerDiskUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerDiskUsageDistributionGoal), PotentialNwOutGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.PotentialNwOutGoal), Properties (java.util.Properties), NetworkInboundCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal), DiskCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal), KafkaAssignerEvenRackAwareGoal (com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerEvenRackAwareGoal), TopicReplicaDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.TopicReplicaDistributionGoal), NetworkOutboundCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal), CpuCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal), KafkaCruiseControlConfig (com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig), NetworkOutboundUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundUsageDistributionGoal), ClusterProperty (com.linkedin.kafka.cruisecontrol.common.ClusterProperty), LeaderBytesInDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderBytesInDistributionGoal), ReplicaDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal), RackAwareGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal), DiskUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskUsageDistributionGoal), ReplicaCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal)
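data() builds essentially the same BalancingConstraint twice, differing only in the max-replicas-per-broker override. A sketch of a helper that could factor this out, assuming KafkaCruiseControlUnitTestUtils and TestConstants are the same test helpers referenced above (their imports are omitted because their packages are not shown here):

import java.util.Properties;
import com.linkedin.kafka.cruisecontrol.analyzer.BalancingConstraint;
import com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig;

private static BalancingConstraint balancingConstraint(long maxReplicasPerBroker) {
    Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
    props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(maxReplicasPerBroker));
    // Same thresholds as used in data() above.
    BalancingConstraint constraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
    constraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
    constraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
    return constraint;
}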

Aggregations

KafkaCruiseControlConfig (com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig): 30
Metadata (org.apache.kafka.clients.Metadata): 15
Properties (java.util.Properties): 13
Test (org.junit.Test): 13
OperationProgress (com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress): 11
PartitionEntity (com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionEntity): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 9
MetricRegistry (com.codahale.metrics.MetricRegistry): 7
ArrayList (java.util.ArrayList): 6
BalancingConstraint (com.linkedin.kafka.cruisecontrol.analyzer.BalancingConstraint): 5
HashMap (java.util.HashMap): 5
CpuCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal): 4
CpuUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuUsageDistributionGoal): 4
DiskCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal): 4
DiskUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskUsageDistributionGoal): 4
NetworkInboundCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal): 4
NetworkInboundUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundUsageDistributionGoal): 4
NetworkOutboundCapacityGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal): 4
NetworkOutboundUsageDistributionGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundUsageDistributionGoal): 4
PotentialNwOutGoal (com.linkedin.kafka.cruisecontrol.analyzer.goals.PotentialNwOutGoal): 4