Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.
From the class KafkaMetricSampleAggregatorTest, method testSnapshotWithUpdatedCluster:
@Test
public void testSnapshotWithUpdatedCluster() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaMetricSampleAggregator metricSampleAggregator = new KafkaMetricSampleAggregator(config, metadata);
  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  // Add a second partition to the cluster metadata; it has no samples yet.
  TopicPartition tp1 = new TopicPartition(TOPIC, 1);
  Cluster cluster = getCluster(Arrays.asList(TP, tp1));
  metadata.update(cluster, Collections.emptySet(), 1);
  Map<PartitionEntity, ValuesAndExtrapolations> snapshotsForPartition =
      metricSampleAggregator.aggregate(clusterAndGeneration(cluster), Long.MAX_VALUE, new OperationProgress())
                            .valuesAndExtrapolations();
  assertTrue("tp1 should not be included because the recent snapshot does not include all topics",
             snapshotsForPartition.isEmpty());
  ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(clusterAndGeneration(cluster), -1, Long.MAX_VALUE, requirements,
                                       new OperationProgress());
  snapshotsForPartition = result.valuesAndExtrapolations();
  assertNotNull("tp1 should be included because includeAllTopics is set to true",
                snapshotsForPartition.get(new PartitionEntity(tp1)));
  Map<Integer, Extrapolation> extrapolations = snapshotsForPartition.get(new PartitionEntity(tp1)).extrapolations();
  assertEquals(NUM_WINDOWS, extrapolations.size());
  for (int i = 0; i < NUM_WINDOWS; i++) {
    assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i));
  }
}
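The flag that flips tp1 from excluded to included is the third argument of ModelCompletenessRequirements. A minimal sketch of that constructor call, with argument meanings inferred from the assertions in this test (the inline names are illustrative assumptions, not cruise-control's documented parameter names):

// Hedged sketch: argument meanings inferred from the test above.
ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(
    1,      // minimum number of valid windows required
    0.0,    // minimum ratio of valid partitions (0.0 = no coverage requirement)
    true);  // includeAllTopics: report entities even when they have no samples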
Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.
From the class KafkaMetricSampleAggregatorTest, method testFallbackToAvgAvailable:
@Test
public void testFallbackToAvgAvailable() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaMetricSampleAggregator metricSampleAggregator = new KafkaMetricSampleAggregator(config, metadata);
  // Populate NUM_WINDOWS - 1 windows, starting from window index 2; the first two windows stay empty.
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 1, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator, PE, 2, WINDOW_MS,
                                                      KafkaCruiseControlMetricDef.metricDef());
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(clusterAndGeneration(metadata.fetch()), NUM_WINDOWS * WINDOW_MS,
                                       new OperationProgress());
  assertTrue(result.valuesAndExtrapolations().isEmpty());
  // Give the first two windows fewer samples than the per-window minimum.
  populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW - 2, metricSampleAggregator);
  result = metricSampleAggregator.aggregate(clusterAndGeneration(metadata.fetch()), NUM_WINDOWS * WINDOW_MS,
                                            new OperationProgress());
  int numSnapshots = result.valuesAndExtrapolations().get(PE).metricValues().length();
  assertEquals(NUM_WINDOWS, numSnapshots);
  int numExtrapolations = 0;
  for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
    assertEquals(Extrapolation.AVG_AVAILABLE, entry.getValue());
    numExtrapolations++;
  }
  assertEquals(2, numExtrapolations);
}
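The two under-sampled windows still yield values because the aggregator falls back to the average of whatever samples are available, which is what the AVG_AVAILABLE extrapolation marks. The sketch below illustrates the idea only; it is not cruise-control's actual implementation:

// Illustrative fallback: average whatever samples a window does have.
static double avgAvailable(double[] windowSamples) {
  if (windowSamples.length == 0) {
    return Double.NaN; // nothing to average; no extrapolation possible
  }
  double sum = 0.0;
  for (double sample : windowSamples) {
    sum += sample;
  }
  return sum / windowSamples.length;
}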
Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.
From the class LoadMonitorTaskRunnerTest, method testSimpleFetch:
@Test
public void testSimpleFetch() throws InterruptedException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = new Metadata(10, 10, false);
  MetadataClient metadataClient = new MetadataClient(config, metadata, -1L, TIME);
  MockMetricSampleAggregator mockMetricSampleAggregator = new MockMetricSampleAggregator(config, metadata);
  List<MetricSampler> samplers = new ArrayList<>();
  MetricRegistry dropwizardMetricRegistry = new MetricRegistry();
  for (int i = 0; i < NUM_METRIC_FETCHERS; i++) {
    samplers.add(new MockSampler(0));
  }
  MetricFetcherManager fetcherManager =
      new MetricFetcherManager(config, mockMetricSampleAggregator, metadataClient, METRIC_DEF, TIME,
                               dropwizardMetricRegistry, samplers);
  LoadMonitorTaskRunner loadMonitorTaskRunner =
      new LoadMonitorTaskRunner(config, fetcherManager, mockMetricSampleAggregator, metadataClient, TIME);
  // Wait until the metadata contains all the test topics before starting the task runner.
  while (metadata.fetch().topics().size() < NUM_TOPICS) {
    Thread.sleep(10);
    metadataClient.refreshMetadata();
  }
  loadMonitorTaskRunner.start(true);
  Set<TopicPartition> partitionsToSample = new HashSet<>();
  for (int i = 0; i < NUM_TOPICS; i++) {
    for (int j = 0; j < NUM_PARTITIONS; j++) {
      partitionsToSample.add(new TopicPartition("topic-" + i, j));
    }
  }
  // Poll the sample queue for up to 10 seconds and verify every partition is sampled exactly once.
  long startMs = System.currentTimeMillis();
  BlockingQueue<PartitionMetricSample> sampleQueue = mockMetricSampleAggregator.metricSampleQueue();
  while (!partitionsToSample.isEmpty() && System.currentTimeMillis() < startMs + 10000) {
    PartitionMetricSample sample = sampleQueue.poll();
    if (sample != null) {
      assertTrue("The topic partition should have been sampled and sampled only once.",
                 partitionsToSample.contains(sample.entity().tp()));
      partitionsToSample.remove(sample.entity().tp());
    }
  }
  assertTrue("Did not see samples for partitions " + Arrays.toString(partitionsToSample.toArray()),
             partitionsToSample.isEmpty());
  fetcherManager.shutdown();
  assertTrue(sampleQueue.isEmpty());
}
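The wait loop above spins on a non-blocking poll(). When busy-waiting is undesirable, the timed BlockingQueue.poll(timeout, unit) overload is a drop-in alternative; this sketch assumes the same sampleQueue and partitionsToSample from the test, plus java.util.concurrent.TimeUnit:

// Blocking variant of the wait loop; poll(timeout, unit) sleeps until a sample
// arrives or the remaining time budget is spent.
long deadlineMs = System.currentTimeMillis() + 10000;
while (!partitionsToSample.isEmpty()) {
  long remainingMs = deadlineMs - System.currentTimeMillis();
  if (remainingMs <= 0) {
    break;
  }
  PartitionMetricSample sample = sampleQueue.poll(remainingMs, TimeUnit.MILLISECONDS);
  if (sample != null) {
    partitionsToSample.remove(sample.entity().tp());
  }
}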
Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.
From the class BrokerFailureDetectorTest, method createBrokerFailureDetector:
private BrokerFailureDetector createBrokerFailureDetector(Queue<Anomaly> anomalies, Time time) {
  // Mock a load monitor that always reports brokers 0 and 1 as hosting partitions.
  LoadMonitor mockLoadMonitor = EasyMock.mock(LoadMonitor.class);
  EasyMock.expect(mockLoadMonitor.brokersWithPartitions(anyLong()))
          .andAnswer(() -> new HashSet<>(Arrays.asList(0, 1)))
          .anyTimes();
  EasyMock.replay(mockLoadMonitor);
  Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  props.setProperty(KafkaCruiseControlConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper().getConnectionString());
  KafkaCruiseControlConfig kafkaCruiseControlConfig = new KafkaCruiseControlConfig(props);
  return new BrokerFailureDetector(kafkaCruiseControlConfig, mockLoadMonitor, anomalies, time);
}
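A hypothetical call site for this factory, to show how the pieces fit together (the queue implementation and MockTime are illustrative assumptions, not necessarily what the real test uses):

// Hypothetical usage sketch; only the detector and anomaly types come from the snippet above.
Queue<Anomaly> anomalies = new ConcurrentLinkedQueue<>();
BrokerFailureDetector detector = createBrokerFailureDetector(anomalies, new MockTime());
// ... stop a broker in the test cluster, then assert that a broker-failure
// anomaly eventually appears in the anomalies queue ...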
Use of com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig in project cruise-control by linkedin.
From the class RandomClusterTest, method data:
/**
* Populate parameters for the {@link OptimizationVerifier}. All brokers are alive.
*
* @return Parameters for the {@link OptimizationVerifier}.
*/
public static Collection<Object[]> data(TestConstants.Distribution distribution) {
  Collection<Object[]> p = new ArrayList<>();
  Map<Integer, String> goalNameByPriority = new HashMap<>();
  goalNameByPriority.put(1, RackAwareGoal.class.getName());
  goalNameByPriority.put(2, ReplicaCapacityGoal.class.getName());
  goalNameByPriority.put(3, DiskCapacityGoal.class.getName());
  goalNameByPriority.put(4, NetworkInboundCapacityGoal.class.getName());
  goalNameByPriority.put(5, NetworkOutboundCapacityGoal.class.getName());
  goalNameByPriority.put(6, CpuCapacityGoal.class.getName());
  goalNameByPriority.put(7, ReplicaDistributionGoal.class.getName());
  goalNameByPriority.put(8, PotentialNwOutGoal.class.getName());
  goalNameByPriority.put(9, DiskUsageDistributionGoal.class.getName());
  goalNameByPriority.put(10, NetworkInboundUsageDistributionGoal.class.getName());
  goalNameByPriority.put(11, NetworkOutboundUsageDistributionGoal.class.getName());
  goalNameByPriority.put(12, CpuUsageDistributionGoal.class.getName());
  goalNameByPriority.put(13, TopicReplicaDistributionGoal.class.getName());
  goalNameByPriority.put(14, PreferredLeaderElectionGoal.class.getName());
  goalNameByPriority.put(15, LeaderBytesInDistributionGoal.class.getName());
  Map<Integer, String> kafkaAssignerGoals = new HashMap<>();
  kafkaAssignerGoals.put(1, KafkaAssignerEvenRackAwareGoal.class.getName());
  kafkaAssignerGoals.put(2, KafkaAssignerDiskUsageDistributionGoal.class.getName());
  List<OptimizationVerifier.Verification> verifications = Arrays.asList(NEW_BROKERS, DEAD_BROKERS, REGRESSION);
  List<OptimizationVerifier.Verification> kafkaAssignerVerifications =
      Arrays.asList(GOAL_VIOLATION, DEAD_BROKERS, REGRESSION);
  Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(1500L));
  BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  Map<ClusterProperty, Number> modifiedProperties;
  // Test: Increase Broker Count
  for (int i = 1; i <= 6; i++) {
    modifiedProperties = new HashMap<>();
    modifiedProperties.put(ClusterProperty.NUM_BROKERS, 20 + i * 20);
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  // Test: Increase Replica Count
  props.setProperty(KafkaCruiseControlConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(3000L));
  balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
  balancingConstraint.setResourceBalancePercentage(TestConstants.LOW_BALANCE_PERCENTAGE);
  balancingConstraint.setCapacityThreshold(TestConstants.MEDIUM_CAPACITY_THRESHOLD);
  for (int i = 7; i <= 12; i++) {
    modifiedProperties = new HashMap<>();
    modifiedProperties.put(ClusterProperty.NUM_REPLICAS, 50001 + (i - 7) * 5001);
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  // Test: Increase Topic Count
  for (int i = 13; i <= 18; i++) {
    modifiedProperties = new HashMap<>();
    modifiedProperties.put(ClusterProperty.NUM_TOPICS, 3000 + (i - 13) * 1000);
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  // Test: Increase Replication Count
  for (int i = 19; i <= 24; i++) {
    modifiedProperties = new HashMap<>();
    // The replication factor (i - 16) ranges over 3..8; trim the replica count so it
    // is an exact multiple of the replication factor.
    modifiedProperties.put(ClusterProperty.NUM_REPLICAS, 50000 - (50000 % (i - 16)));
    modifiedProperties.put(ClusterProperty.MIN_REPLICATION, (i - 16));
    modifiedProperties.put(ClusterProperty.MAX_REPLICATION, (i - 16));
    p.add(params(modifiedProperties, goalNameByPriority, distribution, balancingConstraint, verifications));
    p.add(params(modifiedProperties, kafkaAssignerGoals, distribution, balancingConstraint, kafkaAssignerVerifications));
  }
  return p;
}
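The NUM_REPLICAS arithmetic in the last loop is easy to misread; this standalone snippet shows what it guarantees for each replication factor:

// For each replication factor rf in 3..8 (i - 16 above), round the replica
// count down to the largest multiple of rf not exceeding 50000, so replicas
// divide evenly among partitions of that replication factor.
for (int rf = 3; rf <= 8; rf++) {
  int numReplicas = 50000 - (50000 % rf);
  System.out.println("rf=" + rf + " -> numReplicas=" + numReplicas
      + " (divisible: " + (numReplicas % rf == 0) + ")");
}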