
Example 1 with KafkaScheduler

Use of kafka.utils.KafkaScheduler in the Apache Kafka project (apache/kafka).

From the class CheckpointBench, the method setup:

@SuppressWarnings("deprecation")
@Setup(Level.Trial)
public void setup() {
    // Single-threaded daemon KafkaScheduler that runs background work for the log and replica managers below.
    this.scheduler = new KafkaScheduler(1, "scheduler-thread", true);
    this.brokerProperties = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect(), true, true, 9092, Option.empty(), Option.empty(), Option.empty(), true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1));
    this.metrics = new Metrics();
    this.time = new MockTime();
    this.failureChannel = new LogDirFailureChannel(brokerProperties.logDirs().size());
    final List<File> files = JavaConverters.seqAsJavaList(brokerProperties.logDirs()).stream().map(File::new).collect(Collectors.toList());
    this.logManager = TestUtils.createLogManager(JavaConverters.asScalaBuffer(files), LogConfig.apply(), new MockConfigRepository(),
        CleanerConfig.apply(1, 4 * 1024 * 1024L, 0.9d, 1024 * 1024, 32 * 1024 * 1024, Double.MAX_VALUE, 15 * 1000, true, "MD5"),
        time, ApiVersion.latestVersion());
    scheduler.startup();
    final BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    final MetadataCache metadataCache = MetadataCache.zkMetadataCache(this.brokerProperties.brokerId());
    this.quotaManagers = QuotaFactory.instantiate(this.brokerProperties, this.metrics, this.time, "");
    this.alterIsrManager = TestUtils.createAlterIsrManager();
    this.replicaManager = new ReplicaManagerBuilder()
        .setConfig(brokerProperties).setMetrics(metrics).setTime(time)
        .setScheduler(scheduler).setLogManager(logManager).setQuotaManagers(quotaManagers)
        .setBrokerTopicStats(brokerTopicStats).setMetadataCache(metadataCache)
        .setLogDirFailureChannel(failureChannel).setAlterIsrManager(alterIsrManager)
        .build();
    replicaManager.startup();
    List<TopicPartition> topicPartitions = new ArrayList<>();
    for (int topicNum = 0; topicNum < numTopics; topicNum++) {
        final String topicName = this.topicName + "-" + topicNum;
        for (int partitionNum = 0; partitionNum < numPartitions; partitionNum++) {
            topicPartitions.add(new TopicPartition(topicName, partitionNum));
        }
    }
    // Stub OffsetCheckpoints that reports offset 0 for every (logDir, partition), so no checkpoint files need to exist yet.
    OffsetCheckpoints checkpoints = (logDir, topicPartition) -> Option.apply(0L);
    for (TopicPartition topicPartition : topicPartitions) {
        final Partition partition = this.replicaManager.createPartition(topicPartition);
        partition.createLogIfNotExists(true, false, checkpoints, Option.apply(Uuid.randomUuid()));
    }
    replicaManager.checkpointHighWatermarks();
}
Also used : Uuid(org.apache.kafka.common.Uuid) Measurement(org.openjdk.jmh.annotations.Measurement) ReplicaManagerBuilder(kafka.server.builders.ReplicaManagerBuilder) CleanerConfig(kafka.log.CleanerConfig) OffsetCheckpoints(kafka.server.checkpoints.OffsetCheckpoints) QuotaFactory(kafka.server.QuotaFactory) KafkaScheduler(kafka.utils.KafkaScheduler) LogConfig(kafka.log.LogConfig) Scope(org.openjdk.jmh.annotations.Scope) Warmup(org.openjdk.jmh.annotations.Warmup) ArrayList(java.util.ArrayList) MockTime(kafka.utils.MockTime) OutputTimeUnit(org.openjdk.jmh.annotations.OutputTimeUnit) LogManager(kafka.log.LogManager) ReplicaManager(kafka.server.ReplicaManager) MockConfigRepository(kafka.server.metadata.MockConfigRepository) TearDown(org.openjdk.jmh.annotations.TearDown) KafkaConfig(kafka.server.KafkaConfig) Threads(org.openjdk.jmh.annotations.Threads) Partition(kafka.cluster.Partition) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) Setup(org.openjdk.jmh.annotations.Setup) Scheduler(kafka.utils.Scheduler) Param(org.openjdk.jmh.annotations.Param) ApiVersion(kafka.api.ApiVersion) BrokerTopicStats(kafka.server.BrokerTopicStats) State(org.openjdk.jmh.annotations.State) Option(scala.Option) Collectors(java.util.stream.Collectors) Benchmark(org.openjdk.jmh.annotations.Benchmark) File(java.io.File) AlterIsrManager(kafka.server.AlterIsrManager) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Metrics(org.apache.kafka.common.metrics.Metrics) MetadataCache(kafka.server.MetadataCache) Level(org.openjdk.jmh.annotations.Level) JavaConverters(scala.collection.JavaConverters) TestUtils(kafka.utils.TestUtils) Fork(org.openjdk.jmh.annotations.Fork) LogDirFailureChannel(kafka.server.LogDirFailureChannel)
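
The setup above starts a KafkaScheduler, a log manager, and a replica manager, so a matching @TearDown is needed to release them between trials. The teardown is not part of this snippet; the following is only a minimal sketch of what it could look like, assuming the field names from the setup method and that the temporary log directories should be deleted afterwards (the method name tearDown is illustrative).

@TearDown(Level.Trial)
public void tearDown() throws Exception {
    // Shut down in roughly reverse order of creation; pass false to skip another
    // high-watermark checkpoint, since setup already wrote one.
    this.replicaManager.shutdown(false);
    this.logManager.shutdown();
    this.metrics.close();
    this.scheduler.shutdown();
    this.quotaManagers.shutdown();
    // Remove the temporary log directories created for the benchmark.
    for (File dir : JavaConverters.seqAsJavaList(logManager.liveLogDirs())) {
        Utils.delete(dir);
    }
}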

Example 2 with KafkaScheduler

Use of kafka.utils.KafkaScheduler in the Apache Kafka project (apache/kafka).

From the class PartitionCreationBench, the method setup:

@SuppressWarnings("deprecation")
@Setup(Level.Invocation)
public void setup() {
    if (useTopicIds)
        topicId = Option.apply(Uuid.randomUuid());
    else
        topicId = Option.empty();
    this.scheduler = new KafkaScheduler(1, "scheduler-thread", true);
    this.brokerProperties = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect(), true, true, 9092, Option.empty(), Option.empty(), Option.empty(), true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1));
    this.metrics = new Metrics();
    this.time = Time.SYSTEM;
    this.failureChannel = new LogDirFailureChannel(brokerProperties.logDirs().size());
    final BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    final List<File> files = JavaConverters.seqAsJavaList(brokerProperties.logDirs()).stream().map(File::new).collect(Collectors.toList());
    CleanerConfig cleanerConfig = CleanerConfig.apply(1, 4 * 1024 * 1024L, 0.9d, 1024 * 1024, 32 * 1024 * 1024, Double.MAX_VALUE, 15 * 1000, true, "MD5");
    ConfigRepository configRepository = new MockConfigRepository();
    this.logManager = new LogManagerBuilder()
        .setLogDirs(files).setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(configRepository).setInitialDefaultConfig(createLogConfig())
        .setCleanerConfig(cleanerConfig).setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L).setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L).setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000).setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler).setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(failureChannel).setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    scheduler.startup();
    this.quotaManagers = QuotaFactory.instantiate(this.brokerProperties, this.metrics, this.time, "");
    // Stub KafkaZkClient that returns empty per-entity configs, so the benchmark never contacts a real ZooKeeper.
    this.zkClient = new KafkaZkClient(null, false, Time.SYSTEM) {

        @Override
        public Properties getEntityConfigs(String rootEntityType, String sanitizedEntityName) {
            return new Properties();
        }
    };
    this.alterIsrManager = TestUtils.createAlterIsrManager();
    this.replicaManager = new ReplicaManagerBuilder()
        .setConfig(brokerProperties).setMetrics(metrics).setTime(time).setZkClient(zkClient)
        .setScheduler(scheduler).setLogManager(logManager).setQuotaManagers(quotaManagers)
        .setBrokerTopicStats(brokerTopicStats).setMetadataCache(new ZkMetadataCache(this.brokerProperties.brokerId()))
        .setLogDirFailureChannel(failureChannel).setAlterIsrManager(alterIsrManager)
        .build();
    replicaManager.startup();
    replicaManager.checkpointHighWatermarks();
}
Also used : ConfigRepository(kafka.server.metadata.ConfigRepository) MockConfigRepository(kafka.server.metadata.MockConfigRepository) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) ReplicaManagerBuilder(kafka.server.builders.ReplicaManagerBuilder) LogDirFailureChannel(kafka.server.LogDirFailureChannel) CleanerConfig(kafka.log.CleanerConfig) KafkaZkClient(kafka.zk.KafkaZkClient) Properties(java.util.Properties) ZkMetadataCache(kafka.server.metadata.ZkMetadataCache) Metrics(org.apache.kafka.common.metrics.Metrics) BrokerTopicStats(kafka.server.BrokerTopicStats) KafkaScheduler(kafka.utils.KafkaScheduler) File(java.io.File) Setup(org.openjdk.jmh.annotations.Setup)
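
The LogManagerBuilder call above references a createLogConfig() helper that is not shown in this snippet. A minimal stand-in, assuming broker-default log settings are acceptable for the benchmark, could simply return the default LogConfig (Example 1 passes LogConfig.apply() directly for the same purpose); the real helper may instead set explicit segment, retention, and flush properties on a Properties object.

private static LogConfig createLogConfig() {
    // Sketch only: fall back to the default LogConfig, as Example 1 does with LogConfig.apply().
    return LogConfig.apply();
}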

Aggregations

File (java.io.File) 2
CleanerConfig (kafka.log.CleanerConfig) 2
BrokerTopicStats (kafka.server.BrokerTopicStats) 2
LogDirFailureChannel (kafka.server.LogDirFailureChannel) 2
ReplicaManagerBuilder (kafka.server.builders.ReplicaManagerBuilder) 2
MockConfigRepository (kafka.server.metadata.MockConfigRepository) 2
KafkaScheduler (kafka.utils.KafkaScheduler) 2
Metrics (org.apache.kafka.common.metrics.Metrics) 2
Setup (org.openjdk.jmh.annotations.Setup) 2
ArrayList (java.util.ArrayList) 1
List (java.util.List) 1
Properties (java.util.Properties) 1
TimeUnit (java.util.concurrent.TimeUnit) 1
Collectors (java.util.stream.Collectors) 1
ApiVersion (kafka.api.ApiVersion) 1
Partition (kafka.cluster.Partition) 1
LogConfig (kafka.log.LogConfig) 1
LogManager (kafka.log.LogManager) 1
AlterIsrManager (kafka.server.AlterIsrManager) 1
KafkaConfig (kafka.server.KafkaConfig) 1