Use of kafka.log.LogManager in project kafka by apache.
Example: the setup() method of the JMH benchmark class CheckpointBench. It builds a LogManager over the broker's log directories, wires it into a ReplicaManager, creates the benchmark's topic partitions, and writes an initial high-watermark checkpoint before the measured iterations run.
@SuppressWarnings("deprecation")
@Setup(Level.Trial)
public void setup() {
    // Single-threaded scheduler and a minimal broker config backed by a mock ZK connect string.
    this.scheduler = new KafkaScheduler(1, "scheduler-thread", true);
    this.brokerProperties = KafkaConfig.fromProps(TestUtils.createBrokerConfig(
            0, TestUtils.MockZkConnect(), true, true, 9092,
            Option.empty(), Option.empty(), Option.empty(),
            true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1));
    this.metrics = new Metrics();
    this.time = new MockTime();
    this.failureChannel = new LogDirFailureChannel(brokerProperties.logDirs().size());

    // Build a LogManager over the configured log directories.
    final List<File> files = JavaConverters.seqAsJavaList(brokerProperties.logDirs()).stream()
            .map(File::new)
            .collect(Collectors.toList());
    this.logManager = TestUtils.createLogManager(JavaConverters.asScalaBuffer(files),
            LogConfig.apply(), new MockConfigRepository(),
            CleanerConfig.apply(1, 4 * 1024 * 1024L, 0.9d, 1024 * 1024, 32 * 1024 * 1024,
                    Double.MAX_VALUE, 15 * 1000, true, "MD5"),
            time, ApiVersion.latestVersion());
    scheduler.startup();

    // Wire the LogManager into a ReplicaManager.
    final BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    final MetadataCache metadataCache = MetadataCache.zkMetadataCache(this.brokerProperties.brokerId());
    this.quotaManagers = QuotaFactory.instantiate(this.brokerProperties, this.metrics, this.time, "");
    this.alterIsrManager = TestUtils.createAlterIsrManager();
    this.replicaManager = new ReplicaManagerBuilder()
            .setConfig(brokerProperties).setMetrics(metrics).setTime(time)
            .setScheduler(scheduler).setLogManager(logManager)
            .setQuotaManagers(quotaManagers).setBrokerTopicStats(brokerTopicStats)
            .setMetadataCache(metadataCache).setLogDirFailureChannel(failureChannel)
            .setAlterIsrManager(alterIsrManager)
            .build();
    replicaManager.startup();

    // Create every benchmark topic-partition and its log so checkpointHighWatermarks()
    // has something to write; the OffsetCheckpoints lambda reports offset 0 for every log.
    List<TopicPartition> topicPartitions = new ArrayList<>();
    for (int topicNum = 0; topicNum < numTopics; topicNum++) {
        final String topicName = this.topicName + "-" + topicNum;
        for (int partitionNum = 0; partitionNum < numPartitions; partitionNum++) {
            topicPartitions.add(new TopicPartition(topicName, partitionNum));
        }
    }
    OffsetCheckpoints checkpoints = (logDir, topicPartition) -> Option.apply(0L);
    for (TopicPartition topicPartition : topicPartitions) {
        final Partition partition = this.replicaManager.createPartition(topicPartition);
        partition.createLogIfNotExists(true, false, checkpoints, Option.apply(Uuid.randomUuid()));
    }
    replicaManager.checkpointHighWatermarks();
}
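For symmetry, the benchmark also needs a @TearDown that releases everything setup() created. The sketch below is a minimal version assuming the same fields, with Utils being org.apache.kafka.common.utils.Utils; it follows the usual cleanup order (replica manager, then metrics, scheduler, and quota managers, then the on-disk log directories), but the benchmark's actual teardown may differ slightly.

@TearDown(Level.Trial)
public void tearDown() throws Exception {
    // Stop the replica manager without checkpointing again, then close the remaining components.
    this.replicaManager.shutdown(false);
    this.metrics.close();
    this.scheduler.shutdown();
    this.quotaManagers.shutdown();
    // Remove the temporary log directories created from the broker config.
    for (File dir : JavaConverters.asJavaCollection(logManager.liveLogDirs())) {
        Utils.delete(dir);
    }
}

The jmh-benchmarks module ships a wrapper script for running a single benchmark; an invocation along the lines of ./jmh-benchmarks/jmh.sh CheckpointBench should work, though the exact path and options depend on the Kafka version.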