Search in sources :

Example 1 with MockConfigRepository

Use of kafka.server.metadata.MockConfigRepository in the Apache Kafka project.

From the class CheckpointBench, method setup:

// JMH trial-level setup for CheckpointBench: boots a scheduler, log manager and
// replica manager against a mock/test broker config, then creates
// numTopics * numPartitions partitions so the checkpointing benchmark has real
// partition state to work on.
@SuppressWarnings("deprecation")
@Setup(Level.Trial)
public void setup() {
    this.scheduler = new KafkaScheduler(1, "scheduler-thread", true);
    // Broker config built from test defaults (mock ZK connect string, port 9092).
    // NOTE(review): the long run of positional boolean/int arguments mirrors the
    // TestUtils.createBrokerConfig signature — verify against this Kafka version.
    this.brokerProperties = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect(), true, true, 9092, Option.empty(), Option.empty(), Option.empty(), true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1));
    this.metrics = new Metrics();
    this.time = new MockTime();
    // One failure-channel slot per configured log directory.
    this.failureChannel = new LogDirFailureChannel(brokerProperties.logDirs().size());
    // Convert the Scala seq of log-dir path strings into java.io.File objects.
    final List<File> files = JavaConverters.seqAsJavaList(brokerProperties.logDirs()).stream().map(File::new).collect(Collectors.toList());
    // Real log manager over a MockConfigRepository; cleaner parameters are passed
    // positionally to CleanerConfig.apply — presumably 1 cleaner thread with MD5
    // hashing, TODO confirm against the CleanerConfig signature.
    this.logManager = TestUtils.createLogManager(JavaConverters.asScalaBuffer(files), LogConfig.apply(), new MockConfigRepository(), CleanerConfig.apply(1, 4 * 1024 * 1024L, 0.9d, 1024 * 1024, 32 * 1024 * 1024, Double.MAX_VALUE, 15 * 1000, true, "MD5"), time, ApiVersion.latestVersion());
    scheduler.startup();
    final BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    // ZooKeeper-mode metadata cache keyed by this broker's id.
    final MetadataCache metadataCache = MetadataCache.zkMetadataCache(this.brokerProperties.brokerId());
    this.quotaManagers = QuotaFactory.instantiate(this.brokerProperties, this.metrics, this.time, "");
    this.alterIsrManager = TestUtils.createAlterIsrManager();
    this.replicaManager = new ReplicaManagerBuilder().setConfig(brokerProperties).setMetrics(metrics).setTime(time).setScheduler(scheduler).setLogManager(logManager).setQuotaManagers(quotaManagers).setBrokerTopicStats(brokerTopicStats).setMetadataCache(metadataCache).setLogDirFailureChannel(failureChannel).setAlterIsrManager(alterIsrManager).build();
    replicaManager.startup();
    // Enumerate every (topic, partition) pair the benchmark will checkpoint.
    List<TopicPartition> topicPartitions = new ArrayList<>();
    for (int topicNum = 0; topicNum < numTopics; topicNum++) {
        final String topicName = this.topicName + "-" + topicNum;
        for (int partitionNum = 0; partitionNum < numPartitions; partitionNum++) {
            topicPartitions.add(new TopicPartition(topicName, partitionNum));
        }
    }
    // Checkpoint stub: every partition reports a starting offset of 0.
    OffsetCheckpoints checkpoints = (logDir, topicPartition) -> Option.apply(0L);
    for (TopicPartition topicPartition : topicPartitions) {
        final Partition partition = this.replicaManager.createPartition(topicPartition);
        // Create a log (with a fresh random topic id) so checkpoints have state.
        partition.createLogIfNotExists(true, false, checkpoints, Option.apply(Uuid.randomUuid()));
    }
    // Write the initial high-watermark checkpoint files once during setup so the
    // measured iterations see steady-state checkpoint behavior.
    replicaManager.checkpointHighWatermarks();
}
Also used : Uuid(org.apache.kafka.common.Uuid) Measurement(org.openjdk.jmh.annotations.Measurement) ReplicaManagerBuilder(kafka.server.builders.ReplicaManagerBuilder) CleanerConfig(kafka.log.CleanerConfig) OffsetCheckpoints(kafka.server.checkpoints.OffsetCheckpoints) QuotaFactory(kafka.server.QuotaFactory) KafkaScheduler(kafka.utils.KafkaScheduler) LogConfig(kafka.log.LogConfig) Scope(org.openjdk.jmh.annotations.Scope) Warmup(org.openjdk.jmh.annotations.Warmup) ArrayList(java.util.ArrayList) MockTime(kafka.utils.MockTime) OutputTimeUnit(org.openjdk.jmh.annotations.OutputTimeUnit) LogManager(kafka.log.LogManager) ReplicaManager(kafka.server.ReplicaManager) MockConfigRepository(kafka.server.metadata.MockConfigRepository) TearDown(org.openjdk.jmh.annotations.TearDown) KafkaConfig(kafka.server.KafkaConfig) Threads(org.openjdk.jmh.annotations.Threads) Partition(kafka.cluster.Partition) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) Setup(org.openjdk.jmh.annotations.Setup) Scheduler(kafka.utils.Scheduler) Param(org.openjdk.jmh.annotations.Param) ApiVersion(kafka.api.ApiVersion) BrokerTopicStats(kafka.server.BrokerTopicStats) State(org.openjdk.jmh.annotations.State) Option(scala.Option) Collectors(java.util.stream.Collectors) Benchmark(org.openjdk.jmh.annotations.Benchmark) File(java.io.File) AlterIsrManager(kafka.server.AlterIsrManager) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Metrics(org.apache.kafka.common.metrics.Metrics) MetadataCache(kafka.server.MetadataCache) Level(org.openjdk.jmh.annotations.Level) JavaConverters(scala.collection.JavaConverters) TestUtils(kafka.utils.TestUtils) Fork(org.openjdk.jmh.annotations.Fork) LogDirFailureChannel(kafka.server.LogDirFailureChannel) Partition(kafka.cluster.Partition) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) MetadataCache(kafka.server.MetadataCache) 
ReplicaManagerBuilder(kafka.server.builders.ReplicaManagerBuilder) LogDirFailureChannel(kafka.server.LogDirFailureChannel) OffsetCheckpoints(kafka.server.checkpoints.OffsetCheckpoints) Metrics(org.apache.kafka.common.metrics.Metrics) BrokerTopicStats(kafka.server.BrokerTopicStats) MockConfigRepository(kafka.server.metadata.MockConfigRepository) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaScheduler(kafka.utils.KafkaScheduler) File(java.io.File) MockTime(kafka.utils.MockTime) Setup(org.openjdk.jmh.annotations.Setup)

Example 2 with MockConfigRepository

Use of kafka.server.metadata.MockConfigRepository in the Apache Kafka project.

From the class ReplicaFetcherThreadBenchmark, method setup:

// JMH trial-level setup for ReplicaFetcherThreadBenchmark: builds a real log
// manager over a mock config repository, registers partitionCount follower
// partitions with canned fetch responses, and primes the fetcher thread so the
// measured iterations cover steady-state fetch work only.
@Setup(Level.Trial)
public void setup() throws IOException {
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    // Minimal broker config; the ZK address is a dummy and is never dialed.
    Properties props = new Properties();
    props.put("zookeeper.connect", "127.0.0.1:9999");
    KafkaConfig config = new KafkaConfig(props);
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    List<File> logDirs = Collections.singletonList(logDir);
    logManager = new LogManagerBuilder().setLogDirs(logDirs).setInitialOfflineDirs(Collections.emptyList()).setConfigRepository(new MockConfigRepository()).setInitialDefaultConfig(logConfig).setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5")).setRecoveryThreadsPerDataDir(1).setFlushCheckMs(1000L).setFlushRecoveryOffsetCheckpointMs(10000L).setFlushStartOffsetCheckpointMs(10000L).setRetentionCheckMs(1000L).setMaxPidExpirationMs(60000).setInterBrokerProtocolVersion(ApiVersion.latestVersion()).setScheduler(scheduler).setBrokerTopicStats(brokerTopicStats).setLogDirFailureChannel(logDirFailureChannel).setTime(Time.SYSTEM).setKeepPartitionMetadataFile(true).build();
    // Accumulators filled by the loop below: one canned fetch response, one
    // initial fetch state, and one update-metadata entry per partition.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> initialFetched = new LinkedHashMap<>();
    HashMap<String, Uuid> topicIds = new HashMap<>();
    scala.collection.mutable.Map<TopicPartition, InitialFetchState> initialFetchStates = new scala.collection.mutable.HashMap<>();
    List<UpdateMetadataRequestData.UpdateMetadataPartitionState> updatePartitionState = new ArrayList<>();
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("topic", i);
        // Three-replica assignment; all replicas are in the ISR.
        List<Integer> replicas = Arrays.asList(0, 1, 2);
        LeaderAndIsrRequestData.LeaderAndIsrPartitionState partitionState = new LeaderAndIsrRequestData.LeaderAndIsrPartitionState().setControllerEpoch(0).setLeader(0).setLeaderEpoch(0).setIsr(replicas).setZkVersion(1).setReplicas(replicas).setIsNew(true);
        IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
        // Checkpoint stub: each partition starts at offset 0.
        OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
        Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
        AlterIsrManager isrChannelManager = Mockito.mock(AlterIsrManager.class);
        Partition partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM, isrChangeListener, new DelayedOperationsMock(tp), Mockito.mock(MetadataCache.class), logManager, isrChannelManager);
        // Make this broker a follower of leader 0 for the partition.
        partition.makeFollower(partitionState, offsetCheckpoints, topicId);
        pool.put(tp, partition);
        initialFetchStates.put(tp, new InitialFetchState(topicId, new BrokerEndPoint(3, "host", 3000), 0, 0));
        // Zero-byte record set used as the canned leader response payload.
        BaseRecords fetched = new BaseRecords() {

            @Override
            public int sizeInBytes() {
                return 0;
            }

            @Override
            public RecordsSend<? extends BaseRecords> toSend() {
                return null;
            }
        };
        initialFetched.put(new TopicIdPartition(topicId.get(), tp), new FetchResponseData.PartitionData().setPartitionIndex(tp.partition()).setLastStableOffset(0).setLogStartOffset(0).setRecords(fetched));
        updatePartitionState.add(new UpdateMetadataRequestData.UpdateMetadataPartitionState().setTopicName("topic").setPartitionIndex(i).setControllerEpoch(0).setLeader(0).setLeaderEpoch(0).setIsr(replicas).setZkVersion(1).setReplicas(replicas));
    }
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion(), 0, 0, 0, updatePartitionState, Collections.emptyList(), topicIds).build();
    // TODO: fix to support raft
    ZkMetadataCache metadataCache = new ZkMetadataCache(0);
    metadataCache.updateMetadata(0, updateMetadataRequest);
    replicaManager = new ReplicaManagerBuilder().setConfig(config).setMetrics(metrics).setTime(new MockTime()).setZkClient(Mockito.mock(KafkaZkClient.class)).setScheduler(scheduler).setLogManager(logManager).setQuotaManagers(Mockito.mock(QuotaFactory.QuotaManagers.class)).setBrokerTopicStats(brokerTopicStats).setMetadataCache(metadataCache).setLogDirFailureChannel(new LogDirFailureChannel(logDirs.size())).setAlterIsrManager(TestUtils.createAlterIsrManager()).build();
    fetcher = new ReplicaFetcherBenchThread(config, replicaManager, pool);
    fetcher.addPartitions(initialFetchStates);
    // force a pass to move partitions to fetching state. We do this in the setup phase
    // so that we do not measure this time as part of the steady state work
    fetcher.doWork();
    // handle response to engage the incremental fetch session handler
    fetcher.fetchSessionHandler().handleResponse(FetchResponse.of(Errors.NONE, 0, 999, initialFetched), ApiKeys.FETCH.latestVersion());
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) OffsetCheckpoints(kafka.server.checkpoints.OffsetCheckpoints) LinkedHashMap(java.util.LinkedHashMap) BaseRecords(org.apache.kafka.common.record.BaseRecords) AlterIsrManager(kafka.server.AlterIsrManager) BrokerTopicStats(kafka.server.BrokerTopicStats) UpdateMetadataRequest(org.apache.kafka.common.requests.UpdateMetadataRequest) MockTime(kafka.utils.MockTime) IsrChangeListener(kafka.cluster.IsrChangeListener) ReplicaManagerBuilder(kafka.server.builders.ReplicaManagerBuilder) InitialFetchState(kafka.server.InitialFetchState) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) MockConfigRepository(kafka.server.metadata.MockConfigRepository) TopicPartition(org.apache.kafka.common.TopicPartition) File(java.io.File) LeaderAndIsrRequestData(org.apache.kafka.common.message.LeaderAndIsrRequestData) LogConfig(kafka.log.LogConfig) ZkMetadataCache(kafka.server.metadata.ZkMetadataCache) MetadataCache(kafka.server.MetadataCache) Properties(java.util.Properties) LogDirFailureChannel(kafka.server.LogDirFailureChannel) CleanerConfig(kafka.log.CleanerConfig) ZkMetadataCache(kafka.server.metadata.ZkMetadataCache) UpdateMetadataRequestData(org.apache.kafka.common.message.UpdateMetadataRequestData) Partition(kafka.cluster.Partition) TopicPartition(org.apache.kafka.common.TopicPartition) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) IOException(java.io.IOException) BrokerEndPoint(kafka.cluster.BrokerEndPoint) Uuid(org.apache.kafka.common.Uuid) BrokerEndPoint(kafka.cluster.BrokerEndPoint) KafkaConfig(kafka.server.KafkaConfig) Setup(org.openjdk.jmh.annotations.Setup)

Example 3 with MockConfigRepository

Use of kafka.server.metadata.MockConfigRepository in the Apache Kafka project.

From the class UpdateFollowerFetchStateBenchmark, method setUp:

// JMH trial-level setup for UpdateFollowerFetchStateBenchmark: builds a real
// log manager over a mock config repository, then creates a single partition
// and promotes it to leader so follower-fetch-state updates can be measured.
@Setup(Level.Trial)
public void setUp() {
    scheduler.startup();
    LogConfig defaultLogConfig = createLogConfig();
    // Log manager with short flush/retention intervals suited to a benchmark run.
    logManager = new LogManagerBuilder()
        .setLogDirs(Collections.singletonList(logDir))
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository())
        .setInitialDefaultConfig(defaultLogConfig)
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    // Checkpoint stub: the partition starts at offset 0.
    OffsetCheckpoints checkpoints = Mockito.mock(OffsetCheckpoints.class);
    Mockito.when(checkpoints.fetch(logDir.getAbsolutePath(), topicPartition)).thenReturn(Option.apply(0L));
    DelayedOperations delayedOps = new DelayedOperationsMock();
    // Replica assignment: broker 0 leads, brokers 1 and 2 follow.
    List<Integer> replicaIds = new ArrayList<>(3);
    for (int brokerId = 0; brokerId < 3; brokerId++) {
        replicaIds.add(brokerId);
    }
    LeaderAndIsrPartitionState leaderState = new LeaderAndIsrPartitionState()
        .setControllerEpoch(0)
        .setLeader(0)
        .setLeaderEpoch(0)
        .setIsr(replicaIds)
        .setZkVersion(1)
        .setReplicas(replicaIds)
        .setIsNew(true);
    IsrChangeListener isrListener = Mockito.mock(IsrChangeListener.class);
    AlterIsrManager isrManager = Mockito.mock(AlterIsrManager.class);
    partition = new Partition(topicPartition, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM, isrListener, delayedOps, Mockito.mock(MetadataCache.class), logManager, isrManager);
    // Promote to leader so the benchmark can exercise follower-state updates.
    partition.makeLeader(leaderState, checkpoints, topicId);
}
Also used : Partition(kafka.cluster.Partition) TopicPartition(org.apache.kafka.common.TopicPartition) IsrChangeListener(kafka.cluster.IsrChangeListener) ArrayList(java.util.ArrayList) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) CleanerConfig(kafka.log.CleanerConfig) OffsetCheckpoints(kafka.server.checkpoints.OffsetCheckpoints) AlterIsrManager(kafka.server.AlterIsrManager) MockConfigRepository(kafka.server.metadata.MockConfigRepository) LogConfig(kafka.log.LogConfig) DelayedOperations(kafka.cluster.DelayedOperations) LeaderAndIsrPartitionState(org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState) Setup(org.openjdk.jmh.annotations.Setup)

Example 4 with MockConfigRepository

Use of kafka.server.metadata.MockConfigRepository in the Apache Kafka project.

From the class MetadataRequestBenchmark, method createKafkaApis:

/**
 * Builds a KafkaApis instance wired entirely from the benchmark's mocked
 * collaborators, configured for ZooKeeper mode with a minimal broker config.
 */
private KafkaApis createKafkaApis() {
    // Only the two mandatory ZK-mode properties are supplied.
    Properties props = new Properties();
    props.put(KafkaConfig$.MODULE$.ZkConnectProp(), "zk");
    props.put(KafkaConfig$.MODULE$.BrokerIdProp(), String.valueOf(brokerId));
    KafkaConfig config = new KafkaConfig(props);
    return new KafkaApisBuilder()
        .setRequestChannel(requestChannel)
        .setMetadataSupport(new ZkSupport(adminManager, kafkaController, kafkaZkClient, Option.empty(), metadataCache))
        .setReplicaManager(replicaManager)
        .setGroupCoordinator(groupCoordinator)
        .setTxnCoordinator(transactionCoordinator)
        .setAutoTopicCreationManager(autoTopicCreationManager)
        .setBrokerId(brokerId)
        .setConfig(config)
        .setConfigRepository(new MockConfigRepository())
        .setMetadataCache(metadataCache)
        .setMetrics(metrics)
        // No authorizer: every request is allowed in the benchmark.
        .setAuthorizer(Optional.empty())
        .setQuotas(quotaManagers)
        .setFetchManager(fetchManager)
        .setBrokerTopicStats(brokerTopicStats)
        .setClusterId("clusterId")
        .setTime(Time.SYSTEM)
        .setTokenManager(null)
        .setApiVersionManager(new SimpleApiVersionManager(ApiMessageType.ListenerType.ZK_BROKER))
        .build();
}
Also used : MockConfigRepository(kafka.server.metadata.MockConfigRepository) KafkaApisBuilder(kafka.server.builders.KafkaApisBuilder) ZkSupport(kafka.server.ZkSupport) SimpleApiVersionManager(kafka.server.SimpleApiVersionManager) Properties(java.util.Properties) KafkaConfig(kafka.server.KafkaConfig)

Example 5 with MockConfigRepository

Use of kafka.server.metadata.MockConfigRepository in the Apache Kafka project.

From the class PartitionMakeFollowerBenchmark, method setup:

// JMH trial-level setup for PartitionMakeFollowerBenchmark: builds a real log
// manager over a mock config repository, creates one partition with a log, and
// starts a background task that keeps appending records to the follower log
// while the benchmark measures makeFollower-related work.
@Setup(Level.Trial)
public void setup() throws IOException {
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    logManager = new LogManagerBuilder().setLogDirs(Collections.singletonList(logDir)).setInitialOfflineDirs(Collections.emptyList()).setConfigRepository(new MockConfigRepository()).setInitialDefaultConfig(logConfig).setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5")).setRecoveryThreadsPerDataDir(1).setFlushCheckMs(1000L).setFlushRecoveryOffsetCheckpointMs(10000L).setFlushStartOffsetCheckpointMs(10000L).setRetentionCheckMs(1000L).setMaxPidExpirationMs(60000).setInterBrokerProtocolVersion(ApiVersion.latestVersion()).setScheduler(scheduler).setBrokerTopicStats(brokerTopicStats).setLogDirFailureChannel(logDirFailureChannel).setTime(Time.SYSTEM).setKeepPartitionMetadataFile(true).build();
    TopicPartition tp = new TopicPartition("topic", 0);
    // Random topic id wrapped as a Scala Option for the Partition APIs.
    topicId = OptionConverters.toScala(Optional.of(Uuid.randomUuid()));
    // Checkpoint stub: the partition starts at offset 0.
    Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
    IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
    AlterIsrManager alterIsrManager = Mockito.mock(AlterIsrManager.class);
    partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM, isrChangeListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterIsrManager);
    partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId);
    // Background writer: appends two-record batches to the follower/future
    // replica in a tight loop for the lifetime of the benchmark.
    // NOTE(review): the loop only stops when executorService is torn down —
    // confirm the benchmark's teardown actually shuts the executor down.
    executorService.submit((Runnable) () -> {
        SimpleRecord[] simpleRecords = new SimpleRecord[] { new SimpleRecord(1L, "foo".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8)), new SimpleRecord(2L, "bar".getBytes(StandardCharsets.UTF_8), "2".getBytes(StandardCharsets.UTF_8)) };
        int initialOffSet = 0;
        while (true) {
            // Each batch holds two records, so advance the base offset by 2.
            MemoryRecords memoryRecords = MemoryRecords.withRecords(initialOffSet, CompressionType.NONE, 0, simpleRecords);
            partition.appendRecordsToFollowerOrFutureReplica(memoryRecords, false);
            initialOffSet = initialOffSet + 2;
        }
    });
}
Also used : Partition(kafka.cluster.Partition) TopicPartition(org.apache.kafka.common.TopicPartition) IsrChangeListener(kafka.cluster.IsrChangeListener) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) IOException(java.io.IOException) LogDirFailureChannel(kafka.server.LogDirFailureChannel) CleanerConfig(kafka.log.CleanerConfig) AlterIsrManager(kafka.server.AlterIsrManager) BrokerTopicStats(kafka.server.BrokerTopicStats) MockConfigRepository(kafka.server.metadata.MockConfigRepository) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) LogConfig(kafka.log.LogConfig) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Setup(org.openjdk.jmh.annotations.Setup)

Aggregations

MockConfigRepository (kafka.server.metadata.MockConfigRepository)6 CleanerConfig (kafka.log.CleanerConfig)5 Setup (org.openjdk.jmh.annotations.Setup)5 Partition (kafka.cluster.Partition)4 LogConfig (kafka.log.LogConfig)4 AlterIsrManager (kafka.server.AlterIsrManager)4 BrokerTopicStats (kafka.server.BrokerTopicStats)4 LogDirFailureChannel (kafka.server.LogDirFailureChannel)4 LogManagerBuilder (kafka.server.builders.LogManagerBuilder)4 TopicPartition (org.apache.kafka.common.TopicPartition)4 File (java.io.File)3 ArrayList (java.util.ArrayList)3 Properties (java.util.Properties)3 IsrChangeListener (kafka.cluster.IsrChangeListener)3 KafkaConfig (kafka.server.KafkaConfig)3 ReplicaManagerBuilder (kafka.server.builders.ReplicaManagerBuilder)3 OffsetCheckpoints (kafka.server.checkpoints.OffsetCheckpoints)3 IOException (java.io.IOException)2 MetadataCache (kafka.server.MetadataCache)2 ZkMetadataCache (kafka.server.metadata.ZkMetadataCache)2