Search in sources:

Example 1 with CleanerConfig

Use of kafka.log.CleanerConfig in the apache/kafka project.

From class ReplicaFetcherThreadBenchmark, method setup:

@Setup(Level.Trial)
public void setup() throws IOException {
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    Properties props = new Properties();
    props.put("zookeeper.connect", "127.0.0.1:9999");
    KafkaConfig config = new KafkaConfig(props);
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    List<File> logDirs = Collections.singletonList(logDir);
    logManager = new LogManagerBuilder()
        .setLogDirs(logDirs)
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository())
        .setInitialDefaultConfig(logConfig)
        // all-zero cleaner settings with enableCleaner = false: log cleaning is disabled for the benchmark
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> initialFetched = new LinkedHashMap<>();
    HashMap<String, Uuid> topicIds = new HashMap<>();
    scala.collection.mutable.Map<TopicPartition, InitialFetchState> initialFetchStates = new scala.collection.mutable.HashMap<>();
    List<UpdateMetadataRequestData.UpdateMetadataPartitionState> updatePartitionState = new ArrayList<>();
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("topic", i);
        List<Integer> replicas = Arrays.asList(0, 1, 2);
        LeaderAndIsrRequestData.LeaderAndIsrPartitionState partitionState =
            new LeaderAndIsrRequestData.LeaderAndIsrPartitionState()
                .setControllerEpoch(0)
                .setLeader(0)
                .setLeaderEpoch(0)
                .setIsr(replicas)
                .setZkVersion(1)
                .setReplicas(replicas)
                .setIsNew(true);
        IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
        OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
        Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
        AlterIsrManager isrChannelManager = Mockito.mock(AlterIsrManager.class);
        Partition partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM,
            isrChangeListener, new DelayedOperationsMock(tp), Mockito.mock(MetadataCache.class),
            logManager, isrChannelManager);
        partition.makeFollower(partitionState, offsetCheckpoints, topicId);
        pool.put(tp, partition);
        initialFetchStates.put(tp, new InitialFetchState(topicId, new BrokerEndPoint(3, "host", 3000), 0, 0));
        BaseRecords fetched = new BaseRecords() {

            @Override
            public int sizeInBytes() {
                return 0;
            }

            @Override
            public RecordsSend<? extends BaseRecords> toSend() {
                return null;
            }
        };
        initialFetched.put(new TopicIdPartition(topicId.get(), tp), new FetchResponseData.PartitionData()
            .setPartitionIndex(tp.partition())
            .setLastStableOffset(0)
            .setLogStartOffset(0)
            .setRecords(fetched));
        updatePartitionState.add(new UpdateMetadataRequestData.UpdateMetadataPartitionState()
            .setTopicName("topic")
            .setPartitionIndex(i)
            .setControllerEpoch(0)
            .setLeader(0)
            .setLeaderEpoch(0)
            .setIsr(replicas)
            .setZkVersion(1)
            .setReplicas(replicas));
    }
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(
        ApiKeys.UPDATE_METADATA.latestVersion(), 0, 0, 0,
        updatePartitionState, Collections.emptyList(), topicIds).build();
    // TODO: fix to support raft
    ZkMetadataCache metadataCache = new ZkMetadataCache(0);
    metadataCache.updateMetadata(0, updateMetadataRequest);
    replicaManager = new ReplicaManagerBuilder()
        .setConfig(config)
        .setMetrics(metrics)
        .setTime(new MockTime())
        .setZkClient(Mockito.mock(KafkaZkClient.class))
        .setScheduler(scheduler)
        .setLogManager(logManager)
        .setQuotaManagers(Mockito.mock(QuotaFactory.QuotaManagers.class))
        .setBrokerTopicStats(brokerTopicStats)
        .setMetadataCache(metadataCache)
        .setLogDirFailureChannel(new LogDirFailureChannel(logDirs.size()))
        .setAlterIsrManager(TestUtils.createAlterIsrManager())
        .build();
    fetcher = new ReplicaFetcherBenchThread(config, replicaManager, pool);
    fetcher.addPartitions(initialFetchStates);
    // force a pass to move partitions to fetching state. We do this in the setup phase
    // so that we do not measure this time as part of the steady state work
    fetcher.doWork();
    // handle response to engage the incremental fetch session handler
    fetcher.fetchSessionHandler().handleResponse(FetchResponse.of(Errors.NONE, 0, 999, initialFetched), ApiKeys.FETCH.latestVersion());
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) OffsetCheckpoints(kafka.server.checkpoints.OffsetCheckpoints) BaseRecords(org.apache.kafka.common.record.BaseRecords) AlterIsrManager(kafka.server.AlterIsrManager) BrokerTopicStats(kafka.server.BrokerTopicStats) UpdateMetadataRequest(org.apache.kafka.common.requests.UpdateMetadataRequest) MockTime(kafka.utils.MockTime) IsrChangeListener(kafka.cluster.IsrChangeListener) ReplicaManagerBuilder(kafka.server.builders.ReplicaManagerBuilder) InitialFetchState(kafka.server.InitialFetchState) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) MockConfigRepository(kafka.server.metadata.MockConfigRepository) TopicPartition(org.apache.kafka.common.TopicPartition) File(java.io.File) LeaderAndIsrRequestData(org.apache.kafka.common.message.LeaderAndIsrRequestData) LogConfig(kafka.log.LogConfig) ZkMetadataCache(kafka.server.metadata.ZkMetadataCache) MetadataCache(kafka.server.MetadataCache) Properties(java.util.Properties) LogDirFailureChannel(kafka.server.LogDirFailureChannel) CleanerConfig(kafka.log.CleanerConfig) UpdateMetadataRequestData(org.apache.kafka.common.message.UpdateMetadataRequestData) Partition(kafka.cluster.Partition) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) IOException(java.io.IOException) BrokerEndPoint(kafka.cluster.BrokerEndPoint) Uuid(org.apache.kafka.common.Uuid) KafkaConfig(kafka.server.KafkaConfig) Setup(org.openjdk.jmh.annotations.Setup)
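
Each of these benchmarks passes the same all-zero CleanerConfig, which effectively disables the log cleaner so cleaning never interferes with the measurement. As a reading aid, here is a minimal sketch annotating the positional constructor arguments; the parameter names are assumed from the CleanerConfig case class (the order is corroborated by the named-default values in the CleanerConfig.apply call in Example 4) and are not part of the original snippet:

CleanerConfig disabledCleaner = new CleanerConfig(
    0,      // numThreads: no cleaner threads
    0,      // dedupeBufferSize in bytes
    0,      // dedupeBufferLoadFactor
    0,      // ioBufferSize in bytes
    0,      // maxMessageSize in bytes
    0.0,    // maxIoBytesPerSecond: no I/O budget for cleaning
    0,      // backOffMs between cleaner runs
    false,  // enableCleaner: cleaning switched off entirely
    "MD5"); // hashAlgorithm for the dedupe offset map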

Example 2 with CleanerConfig

Use of kafka.log.CleanerConfig in the apache/kafka project.

From class UpdateFollowerFetchStateBenchmark, method setUp:

@Setup(Level.Trial)
public void setUp() {
    scheduler.startup();
    LogConfig logConfig = createLogConfig();
    logManager = new LogManagerBuilder()
        .setLogDirs(Collections.singletonList(logDir))
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository())
        .setInitialDefaultConfig(logConfig)
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
    Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), topicPartition)).thenReturn(Option.apply(0L));
    DelayedOperations delayedOperations = new DelayedOperationsMock();
    // one leader, plus two followers
    List<Integer> replicas = new ArrayList<>();
    replicas.add(0);
    replicas.add(1);
    replicas.add(2);
    LeaderAndIsrPartitionState partitionState = new LeaderAndIsrPartitionState()
        .setControllerEpoch(0)
        .setLeader(0)
        .setLeaderEpoch(0)
        .setIsr(replicas)
        .setZkVersion(1)
        .setReplicas(replicas)
        .setIsNew(true);
    IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
    AlterIsrManager alterIsrManager = Mockito.mock(AlterIsrManager.class);
    partition = new Partition(topicPartition, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM,
        isrChangeListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterIsrManager);
    partition.makeLeader(partitionState, offsetCheckpoints, topicId);
}
Also used: Partition(kafka.cluster.Partition) TopicPartition(org.apache.kafka.common.TopicPartition) IsrChangeListener(kafka.cluster.IsrChangeListener) ArrayList(java.util.ArrayList) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) CleanerConfig(kafka.log.CleanerConfig) OffsetCheckpoints(kafka.server.checkpoints.OffsetCheckpoints) AlterIsrManager(kafka.server.AlterIsrManager) MockConfigRepository(kafka.server.metadata.MockConfigRepository) LogConfig(kafka.log.LogConfig) DelayedOperations(kafka.cluster.DelayedOperations) LeaderAndIsrPartitionState(org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState) Setup(org.openjdk.jmh.annotations.Setup)
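
This setup leaves the partition as leader with followers 1 and 2 in the ISR; the hot path being measured is Partition.updateFollowerFetchState. A hedged sketch of what the benchmark body might look like, assuming the five-argument signature (followerId, fetchOffsetMetadata, followerStartOffset, followerFetchTimeMs, leaderEndOffset) from this era of the codebase; the nextOffset field is a hypothetical counter, not part of the snippet above:

@Benchmark
public void updateFollowerFetchStateBench() {
    // advance both followers by one simulated fetch per invocation
    LogOffsetMetadata fetchMetadata = new LogOffsetMetadata(nextOffset, nextOffset, 0);
    partition.updateFollowerFetchState(1, fetchMetadata, 0L, 1L, nextOffset);
    partition.updateFollowerFetchState(2, fetchMetadata, 0L, 1L, nextOffset);
    nextOffset++;
}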

Example 3 with CleanerConfig

Use of kafka.log.CleanerConfig in the apache/kafka project.

From class PartitionMakeFollowerBenchmark, method setup:

@Setup(Level.Trial)
public void setup() throws IOException {
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    logManager = new LogManagerBuilder()
        .setLogDirs(Collections.singletonList(logDir))
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository())
        .setInitialDefaultConfig(logConfig)
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    TopicPartition tp = new TopicPartition("topic", 0);
    topicId = OptionConverters.toScala(Optional.of(Uuid.randomUuid()));
    Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
    IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
    AlterIsrManager alterIsrManager = Mockito.mock(AlterIsrManager.class);
    partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM,
        isrChangeListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterIsrManager);
    partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId);
    executorService.submit((Runnable) () -> {
        SimpleRecord[] simpleRecords = new SimpleRecord[] {
            new SimpleRecord(1L, "foo".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8)),
            new SimpleRecord(2L, "bar".getBytes(StandardCharsets.UTF_8), "2".getBytes(StandardCharsets.UTF_8))
        };
        int initialOffSet = 0;
        while (true) {
            MemoryRecords memoryRecords = MemoryRecords.withRecords(initialOffSet, CompressionType.NONE, 0, simpleRecords);
            partition.appendRecordsToFollowerOrFutureReplica(memoryRecords, false);
            initialOffSet = initialOffSet + 2;
        }
    });
}
Also used: Partition(kafka.cluster.Partition) TopicPartition(org.apache.kafka.common.TopicPartition) IsrChangeListener(kafka.cluster.IsrChangeListener) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) IOException(java.io.IOException) LogDirFailureChannel(kafka.server.LogDirFailureChannel) CleanerConfig(kafka.log.CleanerConfig) AlterIsrManager(kafka.server.AlterIsrManager) BrokerTopicStats(kafka.server.BrokerTopicStats) MockConfigRepository(kafka.server.metadata.MockConfigRepository) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) LogConfig(kafka.log.LogConfig) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Setup(org.openjdk.jmh.annotations.Setup)
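
Here the background thread appends to the follower log indefinitely, so makeFollower is measured against a log that is actively taking writes. A hedged sketch of the measured call, reusing the makeFollower(partitionState, offsetCheckpoints, topicId) shape already visible in Example 1; the replicas list and the epoch bookkeeping below are illustrative, not taken from the original class:

@Benchmark
public boolean testMakeFollower() {
    // rebuild follower state each invocation; bumping the epoch keeps the call from short-circuiting
    LeaderAndIsrPartitionState partitionState = new LeaderAndIsrPartitionState()
        .setControllerEpoch(0)
        .setLeader(0)
        .setLeaderEpoch(leaderEpoch++)  // hypothetical int field
        .setIsr(replicas)               // hypothetical List<Integer> of replica ids
        .setZkVersion(1)
        .setReplicas(replicas)
        .setIsNew(true);
    return partition.makeFollower(partitionState, offsetCheckpoints, topicId);
}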

Example 4 with CleanerConfig

Use of kafka.log.CleanerConfig in the apache/kafka project.

From class PartitionCreationBench, method setup:

@SuppressWarnings("deprecation")
@Setup(Level.Invocation)
public void setup() {
    if (useTopicIds)
        topicId = Option.apply(Uuid.randomUuid());
    else
        topicId = Option.empty();
    this.scheduler = new KafkaScheduler(1, "scheduler-thread", true);
    this.brokerProperties = KafkaConfig.fromProps(TestUtils.createBrokerConfig(
        0, TestUtils.MockZkConnect(), true, true, 9092,
        Option.empty(), Option.empty(), Option.empty(),
        true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1));
    this.metrics = new Metrics();
    this.time = Time.SYSTEM;
    this.failureChannel = new LogDirFailureChannel(brokerProperties.logDirs().size());
    final BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    final List<File> files = JavaConverters.seqAsJavaList(brokerProperties.logDirs()).stream().map(File::new).collect(Collectors.toList());
    CleanerConfig cleanerConfig = CleanerConfig.apply(
        1,                  // numThreads
        4 * 1024 * 1024L,   // dedupeBufferSize
        0.9d,               // dedupeBufferLoadFactor
        1024 * 1024,        // ioBufferSize
        32 * 1024 * 1024,   // maxMessageSize
        Double.MAX_VALUE,   // maxIoBytesPerSecond: effectively unthrottled
        15 * 1000,          // backOffMs
        true,               // enableCleaner
        "MD5");             // hashAlgorithm
    ConfigRepository configRepository = new MockConfigRepository();
    this.logManager = new LogManagerBuilder()
        .setLogDirs(files)
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(configRepository)
        .setInitialDefaultConfig(createLogConfig())
        .setCleanerConfig(cleanerConfig)
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(failureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    scheduler.startup();
    this.quotaManagers = QuotaFactory.instantiate(this.brokerProperties, this.metrics, this.time, "");
    this.zkClient = new KafkaZkClient(null, false, Time.SYSTEM) {

        @Override
        public Properties getEntityConfigs(String rootEntityType, String sanitizedEntityName) {
            return new Properties();
        }
    };
    this.alterIsrManager = TestUtils.createAlterIsrManager();
    this.replicaManager = new ReplicaManagerBuilder()
        .setConfig(brokerProperties)
        .setMetrics(metrics)
        .setTime(time)
        .setZkClient(zkClient)
        .setScheduler(scheduler)
        .setLogManager(logManager)
        .setQuotaManagers(quotaManagers)
        .setBrokerTopicStats(brokerTopicStats)
        .setMetadataCache(new ZkMetadataCache(this.brokerProperties.brokerId()))
        .setLogDirFailureChannel(failureChannel)
        .setAlterIsrManager(alterIsrManager)
        .build();
    replicaManager.startup();
    replicaManager.checkpointHighWatermarks();
}
Also used: ConfigRepository(kafka.server.metadata.ConfigRepository) MockConfigRepository(kafka.server.metadata.MockConfigRepository) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) ReplicaManagerBuilder(kafka.server.builders.ReplicaManagerBuilder) LogDirFailureChannel(kafka.server.LogDirFailureChannel) CleanerConfig(kafka.log.CleanerConfig) KafkaZkClient(kafka.zk.KafkaZkClient) Properties(java.util.Properties) ZkMetadataCache(kafka.server.metadata.ZkMetadataCache) Metrics(org.apache.kafka.common.metrics.Metrics) BrokerTopicStats(kafka.server.BrokerTopicStats) KafkaScheduler(kafka.utils.KafkaScheduler) File(java.io.File) Setup(org.openjdk.jmh.annotations.Setup)
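
Because this setup runs at Level.Invocation and starts real components (scheduler, log manager, replica manager), each invocation needs a matching teardown. A minimal sketch of what that could look like, using only shutdown/close methods these classes are known to expose; the ordering and the boolean passed to ReplicaManager.shutdown are assumptions:

@TearDown(Level.Invocation)
public void tearDown() {
    // unwind in roughly the reverse order of construction (assumed ordering)
    replicaManager.shutdown(false); // skip the high-watermark checkpoint on exit
    logManager.shutdown();
    scheduler.shutdown();
    quotaManagers.shutdown();
    metrics.close();
    zkClient.close();
    // the benchmark's log directories would also be deleted here,
    // e.g. via org.apache.kafka.common.utils.Utils.delete(dir)
}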

Aggregations

CleanerConfig (kafka.log.CleanerConfig): 4
LogManagerBuilder (kafka.server.builders.LogManagerBuilder): 4
MockConfigRepository (kafka.server.metadata.MockConfigRepository): 4
Setup (org.openjdk.jmh.annotations.Setup): 4
IsrChangeListener (kafka.cluster.IsrChangeListener): 3
Partition (kafka.cluster.Partition): 3
LogConfig (kafka.log.LogConfig): 3
AlterIsrManager (kafka.server.AlterIsrManager): 3
BrokerTopicStats (kafka.server.BrokerTopicStats): 3
LogDirFailureChannel (kafka.server.LogDirFailureChannel): 3
TopicPartition (org.apache.kafka.common.TopicPartition): 3
File (java.io.File): 2
IOException (java.io.IOException): 2
ArrayList (java.util.ArrayList): 2
Properties (java.util.Properties): 2
ReplicaManagerBuilder (kafka.server.builders.ReplicaManagerBuilder): 2
OffsetCheckpoints (kafka.server.checkpoints.OffsetCheckpoints): 2
ZkMetadataCache (kafka.server.metadata.ZkMetadataCache): 2
HashMap (java.util.HashMap): 1
LinkedHashMap (java.util.LinkedHashMap): 1