Example 1 with BaseRecords

Use of org.apache.kafka.common.record.BaseRecords in project kafka by apache.

From the class ReplicaFetcherThreadBenchmark, the method setup. It builds a LogManager, a pool of follower Partitions, and a ReplicaManager, then primes a ReplicaFetcherBenchThread with an initial fetch response so that steady-state benchmark iterations measure only the fetch path:

@Setup(Level.Trial)
public void setup() throws IOException {
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    Properties props = new Properties();
    props.put("zookeeper.connect", "127.0.0.1:9999");
    KafkaConfig config = new KafkaConfig(props);
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    List<File> logDirs = Collections.singletonList(logDir);
    logManager = new LogManagerBuilder()
        .setLogDirs(logDirs)
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository())
        .setInitialDefaultConfig(logConfig)
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> initialFetched = new LinkedHashMap<>();
    HashMap<String, Uuid> topicIds = new HashMap<>();
    scala.collection.mutable.Map<TopicPartition, InitialFetchState> initialFetchStates = new scala.collection.mutable.HashMap<>();
    List<UpdateMetadataRequestData.UpdateMetadataPartitionState> updatePartitionState = new ArrayList<>();
    // Create partitionCount follower partitions, each with an initial fetch state and a seeded fetch response.
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("topic", i);
        List<Integer> replicas = Arrays.asList(0, 1, 2);
        LeaderAndIsrRequestData.LeaderAndIsrPartitionState partitionState =
            new LeaderAndIsrRequestData.LeaderAndIsrPartitionState()
                .setControllerEpoch(0)
                .setLeader(0)
                .setLeaderEpoch(0)
                .setIsr(replicas)
                .setZkVersion(1)
                .setReplicas(replicas)
                .setIsNew(true);
        IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
        OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
        Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
        AlterIsrManager isrChannelManager = Mockito.mock(AlterIsrManager.class);
        Partition partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0,
            Time.SYSTEM, isrChangeListener, new DelayedOperationsMock(tp),
            Mockito.mock(MetadataCache.class), logManager, isrChannelManager);
        // topicId (an Option<Uuid>) is a field of the enclosing benchmark class
        partition.makeFollower(partitionState, offsetCheckpoints, topicId);
        pool.put(tp, partition);
        initialFetchStates.put(tp, new InitialFetchState(topicId, new BrokerEndPoint(3, "host", 3000), 0, 0));
        // Stub implementation: the benchmark only needs a placeholder payload in the fetch response.
        BaseRecords fetched = new BaseRecords() {

            @Override
            public int sizeInBytes() {
                return 0;
            }

            @Override
            public RecordsSend<? extends BaseRecords> toSend() {
                return null;
            }
        };
        initialFetched.put(new TopicIdPartition(topicId.get(), tp),
            new FetchResponseData.PartitionData()
                .setPartitionIndex(tp.partition())
                .setLastStableOffset(0)
                .setLogStartOffset(0)
                .setRecords(fetched));
        updatePartitionState.add(new UpdateMetadataRequestData.UpdateMetadataPartitionState()
            .setTopicName("topic")
            .setPartitionIndex(i)
            .setControllerEpoch(0)
            .setLeader(0)
            .setLeaderEpoch(0)
            .setIsr(replicas)
            .setZkVersion(1)
            .setReplicas(replicas));
    }
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(
        ApiKeys.UPDATE_METADATA.latestVersion(), 0, 0, 0,
        updatePartitionState, Collections.emptyList(), topicIds).build();
    // TODO: fix to support raft
    ZkMetadataCache metadataCache = new ZkMetadataCache(0);
    metadataCache.updateMetadata(0, updateMetadataRequest);
    replicaManager = new ReplicaManagerBuilder()
        .setConfig(config)
        .setMetrics(metrics)
        .setTime(new MockTime())
        .setZkClient(Mockito.mock(KafkaZkClient.class))
        .setScheduler(scheduler)
        .setLogManager(logManager)
        .setQuotaManagers(Mockito.mock(QuotaFactory.QuotaManagers.class))
        .setBrokerTopicStats(brokerTopicStats)
        .setMetadataCache(metadataCache)
        .setLogDirFailureChannel(new LogDirFailureChannel(logDirs.size()))
        .setAlterIsrManager(TestUtils.createAlterIsrManager())
        .build();
    fetcher = new ReplicaFetcherBenchThread(config, replicaManager, pool);
    fetcher.addPartitions(initialFetchStates);
    // force a pass to move partitions to fetching state. We do this in the setup phase
    // so that we do not measure this time as part of the steady state work
    fetcher.doWork();
    // handle response to engage the incremental fetch session handler
    fetcher.fetchSessionHandler().handleResponse(
        FetchResponse.of(Errors.NONE, 0, 999, initialFetched),
        ApiKeys.FETCH.latestVersion());
}
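
The anonymous class in setup is a stub: it satisfies the BaseRecords contract with an empty, unsendable payload, which is all the benchmark needs to seed the fetch session. For comparison, a minimal sketch of building a real in-memory implementation with MemoryRecords, the concrete Records type used by clients and the broker (the key/value contents below are illustrative):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.record.BaseRecords;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;

public class MemoryRecordsSketch {
    public static void main(String[] args) {
        // Build one record batch (magic v2 by default) containing a single key/value record.
        BaseRecords records = MemoryRecords.withRecords(
            CompressionType.NONE,
            new SimpleRecord("key".getBytes(StandardCharsets.UTF_8),
                             "value".getBytes(StandardCharsets.UTF_8)));
        // Unlike the zero-size stub, a real implementation reports its serialized size.
        System.out.println("sizeInBytes = " + records.sizeInBytes());
    }
}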
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ArrayList (java.util.ArrayList), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), OffsetCheckpoints (kafka.server.checkpoints.OffsetCheckpoints), BaseRecords (org.apache.kafka.common.record.BaseRecords), AlterIsrManager (kafka.server.AlterIsrManager), BrokerTopicStats (kafka.server.BrokerTopicStats), UpdateMetadataRequest (org.apache.kafka.common.requests.UpdateMetadataRequest), MockTime (kafka.utils.MockTime), IsrChangeListener (kafka.cluster.IsrChangeListener), ReplicaManagerBuilder (kafka.server.builders.ReplicaManagerBuilder), InitialFetchState (kafka.server.InitialFetchState), FetchResponseData (org.apache.kafka.common.message.FetchResponseData), MockConfigRepository (kafka.server.metadata.MockConfigRepository), TopicPartition (org.apache.kafka.common.TopicPartition), File (java.io.File), LeaderAndIsrRequestData (org.apache.kafka.common.message.LeaderAndIsrRequestData), LogConfig (kafka.log.LogConfig), ZkMetadataCache (kafka.server.metadata.ZkMetadataCache), MetadataCache (kafka.server.MetadataCache), Properties (java.util.Properties), LogDirFailureChannel (kafka.server.LogDirFailureChannel), CleanerConfig (kafka.log.CleanerConfig), UpdateMetadataRequestData (org.apache.kafka.common.message.UpdateMetadataRequestData), Partition (kafka.cluster.Partition), OffsetForLeaderPartition (org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition), LogManagerBuilder (kafka.server.builders.LogManagerBuilder), IOException (java.io.IOException), BrokerEndPoint (kafka.cluster.BrokerEndPoint), Uuid (org.apache.kafka.common.Uuid), KafkaConfig (kafka.server.KafkaConfig), Setup (org.openjdk.jmh.annotations.Setup)

Example 2 with BaseRecords

Use of org.apache.kafka.common.record.BaseRecords in project kafka by apache.

From the class ProduceRequest, the method validateRecords. For produce request versions 3 and above, the payload must contain exactly one record batch with magic value 2; ZStandard compression is additionally only permitted from version 7 onward:

public static void validateRecords(short version, BaseRecords baseRecords) {
    if (version >= 3) {
        if (baseRecords instanceof Records) {
            Records records = (Records) baseRecords;
            Iterator<? extends RecordBatch> iterator = records.batches().iterator();
            if (!iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version +
                    " must have at least one record batch");
            RecordBatch entry = iterator.next();
            if (entry.magic() != RecordBatch.MAGIC_VALUE_V2)
                throw new InvalidRecordException("Produce requests with version " + version +
                    " are only allowed to contain record batches with magic version 2");
            if (version < 7 && entry.compressionType() == CompressionType.ZSTD) {
                throw new UnsupportedCompressionTypeException("Produce requests with version " + version +
                    " are not allowed to use ZStandard compression");
            }
            if (iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version +
                    " are only allowed to contain exactly one record batch");
        }
    }
    // Note that we do not do similar validation for older versions to ensure compatibility with
    // clients which send the wrong magic version in the wrong version of the produce request. The
    // broker did not do this validation before, so we maintain that behavior here.
}
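
A minimal usage sketch (the produce version numbers below are chosen for illustration): a single magic-v2 batch passes validation, while an older magic value is rejected for versions 3 and above.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.ProduceRequest;

public class ValidateRecordsSketch {
    public static void main(String[] args) {
        SimpleRecord record = new SimpleRecord("value".getBytes(StandardCharsets.UTF_8));

        // One magic-v2 batch: valid for any produce request version >= 3.
        MemoryRecords valid = MemoryRecords.withRecords(CompressionType.NONE, record);
        ProduceRequest.validateRecords((short) 8, valid); // no exception

        // A magic-v1 batch is rejected for produce request versions >= 3.
        MemoryRecords oldMagic = MemoryRecords.withRecords(
            RecordBatch.MAGIC_VALUE_V1, CompressionType.NONE, record);
        try {
            ProduceRequest.validateRecords((short) 8, oldMagic);
        } catch (InvalidRecordException e) {
            System.out.println("rejected as expected: " + e.getMessage());
        }
    }
}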
Also used: UnsupportedCompressionTypeException (org.apache.kafka.common.errors.UnsupportedCompressionTypeException), RecordBatch (org.apache.kafka.common.record.RecordBatch), Records (org.apache.kafka.common.record.Records), BaseRecords (org.apache.kafka.common.record.BaseRecords), InvalidRecordException (org.apache.kafka.common.InvalidRecordException)

Aggregations

BaseRecords (org.apache.kafka.common.record.BaseRecords): 2 usages
File (java.io.File): 1 usage
IOException (java.io.IOException): 1 usage
ArrayList (java.util.ArrayList): 1 usage
HashMap (java.util.HashMap): 1 usage
LinkedHashMap (java.util.LinkedHashMap): 1 usage
Properties (java.util.Properties): 1 usage
BrokerEndPoint (kafka.cluster.BrokerEndPoint): 1 usage
IsrChangeListener (kafka.cluster.IsrChangeListener): 1 usage
Partition (kafka.cluster.Partition): 1 usage
CleanerConfig (kafka.log.CleanerConfig): 1 usage
LogConfig (kafka.log.LogConfig): 1 usage
AlterIsrManager (kafka.server.AlterIsrManager): 1 usage
BrokerTopicStats (kafka.server.BrokerTopicStats): 1 usage
InitialFetchState (kafka.server.InitialFetchState): 1 usage
KafkaConfig (kafka.server.KafkaConfig): 1 usage
LogDirFailureChannel (kafka.server.LogDirFailureChannel): 1 usage
MetadataCache (kafka.server.MetadataCache): 1 usage
LogManagerBuilder (kafka.server.builders.LogManagerBuilder): 1 usage
ReplicaManagerBuilder (kafka.server.builders.ReplicaManagerBuilder): 1 usage