
Example 31 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class ReplicaFetcherThreadBenchmark, the method setup:

@Setup(Level.Trial)
public void setup() throws IOException {
    // logDir, scheduler, pool, topicId, partitionCount and metrics are fields
    // of the enclosing benchmark class.
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    Properties props = new Properties();
    props.put("zookeeper.connect", "127.0.0.1:9999");
    KafkaConfig config = new KafkaConfig(props);
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    List<File> logDirs = Collections.singletonList(logDir);
    logManager = new LogManagerBuilder()
            .setLogDirs(logDirs)
            .setInitialOfflineDirs(Collections.emptyList())
            .setConfigRepository(new MockConfigRepository())
            .setInitialDefaultConfig(logConfig)
            .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
            .setRecoveryThreadsPerDataDir(1)
            .setFlushCheckMs(1000L)
            .setFlushRecoveryOffsetCheckpointMs(10000L)
            .setFlushStartOffsetCheckpointMs(10000L)
            .setRetentionCheckMs(1000L)
            .setMaxPidExpirationMs(60000)
            .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
            .setScheduler(scheduler)
            .setBrokerTopicStats(brokerTopicStats)
            .setLogDirFailureChannel(logDirFailureChannel)
            .setTime(Time.SYSTEM)
            .setKeepPartitionMetadataFile(true)
            .build();
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> initialFetched = new LinkedHashMap<>();
    HashMap<String, Uuid> topicIds = new HashMap<>();
    scala.collection.mutable.Map<TopicPartition, InitialFetchState> initialFetchStates = new scala.collection.mutable.HashMap<>();
    List<UpdateMetadataRequestData.UpdateMetadataPartitionState> updatePartitionState = new ArrayList<>();
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("topic", i);
        List<Integer> replicas = Arrays.asList(0, 1, 2);
        LeaderAndIsrRequestData.LeaderAndIsrPartitionState partitionState =
                new LeaderAndIsrRequestData.LeaderAndIsrPartitionState()
                        .setControllerEpoch(0)
                        .setLeader(0)
                        .setLeaderEpoch(0)
                        .setIsr(replicas)
                        .setZkVersion(1)
                        .setReplicas(replicas)
                        .setIsNew(true);
        IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
        OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
        Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
        AlterIsrManager isrChannelManager = Mockito.mock(AlterIsrManager.class);
        Partition partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM,
                isrChangeListener, new DelayedOperationsMock(tp), Mockito.mock(MetadataCache.class),
                logManager, isrChannelManager);
        partition.makeFollower(partitionState, offsetCheckpoints, topicId);
        pool.put(tp, partition);
        initialFetchStates.put(tp, new InitialFetchState(topicId, new BrokerEndPoint(3, "host", 3000), 0, 0));
        // Stub records that carry no bytes; only the map key matters here.
        BaseRecords fetched = new BaseRecords() {

            @Override
            public int sizeInBytes() {
                return 0;
            }

            @Override
            public RecordsSend<? extends BaseRecords> toSend() {
                return null;
            }
        };
        initialFetched.put(new TopicIdPartition(topicId.get(), tp),
                new FetchResponseData.PartitionData()
                        .setPartitionIndex(tp.partition())
                        .setLastStableOffset(0)
                        .setLogStartOffset(0)
                        .setRecords(fetched));
        updatePartitionState.add(new UpdateMetadataRequestData.UpdateMetadataPartitionState()
                .setTopicName("topic")
                .setPartitionIndex(i)
                .setControllerEpoch(0)
                .setLeader(0)
                .setLeaderEpoch(0)
                .setIsr(replicas)
                .setZkVersion(1)
                .setReplicas(replicas));
    }
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(
            ApiKeys.UPDATE_METADATA.latestVersion(), 0, 0, 0,
            updatePartitionState, Collections.emptyList(), topicIds).build();
    // TODO: fix to support raft
    ZkMetadataCache metadataCache = new ZkMetadataCache(0);
    metadataCache.updateMetadata(0, updateMetadataRequest);
    replicaManager = new ReplicaManagerBuilder()
            .setConfig(config)
            .setMetrics(metrics)
            .setTime(new MockTime())
            .setZkClient(Mockito.mock(KafkaZkClient.class))
            .setScheduler(scheduler)
            .setLogManager(logManager)
            .setQuotaManagers(Mockito.mock(QuotaFactory.QuotaManagers.class))
            .setBrokerTopicStats(brokerTopicStats)
            .setMetadataCache(metadataCache)
            .setLogDirFailureChannel(new LogDirFailureChannel(logDirs.size()))
            .setAlterIsrManager(TestUtils.createAlterIsrManager())
            .build();
    fetcher = new ReplicaFetcherBenchThread(config, replicaManager, pool);
    fetcher.addPartitions(initialFetchStates);
    // force a pass to move partitions to fetching state. We do this in the setup phase
    // so that we do not measure this time as part of the steady state work
    fetcher.doWork();
    // handle response to engage the incremental fetch session handler
    fetcher.fetchSessionHandler().handleResponse(FetchResponse.of(Errors.NONE, 0, 999, initialFetched), ApiKeys.FETCH.latestVersion());
}
Also used (duplicates removed): HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ArrayList (java.util.ArrayList), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), OffsetCheckpoints (kafka.server.checkpoints.OffsetCheckpoints), BaseRecords (org.apache.kafka.common.record.BaseRecords), AlterIsrManager (kafka.server.AlterIsrManager), BrokerTopicStats (kafka.server.BrokerTopicStats), UpdateMetadataRequest (org.apache.kafka.common.requests.UpdateMetadataRequest), MockTime (kafka.utils.MockTime), IsrChangeListener (kafka.cluster.IsrChangeListener), ReplicaManagerBuilder (kafka.server.builders.ReplicaManagerBuilder), InitialFetchState (kafka.server.InitialFetchState), FetchResponseData (org.apache.kafka.common.message.FetchResponseData), MockConfigRepository (kafka.server.metadata.MockConfigRepository), TopicPartition (org.apache.kafka.common.TopicPartition), File (java.io.File), LeaderAndIsrRequestData (org.apache.kafka.common.message.LeaderAndIsrRequestData), LogConfig (kafka.log.LogConfig), ZkMetadataCache (kafka.server.metadata.ZkMetadataCache), MetadataCache (kafka.server.MetadataCache), Properties (java.util.Properties), LogDirFailureChannel (kafka.server.LogDirFailureChannel), CleanerConfig (kafka.log.CleanerConfig), UpdateMetadataRequestData (org.apache.kafka.common.message.UpdateMetadataRequestData), Partition (kafka.cluster.Partition), OffsetForLeaderPartition (org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition), LogManagerBuilder (kafka.server.builders.LogManagerBuilder), IOException (java.io.IOException), BrokerEndPoint (kafka.cluster.BrokerEndPoint), Uuid (org.apache.kafka.common.Uuid), KafkaConfig (kafka.server.KafkaConfig), Setup (org.openjdk.jmh.annotations.Setup)
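For orientation, here is a minimal, self-contained sketch of the pattern the benchmark relies on: a TopicIdPartition pairs a topic's Uuid with a TopicPartition and serves as the key for per-partition fetch response data, as in the initialFetched map above. The class name is illustrative, not part of the Kafka sources.

import java.util.LinkedHashMap;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.FetchResponseData;

public class TopicIdPartitionSketch {
    public static void main(String[] args) {
        // Pair a randomly generated topic ID with partition 0 of "topic".
        Uuid topicId = Uuid.randomUuid();
        TopicPartition tp = new TopicPartition("topic", 0);
        TopicIdPartition tidp = new TopicIdPartition(topicId, tp);

        // Fetch response data is keyed by TopicIdPartition, as in the
        // benchmark's initialFetched map.
        LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> fetched = new LinkedHashMap<>();
        fetched.put(tidp, new FetchResponseData.PartitionData()
                .setPartitionIndex(tp.partition())
                .setLastStableOffset(0)
                .setLogStartOffset(0));

        System.out.println(tidp.topicId() + " " + tidp.topicPartition());
    }
}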

Example 32 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class FetchSessionBenchmark, the method setUp:

@Setup(Level.Trial)
public void setUp() {
    fetches = new LinkedHashMap<>();
    handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    topicIds = new HashMap<>();
    FetchSessionHandler.Builder builder = handler.newBuilder();
    Uuid id = Uuid.randomUuid();
    topicIds.put("foo", id);
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> respMap = new LinkedHashMap<>();
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("foo", i);
        FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(id, 0, 0, 200, Optional.empty());
        fetches.put(tp, partitionData);
        builder.add(tp, partitionData);
        respMap.put(new TopicIdPartition(id, tp),
                new FetchResponseData.PartitionData()
                        .setPartitionIndex(tp.partition())
                        .setLastStableOffset(0)
                        .setLogStartOffset(0));
    }
    builder.build();
    // build and handle an initial response so that the next fetch will be incremental
    handler.handleResponse(FetchResponse.of(Errors.NONE, 0, 1, respMap), ApiKeys.FETCH.latestVersion());
    int counter = 0;
    for (TopicPartition topicPartition : new ArrayList<>(fetches.keySet())) {
        if (updatedPercentage != 0 && counter % (100 / updatedPercentage) == 0) {
            // reorder in fetch session, and update log start offset
            fetches.remove(topicPartition);
            fetches.put(topicPartition, new FetchRequest.PartitionData(id, 50, 40, 200, Optional.empty()));
        }
        counter++;
    }
}
Also used: ArrayList (java.util.ArrayList), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), LinkedHashMap (java.util.LinkedHashMap), FetchSessionHandler (org.apache.kafka.clients.FetchSessionHandler), Uuid (org.apache.kafka.common.Uuid), TopicPartition (org.apache.kafka.common.TopicPartition), FetchRequest (org.apache.kafka.common.requests.FetchRequest), Setup (org.openjdk.jmh.annotations.Setup)
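The setup above primes the session so the measured iterations send incremental requests. Here is a minimal sketch of that handshake using the same client API as the benchmark; the class name and expected sizes are illustrative, and it assumes FetchRequest.PartitionData compares equal by value, which is what lets the handler skip unchanged partitions.

import java.util.LinkedHashMap;
import java.util.Optional;

import org.apache.kafka.clients.FetchSessionHandler;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.LogContext;

public class IncrementalFetchSketch {
    public static void main(String[] args) {
        FetchSessionHandler handler = new FetchSessionHandler(new LogContext(), 1);
        Uuid id = Uuid.randomUuid();
        TopicPartition tp = new TopicPartition("foo", 0);

        // First request: a full session, so the partition appears in toSend().
        FetchSessionHandler.Builder first = handler.newBuilder();
        first.add(tp, new FetchRequest.PartitionData(id, 0, 0, 200, Optional.empty()));
        System.out.println("full: " + first.build().toSend().size()); // expected 1

        // Handle a response carrying a real session id to establish the session.
        LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> resp = new LinkedHashMap<>();
        resp.put(new TopicIdPartition(id, tp), new FetchResponseData.PartitionData().setPartitionIndex(0));
        handler.handleResponse(FetchResponse.of(Errors.NONE, 0, 1, resp), ApiKeys.FETCH.latestVersion());

        // Second request with identical partition data: the incremental delta is empty.
        FetchSessionHandler.Builder second = handler.newBuilder();
        second.add(tp, new FetchRequest.PartitionData(id, 0, 0, 200, Optional.empty()));
        System.out.println("incremental: " + second.build().toSend().size()); // expected 0
    }
}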

Example 33 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class RequestResponseTest, the method testSerializationSpecialCases:

// This test validates special cases that are not checked in testSerialization
@Test
public void testSerializationSpecialCases() {
    // Produce
    checkResponse(createProduceResponseWithErrorMessage(), (short) 8);
    // Fetch
    checkResponse(createFetchResponse(true), (short) 4);
    List<TopicIdPartition> toForgetTopics = new ArrayList<>();
    toForgetTopics.add(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 0)));
    toForgetTopics.add(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 2)));
    toForgetTopics.add(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("bar", 0)));
    checkRequest(createFetchRequest((short) 7, new FetchMetadata(123, 456), toForgetTopics));
    checkResponse(createFetchResponse(123), (short) 7);
    checkResponse(createFetchResponse(Errors.FETCH_SESSION_ID_NOT_FOUND, 123), (short) 7);
    checkOlderFetchVersions();
    // Metadata
    checkRequest(MetadataRequest.Builder.allTopics().build((short) 2));
    // OffsetFetch
    checkRequest(createOffsetFetchRequestWithMultipleGroups((short) 8, true));
    checkRequest(createOffsetFetchRequestWithMultipleGroups((short) 8, false));
    checkRequest(createOffsetFetchRequestForAllPartition((short) 7, true));
    checkRequest(createOffsetFetchRequestForAllPartition((short) 8, true));
    checkErrorResponse(createOffsetFetchRequestWithMultipleGroups((short) 8, true), unknownServerException);
    checkErrorResponse(createOffsetFetchRequestForAllPartition((short) 7, true), new NotCoordinatorException("Not Coordinator"));
    checkErrorResponse(createOffsetFetchRequestForAllPartition((short) 8, true), new NotCoordinatorException("Not Coordinator"));
    checkErrorResponse(createOffsetFetchRequestWithMultipleGroups((short) 8, true), new NotCoordinatorException("Not Coordinator"));
    // StopReplica
    for (short version : STOP_REPLICA.allVersions()) {
        checkRequest(createStopReplicaRequest(version, false));
        checkErrorResponse(createStopReplicaRequest(version, false), unknownServerException);
    }
    // CreatePartitions
    for (short version : CREATE_PARTITIONS.allVersions()) {
        checkRequest(createCreatePartitionsRequestWithAssignments(version));
    }
    // UpdateMetadata
    for (short version : UPDATE_METADATA.allVersions()) {
        checkRequest(createUpdateMetadataRequest(version, null));
        checkErrorResponse(createUpdateMetadataRequest(version, null), unknownServerException);
    }
    // LeaderForEpoch
    checkRequest(createLeaderEpochRequestForConsumer());
    checkErrorResponse(createLeaderEpochRequestForConsumer(), unknownServerException);
    // TxnOffsetCommit
    checkRequest(createTxnOffsetCommitRequestWithAutoDowngrade());
    checkErrorResponse(createTxnOffsetCommitRequestWithAutoDowngrade(), unknownServerException);
    // DescribeAcls
    checkErrorResponse(createDescribeAclsRequest((short) 0), new SecurityDisabledException("Security is not enabled."));
    checkErrorResponse(createCreateAclsRequest((short) 0), new SecurityDisabledException("Security is not enabled."));
    // DeleteAcls
    checkErrorResponse(createDeleteAclsRequest((short) 0), new SecurityDisabledException("Security is not enabled."));
    // DescribeConfigs
    checkRequest(createDescribeConfigsRequestWithConfigEntries((short) 0));
    checkRequest(createDescribeConfigsRequestWithConfigEntries((short) 1));
    checkRequest(createDescribeConfigsRequestWithDocumentation((short) 1));
    checkRequest(createDescribeConfigsRequestWithDocumentation((short) 2));
    checkRequest(createDescribeConfigsRequestWithDocumentation((short) 3));
    checkDescribeConfigsResponseVersions();
    // ElectLeaders
    checkRequest(createElectLeadersRequestNullPartitions());
}
Also used: NotCoordinatorException (org.apache.kafka.common.errors.NotCoordinatorException), TopicPartition (org.apache.kafka.common.TopicPartition), ArrayList (java.util.ArrayList), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), SecurityDisabledException (org.apache.kafka.common.errors.SecurityDisabledException), Test (org.junit.jupiter.api.Test)
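The forgotten-topics list above uses Uuid.ZERO_UUID, the sentinel for "no topic ID" when exercising request versions that predate topic IDs. A small sketch of why the sentinel matters; the class name is illustrative, and it assumes TopicIdPartition equality includes the ID, consistent with how fetch sessions distinguish re-created topics.

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class ZeroUuidSketch {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("foo", 0);
        // ZERO_UUID marks a partition whose topic ID is unknown or unsupported.
        TopicIdPartition withoutId = new TopicIdPartition(Uuid.ZERO_UUID, tp);
        TopicIdPartition withId = new TopicIdPartition(Uuid.randomUuid(), tp);
        // Same topic name and partition, but different IDs: not equal.
        System.out.println(withoutId.equals(withId)); // expected false
    }
}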

Example 34 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class RequestResponseTest, the method fetchResponseVersionTest:

@Test
public void fetchResponseVersionTest() {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    Uuid id = Uuid.randomUuid();
    Map<Uuid, String> topicNames = Collections.singletonMap(id, "test");
    TopicPartition tp = new TopicPartition("test", 0);
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setLogStartOffset(-1)
            .setRecords(records);
    // Use zero UUID since we are comparing with old request versions
    responseData.put(new TopicIdPartition(Uuid.ZERO_UUID, tp), partitionData);
    LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> tpResponseData = new LinkedHashMap<>();
    tpResponseData.put(tp, partitionData);
    FetchResponse v0Response = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, responseData);
    FetchResponse v1Response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData);
    FetchResponse v0Deserialized = FetchResponse.parse(v0Response.serialize((short) 0), (short) 0);
    FetchResponse v1Deserialized = FetchResponse.parse(v1Response.serialize((short) 1), (short) 1);
    assertEquals(0, v0Deserialized.throttleTimeMs(), "Throttle time must be zero");
    assertEquals(10, v1Deserialized.throttleTimeMs(), "Throttle time must be 10");
    assertEquals(tpResponseData, v0Deserialized.responseData(topicNames, (short) 0), "Response data does not match");
    assertEquals(tpResponseData, v1Deserialized.responseData(topicNames, (short) 1), "Response data does not match");
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> idResponseData = new LinkedHashMap<>();
    idResponseData.put(new TopicIdPartition(id, new TopicPartition("test", 0)),
            new FetchResponseData.PartitionData()
                    .setPartitionIndex(0)
                    .setHighWatermark(1000000)
                    .setLogStartOffset(-1)
                    .setRecords(records));
    FetchResponse idTestResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, idResponseData);
    FetchResponse v12Deserialized = FetchResponse.parse(idTestResponse.serialize((short) 12), (short) 12);
    FetchResponse newestDeserialized = FetchResponse.parse(idTestResponse.serialize(FETCH.latestVersion()), FETCH.latestVersion());
    assertTrue(v12Deserialized.topicIds().isEmpty());
    assertEquals(1, newestDeserialized.topicIds().size());
    assertTrue(newestDeserialized.topicIds().contains(id));
}
Also used: TopicIdPartition (org.apache.kafka.common.TopicIdPartition), LinkedHashMap (java.util.LinkedHashMap), FetchResponseData (org.apache.kafka.common.message.FetchResponseData), Uuid (org.apache.kafka.common.Uuid), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.jupiter.api.Test)
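Since responses are keyed by TopicIdPartition internally, callers resolve them back to TopicPartition keys with responseData and an ID-to-name map, as the assertions above do. A minimal sketch of that resolution; the class name and session id are illustrative.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.FetchResponse;

public class ResponseDataSketch {
    public static void main(String[] args) {
        Uuid id = Uuid.randomUuid();
        Map<Uuid, String> topicNames = Collections.singletonMap(id, "test");

        LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> data = new LinkedHashMap<>();
        data.put(new TopicIdPartition(id, new TopicPartition("test", 0)),
                new FetchResponseData.PartitionData().setPartitionIndex(0));
        FetchResponse response = FetchResponse.of(Errors.NONE, 0, 7, data);

        // responseData maps topic IDs back to names using the supplied mapping.
        LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> byName =
                response.responseData(topicNames, (short) 13);
        System.out.println(byName.keySet()); // expected [test-0]
    }
}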

Example 35 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class FetchRequestTest, the method testToReplaceWithDifferentVersions:

@ParameterizedTest
@MethodSource("fetchVersions")
public void testToReplaceWithDifferentVersions(short version) {
    boolean fetchRequestUsesTopicIds = version >= 13;
    Uuid topicId = Uuid.randomUuid();
    TopicIdPartition tp = new TopicIdPartition(topicId, 0, "topic");
    Map<TopicPartition, FetchRequest.PartitionData> partitionData = Collections.singletonMap(
            tp.topicPartition(), new FetchRequest.PartitionData(topicId, 0, 0, 0, Optional.empty()));
    List<TopicIdPartition> toReplace = Collections.singletonList(tp);
    FetchRequest fetchRequest = FetchRequest.Builder
            .forReplica(version, 0, 1, 1, partitionData)
            .removed(Collections.emptyList())
            .replaced(toReplace)
            .metadata(FetchMetadata.newIncremental(123))
            .build(version);
    // If version < 13, we should not see any partitions in forgottenTopics, because
    // we cannot distinguish different topic IDs on versions earlier than 13.
    assertEquals(fetchRequestUsesTopicIds, fetchRequest.data().forgottenTopicsData().size() > 0);
    fetchRequest.data().forgottenTopicsData().forEach(forgottenTopic -> {
        // Since we didn't serialize, we should see the topic name and ID regardless of the version.
        assertEquals(tp.topic(), forgottenTopic.topic());
        assertEquals(topicId, forgottenTopic.topicId());
    });
    assertEquals(1, fetchRequest.data().topics().size());
    fetchRequest.data().topics().forEach(topic -> {
        // Since we didn't serialize, we should see the topic name and ID regardless of the version.
        assertEquals(tp.topic(), topic.topic());
        assertEquals(topicId, topic.topicId());
    });
}
Also used: Uuid (org.apache.kafka.common.Uuid), TopicPartition (org.apache.kafka.common.TopicPartition), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), MethodSource (org.junit.jupiter.params.provider.MethodSource)
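To see the version-dependent behavior the test asserts, the same builder calls can be wrapped in a helper and run at a pre- and post-topic-ID version. A minimal sketch; the class and method names are illustrative.

import java.util.Collections;
import java.util.Map;
import java.util.Optional;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.requests.FetchMetadata;
import org.apache.kafka.common.requests.FetchRequest;

public class ReplacedVersionSketch {
    // Build an incremental replica fetch request that replaces one partition and
    // return how many forgotten-topic entries it carries after serialization rules apply.
    static int forgottenCount(short version) {
        Uuid topicId = Uuid.randomUuid();
        TopicIdPartition tp = new TopicIdPartition(topicId, 0, "topic");
        Map<TopicPartition, FetchRequest.PartitionData> partitionData = Collections.singletonMap(
                tp.topicPartition(), new FetchRequest.PartitionData(topicId, 0, 0, 0, Optional.empty()));
        return FetchRequest.Builder
                .forReplica(version, 0, 1, 1, partitionData)
                .removed(Collections.emptyList())
                .replaced(Collections.singletonList(tp))
                .metadata(FetchMetadata.newIncremental(123))
                .build(version)
                .data()
                .forgottenTopicsData()
                .size();
    }

    public static void main(String[] args) {
        System.out.println(forgottenCount((short) 12)); // expected 0: no topic IDs before v13
        System.out.println(forgottenCount((short) 13)); // expected 1: replaced partition is forgotten
    }
}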

Aggregations

TopicIdPartition (org.apache.kafka.common.TopicIdPartition): 47 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 32 usages
Test (org.junit.jupiter.api.Test): 25 usages
LinkedHashMap (java.util.LinkedHashMap): 22 usages
Uuid (org.apache.kafka.common.Uuid): 18 usages
ArrayList (java.util.ArrayList): 17 usages
HashMap (java.util.HashMap): 16 usages
FetchResponseData (org.apache.kafka.common.message.FetchResponseData): 15 usages
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 13 usages
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 13 usages
List (java.util.List): 12 usages
FetchRequest (org.apache.kafka.common.requests.FetchRequest): 12 usages
PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData): 12 usages
Arrays.asList (java.util.Arrays.asList): 10 usages
Collections.emptyList (java.util.Collections.emptyList): 10 usages
Collections.singletonList (java.util.Collections.singletonList): 10 usages
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 10 usages
KafkaException (org.apache.kafka.common.KafkaException): 9 usages
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 8 usages
FetchResponse (org.apache.kafka.common.requests.FetchResponse): 8 usages