
Example 26 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in the apache/kafka project.

Class TopicBasedRemoteLogMetadataManagerHarness, method initializeRemoteLogMetadataManager.

public void initializeRemoteLogMetadataManager(Set<TopicIdPartition> topicIdPartitions, boolean startConsumerThread) {
    String logDir = TestUtils.tempDirectory("rlmm_segs_").getAbsolutePath();
    topicBasedRemoteLogMetadataManager = new TopicBasedRemoteLogMetadataManager(startConsumerThread) {

        @Override
        public void onPartitionLeadershipChanges(Set<TopicIdPartition> leaderPartitions, Set<TopicIdPartition> followerPartitions) {
            Set<TopicIdPartition> allReplicas = new HashSet<>(leaderPartitions);
            allReplicas.addAll(followerPartitions);
            // Make sure the topic partition dirs exist as the topics might not have been created on this broker.
            for (TopicIdPartition topicIdPartition : allReplicas) {
                // Create partition directory in the log directory created by topicBasedRemoteLogMetadataManager.
                File partitionDir = new File(new File(config().logDir()), topicIdPartition.topicPartition().topic() + "-" + topicIdPartition.topicPartition().partition());
                if (!partitionDir.mkdirs() && !partitionDir.exists()) {
                    throw new KafkaException("Partition directory [" + partitionDir + "] could not be created.");
                }
            }
            super.onPartitionLeadershipChanges(leaderPartitions, followerPartitions);
        }
    };
    // Build the configs and configure the TopicBasedRemoteLogMetadataManager.
    Map<String, Object> configs = new HashMap<>();
    configs.put(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers(listenerName()));
    configs.put(BROKER_ID, 0);
    configs.put(LOG_DIR, logDir);
    configs.put(REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP, METADATA_TOPIC_PARTITIONS_COUNT);
    configs.put(REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR_PROP, METADATA_TOPIC_REPLICATION_FACTOR);
    configs.put(REMOTE_LOG_METADATA_TOPIC_RETENTION_MS_PROP, METADATA_TOPIC_RETENTION_MS);
    log.debug("TopicBasedRemoteLogMetadataManager configs before adding overridden properties: {}", configs);
    // Add override properties.
    configs.putAll(overrideRemoteLogMetadataManagerProps());
    log.debug("TopicBasedRemoteLogMetadataManager configs after adding overridden properties: {}", configs);
    topicBasedRemoteLogMetadataManager.configure(configs);
    try {
        waitUntilInitialized(60_000);
    } catch (TimeoutException e) {
        throw new KafkaException(e);
    }
    topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges(topicIdPartitions, Collections.emptySet());
}
Also used: Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) KafkaException(org.apache.kafka.common.KafkaException) File(java.io.File) TimeoutException(java.util.concurrent.TimeoutException)
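
A minimal sketch of how a test might drive this harness; the harness variable and topic name below are illustrative assumptions, not taken verbatim from Kafka's test suite:

Set<TopicIdPartition> partitions = new HashSet<>();
partitions.add(new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("remote-segs", 0)));
partitions.add(new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("remote-segs", 1)));
// "harness" is assumed to be a TopicBasedRemoteLogMetadataManagerHarness set up by the test.
// Passing true starts the consumer thread so leadership changes are processed.
harness.initializeRemoteLogMetadataManager(partitions, true);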

Example 27 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in the apache/kafka project.

Class FileBasedRemoteLogMetadataCacheTest, method testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments.

@Test
public void testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments() throws Exception {
    TopicIdPartition partition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test", 0));
    int brokerId = 0;
    Path path = TestUtils.tempDirectory().toPath();
    // Create a file-based metadata cache.
    FileBasedRemoteLogMetadataCache cache = new FileBasedRemoteLogMetadataCache(partition, path);
    // Add a segment with start offset 0 for leader epoch 0.
    RemoteLogSegmentId segmentId1 = new RemoteLogSegmentId(partition, Uuid.randomUuid());
    RemoteLogSegmentMetadata metadata1 = new RemoteLogSegmentMetadata(segmentId1, 0, 100, System.currentTimeMillis(), brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    cache.addCopyInProgressSegment(metadata1);
    RemoteLogSegmentMetadataUpdate metadataUpdate1 = new RemoteLogSegmentMetadataUpdate(segmentId1, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    cache.updateRemoteLogSegmentMetadata(metadataUpdate1);
    Optional<RemoteLogSegmentMetadata> receivedMetadata = cache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadata.isPresent());
    assertEquals(metadata1.createWithUpdates(metadataUpdate1), receivedMetadata.get());
    // Add a new segment with start offset 0 for leader epoch 0; it should replace the earlier segment.
    RemoteLogSegmentId segmentId2 = new RemoteLogSegmentId(partition, Uuid.randomUuid());
    RemoteLogSegmentMetadata metadata2 = new RemoteLogSegmentMetadata(segmentId2, 0, 900, System.currentTimeMillis(), brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    cache.addCopyInProgressSegment(metadata2);
    RemoteLogSegmentMetadataUpdate metadataUpdate2 = new RemoteLogSegmentMetadataUpdate(segmentId2, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    cache.updateRemoteLogSegmentMetadata(metadataUpdate2);
    // Fetch the segment for leader epoch 0 and start offset 0; it should be the newly added segment.
    Optional<RemoteLogSegmentMetadata> receivedMetadata2 = cache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadata2.isPresent());
    assertEquals(metadata2.createWithUpdates(metadataUpdate2), receivedMetadata2.get());
    // Flush the cache to the file.
    cache.flushToFile(0, 0L);
    // Create a new cache that loads from the stored path.
    FileBasedRemoteLogMetadataCache loadedCache = new FileBasedRemoteLogMetadataCache(partition, path);
    // Fetch the segment for leader epoch 0 and start offset 0; it should be metadata2.
    // This verifies that metadata ordering is preserved after loading from the stored snapshots.
    Optional<RemoteLogSegmentMetadata> receivedMetadataAfterLoad = loadedCache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadataAfterLoad.isPresent());
    assertEquals(metadata2.createWithUpdates(metadataUpdate2), receivedMetadataAfterLoad.get());
}
Also used: Path(java.nio.file.Path) RemoteLogSegmentMetadataUpdate(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate) TopicPartition(org.apache.kafka.common.TopicPartition) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata) Test(org.junit.jupiter.api.Test)
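
The positional constructor arguments above are easy to misread; here is the same first call with named locals in the parameter order this test uses (the local names are illustrative, not the constructor's formal parameter names):

long startOffset = 0L;                              // first offset covered by the segment
long endOffset = 100L;                              // last offset covered by the segment
long maxTimestampMs = System.currentTimeMillis();   // highest record timestamp in the segment
long eventTimestampMs = System.currentTimeMillis(); // when this metadata event was generated
int segmentSizeInBytes = 1024 * 1024;
// Leader epoch -> start offset of that epoch within the segment.
Map<Integer, Long> segmentLeaderEpochs = Collections.singletonMap(0, 0L);
RemoteLogSegmentMetadata metadata1 = new RemoteLogSegmentMetadata(segmentId1, startOffset, endOffset,
        maxTimestampMs, brokerId, eventTimestampMs, segmentSizeInBytes, segmentLeaderEpochs);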

Example 28 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in the apache/kafka project.

Class ConsumerTask, method maybeSyncCommittedDataAndOffsets.

private void maybeSyncCommittedDataAndOffsets(boolean forceSync) {
    // Return immediately if nothing has been consumed since the last sync.
    boolean noConsumedOffsetUpdates = partitionToConsumedOffsets.equals(lastSyncedPartitionToConsumedOffsets);
    if (noConsumedOffsetUpdates || (!forceSync && time.milliseconds() - lastSyncedTimeMs < committedOffsetSyncIntervalMs)) {
        log.debug("Skip syncing committed offsets, noConsumedOffsetUpdates: {}, forceSync: {}", noConsumedOffsetUpdates, forceSync);
        return;
    }
    try {
        // Hold assignPartitionsLock, as assignedTopicPartitions may get updated by other threads.
        synchronized (assignPartitionsLock) {
            for (TopicIdPartition topicIdPartition : assignedTopicPartitions) {
                int metadataPartition = topicPartitioner.metadataPartition(topicIdPartition);
                Long offset = partitionToConsumedOffsets.get(metadataPartition);
                if (offset != null) {
                    remotePartitionMetadataEventHandler.syncLogMetadataSnapshot(topicIdPartition, metadataPartition, offset);
                } else {
                    log.debug("Skipping syncup of the remote-log-metadata-file for partition:{} , with remote log metadata partition{}, and no offset", topicIdPartition, metadataPartition);
                }
            }
            // Write partitionToConsumedOffsets into committed offsets file as we do not want to process them again
            // in case of restarts.
            committedOffsetsFile.writeEntries(partitionToConsumedOffsets);
            lastSyncedPartitionToConsumedOffsets = new HashMap<>(partitionToConsumedOffsets);
        }
        lastSyncedTimeMs = time.milliseconds();
    } catch (IOException e) {
        throw new KafkaException("Error encountered while writing committed offsets to a local file", e);
    }
}
Also used: KafkaException(org.apache.kafka.common.KafkaException) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) IOException(java.io.IOException)
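
The early-return condition packs three checks into one expression; a standalone restatement of the same predicate, with a method name invented here for clarity:

// Skip the sync when nothing new was consumed, or when this is a periodic
// (non-forced) call that falls inside the configured sync interval.
private static boolean shouldSkipSync(boolean noConsumedOffsetUpdates, boolean forceSync,
                                      long nowMs, long lastSyncedTimeMs, long syncIntervalMs) {
    return noConsumedOffsetUpdates || (!forceSync && nowMs - lastSyncedTimeMs < syncIntervalMs);
}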

Example 29 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in the apache/kafka project.

Class ConsumerTask, method updateAssignmentsForPartitions.

private void updateAssignmentsForPartitions(Set<TopicIdPartition> addedPartitions, Set<TopicIdPartition> removedPartitions) {
    log.info("Updating assignments for addedPartitions: {} and removedPartition: {}", addedPartitions, removedPartitions);
    Objects.requireNonNull(addedPartitions, "addedPartitions must not be null");
    Objects.requireNonNull(removedPartitions, "removedPartitions must not be null");
    if (addedPartitions.isEmpty() && removedPartitions.isEmpty()) {
        return;
    }
    synchronized (assignPartitionsLock) {
        Set<TopicIdPartition> updatedReassignedPartitions = new HashSet<>(assignedTopicPartitions);
        updatedReassignedPartitions.addAll(addedPartitions);
        updatedReassignedPartitions.removeAll(removedPartitions);
        Set<Integer> updatedAssignedMetaPartitions = new HashSet<>();
        for (TopicIdPartition tp : updatedReassignedPartitions) {
            updatedAssignedMetaPartitions.add(topicPartitioner.metadataPartition(tp));
        }
        // Clear removed topic partitions from the in-memory cache.
        for (TopicIdPartition removedPartition : removedPartitions) {
            remotePartitionMetadataEventHandler.clearTopicPartition(removedPartition);
        }
        assignedTopicPartitions = Collections.unmodifiableSet(updatedReassignedPartitions);
        log.debug("Assigned topic partitions: {}", assignedTopicPartitions);
        if (!updatedAssignedMetaPartitions.equals(assignedMetaPartitions)) {
            assignedMetaPartitions = Collections.unmodifiableSet(updatedAssignedMetaPartitions);
            log.debug("Assigned metadata topic partitions: {}", assignedMetaPartitions);
            assignPartitions = true;
            assignPartitionsLock.notifyAll();
        } else {
            log.debug("No change in assigned metadata topic partitions: {}", assignedMetaPartitions);
        }
    }
}
Also used: TopicIdPartition(org.apache.kafka.common.TopicIdPartition) HashSet(java.util.HashSet)
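
A toy illustration of the set arithmetic the method performs on the assignment (the partitions and topic name are hypothetical):

TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("logs", 0));
TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("logs", 1));
TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("logs", 2));
Set<TopicIdPartition> assigned = new HashSet<>(Arrays.asList(tp0, tp1));
Set<TopicIdPartition> updated = new HashSet<>(assigned);
updated.addAll(Collections.singleton(tp2));    // newly assigned -> {tp0, tp1, tp2}
updated.removeAll(Collections.singleton(tp0)); // revoked        -> {tp1, tp2}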

Example 30 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in the apache/kafka project.

Class FetchResponseBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord(1000, "key1".getBytes(StandardCharsets.UTF_8), "value1".getBytes(StandardCharsets.UTF_8)),
            new SimpleRecord(1001, "key2".getBytes(StandardCharsets.UTF_8), "value2".getBytes(StandardCharsets.UTF_8)),
            new SimpleRecord(1002, "key3".getBytes(StandardCharsets.UTF_8), "value3".getBytes(StandardCharsets.UTF_8)));
    this.responseData = new LinkedHashMap<>();
    this.topicIds = new HashMap<>();
    this.topicNames = new HashMap<>();
    for (int topicIdx = 0; topicIdx < topicCount; topicIdx++) {
        String topic = UUID.randomUUID().toString();
        Uuid id = Uuid.randomUuid();
        topicIds.put(topic, id);
        topicNames.put(id, topic);
        for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
            FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
                    .setPartitionIndex(partitionId)
                    .setLastStableOffset(0)
                    .setLogStartOffset(0)
                    .setRecords(records);
            responseData.put(new TopicIdPartition(id, new TopicPartition(topic, partitionId)), partitionData);
        }
    }
    this.header = new ResponseHeader(100, ApiKeys.FETCH.responseHeaderVersion(ApiKeys.FETCH.latestVersion()));
    this.fetchResponse = FetchResponse.of(Errors.NONE, 0, 0, responseData);
    this.fetchResponseData = this.fetchResponse.data();
}
Also used: ResponseHeader(org.apache.kafka.common.requests.ResponseHeader) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Setup(org.openjdk.jmh.annotations.Setup)
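
For context, a plausible benchmark body that this setup feeds; the actual @Benchmark methods are not shown on this page, so the method name below is an assumption:

@Benchmark
public int constructFetchResponse() {
    // Rebuild the response from the pre-populated per-partition data on every invocation.
    FetchResponse response = FetchResponse.of(Errors.NONE, 0, 0, responseData);
    return response.data().responses().size();
}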
