Example 36 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class TopicBasedRemoteLogMetadataManagerRestartTest, method testRLMMAPIsAfterRestart:

@Test
public void testRLMMAPIsAfterRestart() throws Exception {
    // Create topics.
    String leaderTopic = "new-leader";
    HashMap<Object, Seq<Object>> assignedLeaderTopicReplicas = new HashMap<>();
    List<Object> leaderTopicReplicas = new ArrayList<>();
    // Set broker id 0 as the first entry which is taken as the leader.
    leaderTopicReplicas.add(0);
    leaderTopicReplicas.add(1);
    leaderTopicReplicas.add(2);
    assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(leaderTopic, JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
    String followerTopic = "new-follower";
    HashMap<Object, Seq<Object>> assignedFollowerTopicReplicas = new HashMap<>();
    List<Object> followerTopicReplicas = new ArrayList<>();
    // Set broker id 1 as the first entry which is taken as the leader.
    followerTopicReplicas.add(1);
    followerTopicReplicas.add(2);
    followerTopicReplicas.add(0);
    assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic, JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
    final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0));
    final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0));
    // Register these partitions to RLMM.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Add segments for these partitions; they are not available yet because the partitions have not been subscribed.
    RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get();
    RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get();
    // Stop TopicBasedRemoteLogMetadataManager only.
    stopTopicBasedRemoteLogMetadataManagerHarness();
    // Start TopicBasedRemoteLogMetadataManager, but do not start the consumer thread, to check whether the
    // stored metadata is loaded successfully.
    startTopicBasedRemoteLogMetadataManagerHarness(false);
    // Register these partitions to RLMM, which loads the respective metadata snapshots.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Check for the stored entries from the earlier run.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)));
    // Check whether the check-pointed consumer offsets are stored or not.
    Path committedOffsetsPath = new File(logDir, COMMITTED_OFFSETS_FILE_NAME).toPath();
    Assertions.assertTrue(committedOffsetsPath.toFile().exists());
    CommittedOffsetsFile committedOffsetsFile = new CommittedOffsetsFile(committedOffsetsPath.toFile());
    int metadataPartition1 = topicBasedRlmm().metadataPartition(leaderTopicIdPartition);
    int metadataPartition2 = topicBasedRlmm().metadataPartition(followerTopicIdPartition);
    Optional<Long> receivedOffsetForPartition1 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition1);
    Optional<Long> receivedOffsetForPartition2 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition2);
    Assertions.assertTrue(receivedOffsetForPartition1.isPresent());
    Assertions.assertTrue(receivedOffsetForPartition2.isPresent());
    // Make sure these offsets are at least 0.
    Assertions.assertTrue(receivedOffsetForPartition1.get() >= 0);
    Assertions.assertTrue(receivedOffsetForPartition2.get() >= 0);
    // Check that the stored entries and the offsets set on the consumer are the same.
    Map<Integer, Long> partitionToOffset = committedOffsetsFile.readEntries();
    Assertions.assertEquals(partitionToOffset.get(metadataPartition1), receivedOffsetForPartition1.get());
    Assertions.assertEquals(partitionToOffset.get(metadataPartition2), receivedOffsetForPartition2.get());
    // Start Consumer thread
    topicBasedRlmm().startConsumerThread();
    // Add one more segment
    RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 101, 200, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get();
    // Check that both the stored segment and recently added segment are available.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
}
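Before the next example, a minimal, self-contained sketch of the TopicIdPartition construction the test leans on. Only the org.apache.kafka.common types come from the example above; the wrapper class and printed accessors are illustrative assumptions.

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdPartitionSketch {
    public static void main(String[] args) {
        // A TopicIdPartition pairs a topic's Uuid with a plain TopicPartition, so
        // remote log metadata stays tied to this topic instance even if a topic
        // with the same name is later re-created under a different id.
        TopicIdPartition tidp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("new-leader", 0));
        System.out.println(tidp.topicId());        // the topic's Uuid
        System.out.println(tidp.topicPartition()); // new-leader-0
    }
}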

Example 37 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class InmemoryRemoteLogMetadataManager, method putRemotePartitionDeleteMetadata:

@Override
public CompletableFuture<Void> putRemotePartitionDeleteMetadata(RemotePartitionDeleteMetadata remotePartitionDeleteMetadata) throws RemoteStorageException {
    log.debug("Adding delete state with: [{}]", remotePartitionDeleteMetadata);
    Objects.requireNonNull(remotePartitionDeleteMetadata, "remotePartitionDeleteMetadata can not be null");
    TopicIdPartition topicIdPartition = remotePartitionDeleteMetadata.topicIdPartition();
    RemotePartitionDeleteState targetState = remotePartitionDeleteMetadata.state();
    RemotePartitionDeleteMetadata existingMetadata = idToPartitionDeleteMetadata.get(topicIdPartition);
    RemotePartitionDeleteState existingState = existingMetadata != null ? existingMetadata.state() : null;
    if (!RemotePartitionDeleteState.isValidTransition(existingState, targetState)) {
        throw new IllegalStateException("Current state: " + existingState + ", target state: " + targetState);
    }
    idToPartitionDeleteMetadata.put(topicIdPartition, remotePartitionDeleteMetadata);
    if (targetState == RemotePartitionDeleteState.DELETE_PARTITION_FINISHED) {
        // Remove the association for the partition.
        idToRemoteLogMetadataCache.remove(topicIdPartition);
        idToPartitionDeleteMetadata.remove(topicIdPartition);
    }
    return COMPLETED_FUTURE;
}
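To make the transition check concrete, a hedged sketch of the only state sequence the method accepts. The names rlmm, tidp, and time are assumptions standing in for an InmemoryRemoteLogMetadataManager, a registered TopicIdPartition, and a clock; the constructor is assumed to be RemotePartitionDeleteMetadata(topicIdPartition, state, eventTimestampMs, brokerId), which the example itself does not show.

// Valid sequence: (no state) -> MARKED -> STARTED -> FINISHED.
rlmm.putRemotePartitionDeleteMetadata(new RemotePartitionDeleteMetadata(
        tidp, RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds(), 0)).get();
rlmm.putRemotePartitionDeleteMetadata(new RemotePartitionDeleteMetadata(
        tidp, RemotePartitionDeleteState.DELETE_PARTITION_STARTED, time.milliseconds(), 0)).get();
// DELETE_PARTITION_FINISHED also removes the partition's cached metadata and
// delete state, as shown in the method body above.
rlmm.putRemotePartitionDeleteMetadata(new RemotePartitionDeleteMetadata(
        tidp, RemotePartitionDeleteState.DELETE_PARTITION_FINISHED, time.milliseconds(), 0)).get();
// Skipping a step, e.g. starting at DELETE_PARTITION_STARTED, fails the
// isValidTransition check and raises IllegalStateException.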

Example 38 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class FetcherTest, method testCompletedFetchRemoval:

@Test
public void testCompletedFetchRemoval() {
    // Ensure that a completed fetch which raises an exception is removed if and only if it contains empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData().setPartitionIndex(tp2.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData().setPartitionIndex(tp3.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());
    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        recordsByPartition = fetchedRecords();
        for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }
    // Should have received one OffsetOutOfRangeException for partition tp0.
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(1, oor.offsetOutOfRangePartitions().size());
    recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    // Should not have received an exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());
    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            recordsByPartition = fetchedRecords();
            for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received exactly numExceptionsExpected KafkaExceptions for tp3.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
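The try/catch drain repeated throughout this test can be factored into a small helper. A hedged sketch, assuming the collect call (the test's fetchedRecords()) is passed in as a supplier; drainQuietly is a hypothetical name, not part of the test harness.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;

// Collect whatever records a fetch pass returns, capturing a per-partition
// error instead of letting it abort the caller. The error is thrown by the
// collect call itself, so the supplier invocation sits inside the try block,
// mirroring the try/catch placement in the test above.
static List<ConsumerRecord<byte[], byte[]>> drainQuietly(
        Supplier<Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> collect,
        List<KafkaException> errorsOut) {
    List<ConsumerRecord<byte[], byte[]>> drained = new ArrayList<>();
    try {
        for (List<ConsumerRecord<byte[], byte[]>> recs : collect.get().values())
            drained.addAll(recs);
    } catch (KafkaException e) {
        errorsOut.add(e); // e.g. OffsetOutOfRangeException for an errored partition
    }
    return drained;
}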

Example 39 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class FetcherTest, method testFetcherConcurrency:

@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++) topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, 2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions, metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData().setPartitionIndex(tp.topicPartition().partition()).setHighWatermark(offset + 2).setLastStableOffset(offset + 2).setLogStartOffset(0).setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
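The race-widening idea inside verifySessionPartitions() also works in isolation. A minimal sketch, assuming nothing beyond the JDK; the method name traverseWithYield is a hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;

// Traverse the map while yielding between entries. If another thread
// structurally modifies the map without proper synchronization, the iterator
// will almost certainly throw ConcurrentModificationException, turning a
// rare, hard-to-reproduce race into a reliably failing test.
static void traverseWithYield(LinkedHashMap<?, ?> map) {
    for (Map.Entry<?, ?> entry : map.entrySet()) {
        Thread.yield(); // widen the window for a concurrent writer
    }
}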

Example 40 with TopicIdPartition

Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

From the class FetcherTest, method testFetchResponseMetricsWithOnePartitionError:

@Test
public void testFetchResponseMetricsWithOnePartitionError() {
    buildFetcher();
    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setHighWatermark(100).setLogStartOffset(0).setRecords(records));
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100).setLogStartOffset(0));
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    fetcher.collectFetch();
    int expectedBytes = 0;
    for (Record record : records.records()) expectedBytes += record.sizeInBytes();
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
}
