Search in sources :

Example 1 with TopicIdPartition

use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

Example from class FetchRequest, method forgottenTopics.

// For versions < 13, builds the forgotten topics list using only the FetchRequestData.
// For versions 13+, builds the forgotten topics list using both the FetchRequestData and a mapping of topic IDs to names.
// Resolves the partitions to remove ("forget") from an incremental fetch session, lazily
// computing the list once and caching it in the toForget field.
// For request versions < 13 the topic name is read directly from the request data; for 13+
// the name is looked up in the supplied topicId -> name map and may be null if unresolved.
// NOTE(review): this is the double-checked locking idiom on toForget; for it to be safe under
// the Java memory model the toForget field must be declared volatile — confirm at the field
// declaration, which is outside this snippet.
public List<TopicIdPartition> forgottenTopics(Map<Uuid, String> topicNames) {
    if (toForget == null) {
        // Build the cached list exactly once; concurrent callers wait on the lock.
        synchronized (this) {
            if (toForget == null) {
                toForget = new ArrayList<>();
                data.forgottenTopicsData().forEach(forgottenTopic -> {
                    String name;
                    if (version() < 13) {
                        // Pre-v13 requests always carry the topic name inline; can't be null
                        name = forgottenTopic.topic();
                    } else {
                        // v13+ requests carry only the topic ID; resolve the name via the map.
                        name = topicNames.get(forgottenTopic.topicId());
                    }
                    // Topic name may be null here if the topic name was unable to be resolved using the topicNames map.
                    forgottenTopic.partitions().forEach(partitionId -> toForget.add(new TopicIdPartition(forgottenTopic.topicId(), new TopicPartition(name, partitionId))));
                });
            }
        }
    }
    return toForget;
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) TopicIdPartition(org.apache.kafka.common.TopicIdPartition)

Example 2 with TopicIdPartition

use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

Example from class FetchSessionHandlerTest, method testIncrementalPartitionRemoval.

@Test
public void testIncrementalPartitionRemoval() {
    Map<String, Uuid> idsByName = new HashMap<>();
    Map<Uuid, String> namesById = new HashMap<>();
    // Run once against a pre-topic-ID version (12) and once against the newest fetch version.
    List<Short> versionsToTest = Arrays.asList((short) 12, ApiKeys.FETCH.latestVersion());
    for (short version : versionsToTest) {
        FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
        // Establish a full session containing foo-0, foo-1 and bar-0.
        FetchSessionHandler.Builder fullBuilder = handler.newBuilder();
        addTopicId(idsByName, namesById, "foo", version);
        addTopicId(idsByName, namesById, "bar", version);
        Uuid fooId = idsByName.getOrDefault("foo", Uuid.ZERO_UUID);
        Uuid barId = idsByName.getOrDefault("bar", Uuid.ZERO_UUID);
        TopicPartition foo0 = new TopicPartition("foo", 0);
        TopicPartition foo1 = new TopicPartition("foo", 1);
        TopicPartition bar0 = new TopicPartition("bar", 0);
        fullBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        fullBuilder.add(foo1, new FetchRequest.PartitionData(fooId, 10, 110, 210, Optional.empty()));
        fullBuilder.add(bar0, new FetchRequest.PartitionData(barId, 20, 120, 220, Optional.empty()));
        FetchSessionHandler.FetchRequestData fullData = fullBuilder.build();
        assertMapsEqual(reqMap(
                new ReqEntry("foo", fooId, 0, 0, 100, 200),
                new ReqEntry("foo", fooId, 1, 10, 110, 210),
                new ReqEntry("bar", barId, 0, 20, 120, 220)),
            fullData.toSend(), fullData.sessionPartitions());
        assertTrue(fullData.metadata().isFull());
        FetchResponse fullResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(
            new RespEntry("foo", 0, fooId, 10, 20),
            new RespEntry("foo", 1, fooId, 10, 20),
            new RespEntry("bar", 0, barId, 10, 20)));
        handler.handleResponse(fullResponse, version);
        // An incremental request that keeps only foo-1 must forget foo-0 and bar-0.
        FetchSessionHandler.Builder incrementalBuilder = handler.newBuilder();
        incrementalBuilder.add(foo1, new FetchRequest.PartitionData(fooId, 10, 110, 210, Optional.empty()));
        FetchSessionHandler.FetchRequestData incrementalData = incrementalBuilder.build();
        assertFalse(incrementalData.metadata().isFull());
        assertEquals(123, incrementalData.metadata().sessionId());
        assertEquals(1, incrementalData.metadata().epoch());
        assertMapEquals(reqMap(new ReqEntry("foo", fooId, 1, 10, 110, 210)), incrementalData.sessionPartitions());
        assertMapEquals(reqMap(), incrementalData.toSend());
        ArrayList<TopicIdPartition> expectedForgotten = new ArrayList<>();
        expectedForgotten.add(new TopicIdPartition(fooId, foo0));
        expectedForgotten.add(new TopicIdPartition(barId, bar0));
        assertListEquals(expectedForgotten, incrementalData.toForget());
        // A FETCH_SESSION_ID_NOT_FOUND response closes the session; the next request must
        // re-establish a full session from scratch.
        FetchResponse errorResponse = FetchResponse.of(Errors.FETCH_SESSION_ID_NOT_FOUND, 0, INVALID_SESSION_ID, respMap());
        handler.handleResponse(errorResponse, version);
        FetchSessionHandler.Builder reestablishBuilder = handler.newBuilder();
        reestablishBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData reestablishData = reestablishBuilder.build();
        assertTrue(reestablishData.metadata().isFull());
        assertEquals(INVALID_SESSION_ID, reestablishData.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, reestablishData.metadata().epoch());
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200)),
            reestablishData.sessionPartitions(), reestablishData.toSend());
    }
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 3 with TopicIdPartition

use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

Example from class FetchSessionHandlerTest, method testTopicIdReplaced.

@ParameterizedTest
@MethodSource("idUsageCombinations")
public void testTopicIdReplaced(boolean startsWithTopicIds, boolean endsWithTopicIds) {
    TopicPartition tp = new TopicPartition("foo", 0);
    FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    // Establish a full session with the initial topic ID (ZERO_UUID when IDs are disabled).
    FetchSessionHandler.Builder initialBuilder = handler.newBuilder();
    Uuid topicId1 = startsWithTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    initialBuilder.add(tp, new FetchRequest.PartitionData(topicId1, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData initialData = initialBuilder.build();
    assertMapsEqual(reqMap(new ReqEntry("foo", topicId1, 0, 0, 100, 200)),
        initialData.toSend(), initialData.sessionPartitions());
    assertTrue(initialData.metadata().isFull());
    assertEquals(startsWithTopicIds, initialData.canUseTopicIds());
    FetchResponse response = FetchResponse.of(Errors.NONE, 0, 123,
        respMap(new RespEntry("foo", 0, topicId1, 10, 20)));
    short version = startsWithTopicIds ? ApiKeys.FETCH.latestVersion() : 12;
    handler.handleResponse(response, version);
    // Issue an incremental request that swaps in a second topic ID with otherwise identical data.
    FetchSessionHandler.Builder incrementalBuilder = handler.newBuilder();
    Uuid topicId2 = endsWithTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    FetchRequest.PartitionData samePartitionData =
        new FetchRequest.PartitionData(topicId2, 0, 100, 200, Optional.empty());
    incrementalBuilder.add(tp, samePartitionData);
    FetchSessionHandler.FetchRequestData incrementalData = incrementalBuilder.build();
    if (startsWithTopicIds && endsWithTopicIds) {
        // Replacing one real topic ID with another: the old partition lands in toReplace
        // and the new one in toSend.
        assertEquals(Collections.singletonList(new TopicIdPartition(topicId1, tp)), incrementalData.toReplace());
        assertMapsEqual(reqMap(new ReqEntry("foo", topicId2, 0, 0, 100, 200)),
            incrementalData.toSend(), incrementalData.sessionPartitions());
        // Only the second topic ID remains in sessionTopicNames.
        assertEquals(Collections.singletonMap(topicId2, tp.topic()), handler.sessionTopicNames());
    } else if (startsWithTopicIds || endsWithTopicIds) {
        // Switching topic-ID usage on or off mid-session: the data is resent but nothing
        // counts as replaced; the session will close because the request type changed.
        // The new topic ID stays in the session partition map.
        assertEquals(Collections.emptyList(), incrementalData.toReplace());
        assertMapsEqual(reqMap(new ReqEntry("foo", topicId2, 0, 0, 100, 200)),
            incrementalData.toSend(), incrementalData.sessionPartitions());
        // sessionTopicNames is emptied when the request no longer uses topic IDs.
        if (endsWithTopicIds) {
            assertEquals(Collections.singletonMap(topicId2, tp.topic()), handler.sessionTopicNames());
        } else {
            assertEquals(Collections.emptyMap(), handler.sessionTopicNames());
        }
    } else {
        // No IDs on either side and unchanged partition data: nothing replaced, nothing to send.
        assertEquals(Collections.emptyList(), incrementalData.toReplace());
        assertEquals(Collections.emptyMap(), incrementalData.toSend());
        assertMapsEqual(reqMap(new ReqEntry("foo", topicId2, 0, 0, 100, 200)), incrementalData.sessionPartitions());
        // sessionTopicNames stays empty because no topic IDs are in use.
        assertEquals(Collections.emptyMap(), handler.sessionTopicNames());
    }
    // The session itself continues: same ID, bumped epoch, ID usage follows the final state.
    assertEquals(123, incrementalData.metadata().sessionId(), "Did not use same session");
    assertEquals(1, incrementalData.metadata().epoch(), "Did not have correct epoch");
    assertEquals(endsWithTopicIds, incrementalData.canUseTopicIds());
}
Also used : FetchResponse(org.apache.kafka.common.requests.FetchResponse) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)

Example 4 with TopicIdPartition

use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

Example from class FetchSessionHandlerTest, method testIdUsageWithAllForgottenPartitions.

@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testIdUsageWithAllForgottenPartitions(boolean useTopicIds) {
    // Verify session behavior when every topic is removed from the session.
    TopicPartition foo0 = new TopicPartition("foo", 0);
    Uuid fooId = useTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    short responseVersion = useTopicIds ? ApiKeys.FETCH.latestVersion() : 12;
    FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    // Establish a full session containing only foo-0.
    FetchSessionHandler.Builder initialBuilder = handler.newBuilder();
    initialBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData initialData = initialBuilder.build();
    assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200)),
        initialData.toSend(), initialData.sessionPartitions());
    assertTrue(initialData.metadata().isFull());
    assertEquals(useTopicIds, initialData.canUseTopicIds());
    FetchResponse response = FetchResponse.of(Errors.NONE, 0, 123,
        respMap(new RespEntry("foo", 0, fooId, 10, 20)));
    handler.handleResponse(response, responseVersion);
    // An empty incremental request should forget the only partition in the session.
    FetchSessionHandler.Builder emptyBuilder = handler.newBuilder();
    FetchSessionHandler.FetchRequestData emptyData = emptyBuilder.build();
    assertEquals(Collections.singletonList(new TopicIdPartition(fooId, foo0)), emptyData.toForget());
    // The session ID is retained, the epoch advances, and topic-ID usage is unchanged.
    assertEquals(123, emptyData.metadata().sessionId(), "Did not use same session when useTopicIds was " + useTopicIds);
    assertEquals(1, emptyData.metadata().epoch(), "Did not have correct epoch when useTopicIds was " + useTopicIds);
    assertEquals(useTopicIds, emptyData.canUseTopicIds());
}
Also used : Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 5 with TopicIdPartition

use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.

Example from class FetchRequestTest, method testForgottenTopics.

@ParameterizedTest
@MethodSource("fetchVersions")
public void testForgottenTopics(short version) {
    // Forgotten topics are not allowed prior to version 7.
    if (version < 7) {
        return;
    }
    TopicPartition topicPartition0 = new TopicPartition("topic", 0);
    TopicPartition topicPartition1 = new TopicPartition("unknownIdTopic", 0);
    Uuid topicId0 = Uuid.randomUuid();
    Uuid topicId1 = Uuid.randomUuid();
    // The names map resolves only the first topic's ID; the second stays unresolvable.
    Map<Uuid, String> topicNames = Collections.singletonMap(topicId0, topicPartition0.topic());
    List<TopicIdPartition> toForgetTopics = new LinkedList<>();
    toForgetTopics.add(new TopicIdPartition(topicId0, topicPartition0));
    toForgetTopics.add(new TopicIdPartition(topicId1, topicPartition1));
    boolean usesTopicIds = version >= 13;
    // Round-trip the request through serialize/parse so the forgotten topics pass through
    // the wire format for this version.
    FetchRequest fetchRequest = FetchRequest.parse(
        FetchRequest.Builder.forReplica(version, 0, 1, 1, Collections.emptyMap())
            .removed(toForgetTopics)
            .replaced(Collections.emptyList())
            .metadata(FetchMetadata.newIncremental(123))
            .build(version)
            .serialize(),
        version);
    // Versions < 13 serialize a topic name with a zero UUID; 13+ serialize the topic ID
    // with an empty name.
    List<TopicIdPartition> expectedForgottenTopicData = new LinkedList<>();
    for (TopicIdPartition tidp : toForgetTopics) {
        String expectedName = usesTopicIds ? "" : tidp.topic();
        Uuid expectedTopicId = usesTopicIds ? tidp.topicId() : Uuid.ZERO_UUID;
        expectedForgottenTopicData.add(new TopicIdPartition(expectedTopicId, tidp.partition(), expectedName));
    }
    // Rebuild TopicIdPartitions from the parsed FetchRequestData and compare with expectations.
    List<TopicIdPartition> convertedForgottenTopicData = new LinkedList<>();
    fetchRequest.data().forgottenTopicsData().forEach(forgottenTopic ->
        forgottenTopic.partitions().forEach(partitionId ->
            convertedForgottenTopicData.add(
                new TopicIdPartition(forgottenTopic.topicId(), partitionId, forgottenTopic.topic()))));
    assertEquals(expectedForgottenTopicData, convertedForgottenTopicData);
    // forgottenTopics() resolves names via the map for versions 13+ (null when unresolvable)
    // and uses the serialized names with zero UUIDs for older versions. The expectations
    // differ from the earlier list because empty strings become nulls.
    List<TopicIdPartition> forgottenTopics = fetchRequest.forgottenTopics(topicNames);
    assertEquals(expectedForgottenTopicData.size(), forgottenTopics.size());
    List<TopicIdPartition> expectedForgottenTopics = new LinkedList<>();
    for (TopicIdPartition tidp : expectedForgottenTopicData) {
        String expectedName = usesTopicIds ? topicNames.get(tidp.topicId()) : tidp.topic();
        expectedForgottenTopics.add(new TopicIdPartition(tidp.topicId(), new TopicPartition(expectedName, tidp.partition())));
    }
    assertEquals(expectedForgottenTopics, forgottenTopics);
}
Also used : Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) Arguments(org.junit.jupiter.params.provider.Arguments) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) Test(org.junit.jupiter.api.Test) LinkedHashMap(java.util.LinkedHashMap) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) List(java.util.List) Stream(java.util.stream.Stream) Map(java.util.Map) Optional(java.util.Optional) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) LinkedList(java.util.LinkedList) Collections(java.util.Collections) MethodSource(org.junit.jupiter.params.provider.MethodSource) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) LinkedList(java.util.LinkedList) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)

Aggregations

TopicIdPartition (org.apache.kafka.common.TopicIdPartition)47 TopicPartition (org.apache.kafka.common.TopicPartition)32 Test (org.junit.jupiter.api.Test)25 LinkedHashMap (java.util.LinkedHashMap)22 Uuid (org.apache.kafka.common.Uuid)18 ArrayList (java.util.ArrayList)17 HashMap (java.util.HashMap)16 FetchResponseData (org.apache.kafka.common.message.FetchResponseData)15 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)13 MemoryRecords (org.apache.kafka.common.record.MemoryRecords)13 List (java.util.List)12 FetchRequest (org.apache.kafka.common.requests.FetchRequest)12 PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData)12 Arrays.asList (java.util.Arrays.asList)10 Collections.emptyList (java.util.Collections.emptyList)10 Collections.singletonList (java.util.Collections.singletonList)10 SimpleRecord (org.apache.kafka.common.record.SimpleRecord)10 KafkaException (org.apache.kafka.common.KafkaException)9 MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)8 FetchResponse (org.apache.kafka.common.requests.FetchResponse)8