Search in sources :

Example 1 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

From the class MetadataTest, the method testEpochUpdateAfterTopicDeletion.

@Test
public void testEpochUpdateAfterTopicDeletion() {
    // Verifies that the last-seen leader epoch survives a topic-deletion error
    // response, and is reset once the topic reappears under a new topic ID.
    TopicPartition partition = new TopicPartition("topic-1", 0);
    metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);

    // Seed the cache: topic-1 with a freshly generated topic ID at leader epoch 10.
    Map<String, Uuid> originalIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    MetadataResponse withTopic = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, originalIds);
    metadata.updateWithCurrentRequestVersion(withTopic, false, 1L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(partition));

    // The topic is deleted, so the response carries an error. The cached leader
    // epoch must keep its old value.
    MetadataResponse deleted = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.singletonMap("topic-1", Errors.UNKNOWN_TOPIC_OR_PARTITION), Collections.emptyMap());
    metadata.updateWithCurrentRequestVersion(deleted, false, 1L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(partition));

    // topic-1 is re-created under a different topic ID: the new epoch must be
    // accepted even though it is lower (5 < 10).
    Map<String, Uuid> recreatedIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    MetadataResponse recreated = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 5, recreatedIds);
    metadata.updateWithCurrentRequestVersion(recreated, false, 1L);
    assertEquals(Optional.of(5), metadata.lastSeenLeaderEpoch(partition));
}
Also used : Uuid(org.apache.kafka.common.Uuid) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) MessageUtil(org.apache.kafka.common.protocol.MessageUtil) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) MockClusterResourceListener(org.apache.kafka.test.MockClusterResourceListener) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteBuffer(java.nio.ByteBuffer) HashSet(java.util.HashSet) Cluster(org.apache.kafka.common.Cluster) MetadataResponseBrokerCollection(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseBrokerCollection) MetadataResponseTopic(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) Topic(org.apache.kafka.common.internals.Topic) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponseTopicCollection(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopicCollection) Time(org.apache.kafka.common.utils.Time) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) InetSocketAddress(java.net.InetSocketAddress) Test(org.junit.jupiter.api.Test) Objects(java.util.Objects) List(java.util.List) 
MetadataResponsePartition(org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Collections(java.util.Collections) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Test(org.junit.jupiter.api.Test)

Example 2 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

From the class FetchSessionHandlerTest, the method testSessionless.

/**
 * Test the handling of SESSIONLESS responses.
 * Pre-KIP-227 brokers always supply this kind of response.
 */
@Test
public void testSessionless() {
    Map<String, Uuid> idsByTopic = new HashMap<>();
    Map<Uuid, String> namesById = new HashMap<>();
    // Exercise both a pre-topic-ID fetch version (12) and the latest version.
    for (short version : Arrays.asList((short) 12, ApiKeys.FETCH.latestVersion())) {
        FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
        addTopicId(idsByTopic, namesById, "foo", version);
        Uuid fooId = idsByTopic.getOrDefault("foo", Uuid.ZERO_UUID);

        // First request: two partitions of "foo"; no session yet, so the
        // metadata must show the invalid session ID and the initial epoch.
        FetchSessionHandler.Builder firstBuilder = handler.newBuilder();
        firstBuilder.add(new TopicPartition("foo", 0), new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        firstBuilder.add(new TopicPartition("foo", 1), new FetchRequest.PartitionData(fooId, 10, 110, 210, Optional.empty()));
        FetchSessionHandler.FetchRequestData first = firstBuilder.build();
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200), new ReqEntry("foo", fooId, 1, 10, 110, 210)), first.toSend(), first.sessionPartitions());
        assertEquals(INVALID_SESSION_ID, first.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, first.metadata().epoch());

        // A sessionless response must leave the following request sessionless too.
        FetchResponse response = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 0, 0), new RespEntry("foo", 1, fooId, 0, 0)));
        handler.handleResponse(response, version);

        FetchSessionHandler.Builder secondBuilder = handler.newBuilder();
        secondBuilder.add(new TopicPartition("foo", 0), new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData second = secondBuilder.build();
        assertEquals(INVALID_SESSION_ID, second.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, second.metadata().epoch());
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200)), second.toSend(), second.sessionPartitions());
    }
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) FetchResponse(org.apache.kafka.common.requests.FetchResponse) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 3 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

From the class FetchSessionHandlerTest, the method testIncrementalPartitionRemoval.

@Test
public void testIncrementalPartitionRemoval() {
    Map<String, Uuid> idsByTopic = new HashMap<>();
    Map<Uuid, String> namesById = new HashMap<>();
    // Exercise both a pre-topic-ID fetch version (12) and the latest version.
    for (short version : Arrays.asList((short) 12, ApiKeys.FETCH.latestVersion())) {
        FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
        addTopicId(idsByTopic, namesById, "foo", version);
        addTopicId(idsByTopic, namesById, "bar", version);
        Uuid fooId = idsByTopic.getOrDefault("foo", Uuid.ZERO_UUID);
        Uuid barId = idsByTopic.getOrDefault("bar", Uuid.ZERO_UUID);
        TopicPartition foo0 = new TopicPartition("foo", 0);
        TopicPartition foo1 = new TopicPartition("foo", 1);
        TopicPartition bar0 = new TopicPartition("bar", 0);

        // Full request establishing a session that tracks three partitions.
        FetchSessionHandler.Builder fullBuilder = handler.newBuilder();
        fullBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        fullBuilder.add(foo1, new FetchRequest.PartitionData(fooId, 10, 110, 210, Optional.empty()));
        fullBuilder.add(bar0, new FetchRequest.PartitionData(barId, 20, 120, 220, Optional.empty()));
        FetchSessionHandler.FetchRequestData full = fullBuilder.build();
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200), new ReqEntry("foo", fooId, 1, 10, 110, 210), new ReqEntry("bar", barId, 0, 20, 120, 220)), full.toSend(), full.sessionPartitions());
        assertTrue(full.metadata().isFull());
        handler.handleResponse(FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20), new RespEntry("bar", 0, barId, 10, 20))), version);

        // Incremental request keeping only foo-1: foo-0 and bar-0 are dropped,
        // so they must show up in toForget while nothing new is sent.
        FetchSessionHandler.Builder incrementalBuilder = handler.newBuilder();
        incrementalBuilder.add(foo1, new FetchRequest.PartitionData(fooId, 10, 110, 210, Optional.empty()));
        FetchSessionHandler.FetchRequestData incremental = incrementalBuilder.build();
        assertFalse(incremental.metadata().isFull());
        assertEquals(123, incremental.metadata().sessionId());
        assertEquals(1, incremental.metadata().epoch());
        assertMapEquals(reqMap(new ReqEntry("foo", fooId, 1, 10, 110, 210)), incremental.sessionPartitions());
        assertMapEquals(reqMap(), incremental.toSend());
        ArrayList<TopicIdPartition> expectedForgotten = new ArrayList<>();
        expectedForgotten.add(new TopicIdPartition(fooId, foo0));
        expectedForgotten.add(new TopicIdPartition(barId, bar0));
        assertListEquals(expectedForgotten, incremental.toForget());

        // A FETCH_SESSION_ID_NOT_FOUND response triggers us to close the session.
        // The next request is a session establishing FULL request.
        handler.handleResponse(FetchResponse.of(Errors.FETCH_SESSION_ID_NOT_FOUND, 0, INVALID_SESSION_ID, respMap()), version);
        FetchSessionHandler.Builder recoveryBuilder = handler.newBuilder();
        recoveryBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData recovery = recoveryBuilder.build();
        assertTrue(recovery.metadata().isFull());
        assertEquals(INVALID_SESSION_ID, recovery.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, recovery.metadata().epoch());
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200)), recovery.sessionPartitions(), recovery.toSend());
    }
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 4 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

From the class FetchSessionHandlerTest, the method testTopicIdReplaced.

/**
 * Exercises how the fetch session handler reacts when the topic ID for an
 * in-session partition changes between requests, across all four combinations
 * of "first request carries topic IDs" x "second request carries topic IDs".
 */
@ParameterizedTest
@MethodSource("idUsageCombinations")
public void testTopicIdReplaced(boolean startsWithTopicIds, boolean endsWithTopicIds) {
    TopicPartition tp = new TopicPartition("foo", 0);
    FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    FetchSessionHandler.Builder builder = handler.newBuilder();
    // ZERO_UUID stands in for "no topic ID" (pre-topic-ID fetch versions).
    Uuid topicId1 = startsWithTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    builder.add(tp, new FetchRequest.PartitionData(topicId1, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData data = builder.build();
    assertMapsEqual(reqMap(new ReqEntry("foo", topicId1, 0, 0, 100, 200)), data.toSend(), data.sessionPartitions());
    assertTrue(data.metadata().isFull());
    assertEquals(startsWithTopicIds, data.canUseTopicIds());
    FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicId1, 10, 20)));
    short version = startsWithTopicIds ? ApiKeys.FETCH.latestVersion() : 12;
    handler.handleResponse(resp, version);
    // Try to add a new topic ID.
    FetchSessionHandler.Builder builder2 = handler.newBuilder();
    Uuid topicId2 = endsWithTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    // Use the same data besides the topic ID.
    FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(topicId2, 0, 100, 200, Optional.empty());
    builder2.add(tp, partitionData);
    FetchSessionHandler.FetchRequestData data2 = builder2.build();
    if (startsWithTopicIds && endsWithTopicIds) {
        // If we started with a topic ID, only a replacement with a different
        // (non-zero) topic ID counts as "replaced".
        // The old topic ID partition should be in toReplace, and the new one should be in toSend.
        assertEquals(Collections.singletonList(new TopicIdPartition(topicId1, tp)), data2.toReplace());
        assertMapsEqual(reqMap(new ReqEntry("foo", topicId2, 0, 0, 100, 200)), data2.toSend(), data2.sessionPartitions());
        // sessionTopicNames should contain only the second topic ID.
        assertEquals(Collections.singletonMap(topicId2, tp.topic()), handler.sessionTopicNames());
    } else if (startsWithTopicIds || endsWithTopicIds) {
        // If we downgraded to not using topic IDs we will want to send this data.
        // However, we will not mark the partition as one replaced. In this scenario, we should see the session close due to
        // changing request types.
        // We will have the new topic ID in the session partition map
        assertEquals(Collections.emptyList(), data2.toReplace());
        assertMapsEqual(reqMap(new ReqEntry("foo", topicId2, 0, 0, 100, 200)), data2.toSend(), data2.sessionPartitions());
        // The old topic ID should be removed as the map will be empty if the request doesn't use topic IDs.
        if (endsWithTopicIds) {
            assertEquals(Collections.singletonMap(topicId2, tp.topic()), handler.sessionTopicNames());
        } else {
            assertEquals(Collections.emptyMap(), handler.sessionTopicNames());
        }
    } else {
        // Otherwise, we have no partition in toReplace and since the partition and topic ID was not updated, there is no data to send.
        assertEquals(Collections.emptyList(), data2.toReplace());
        assertEquals(Collections.emptyMap(), data2.toSend());
        assertMapsEqual(reqMap(new ReqEntry("foo", topicId2, 0, 0, 100, 200)), data2.sessionPartitions());
        // There is also nothing in the sessionTopicNames map, as there are no topic IDs used.
        assertEquals(Collections.emptyMap(), handler.sessionTopicNames());
    }
    // Should have the same session ID, and next epoch and can use topic IDs if it ended with topic IDs.
    assertEquals(123, data2.metadata().sessionId(), "Did not use same session");
    assertEquals(1, data2.metadata().epoch(), "Did not have correct epoch");
    assertEquals(endsWithTopicIds, data2.canUseTopicIds());
}
Also used : FetchResponse(org.apache.kafka.common.requests.FetchResponse) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)

Example 5 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

From the class FetchSessionHandlerTest, the helper method addTopicId.

/**
 * Registers a freshly generated random topic ID for {@code name} in both
 * lookup maps, but only for fetch versions that carry topic IDs (v13+).
 * For older versions the maps are left untouched.
 */
private void addTopicId(Map<String, Uuid> topicIds, Map<Uuid, String> topicNames, String name, short version) {
    if (version < 13) {
        return;
    }
    Uuid generated = Uuid.randomUuid();
    topicIds.put(name, generated);
    topicNames.put(generated, name);
}
Also used : Uuid(org.apache.kafka.common.Uuid)

Aggregations

Uuid (org.apache.kafka.common.Uuid)95 Test (org.junit.jupiter.api.Test)55 HashMap (java.util.HashMap)42 TopicPartition (org.apache.kafka.common.TopicPartition)40 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)30 ArrayList (java.util.ArrayList)29 Map (java.util.Map)21 ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion)21 LinkedHashMap (java.util.LinkedHashMap)18 List (java.util.List)15 FetchRequest (org.apache.kafka.common.requests.FetchRequest)14 TopicIdPartition (org.apache.kafka.common.TopicIdPartition)13 Errors (org.apache.kafka.common.protocol.Errors)12 FetchResponse (org.apache.kafka.common.requests.FetchResponse)12 Collections (java.util.Collections)11 ByteBuffer (java.nio.ByteBuffer)10 Node (org.apache.kafka.common.Node)10 CreateTopicsResponseData (org.apache.kafka.common.message.CreateTopicsResponseData)10 MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)10 PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration)10