
Example 26 with ByteBufferAccessor

Use of org.apache.kafka.common.protocol.ByteBufferAccessor in the Apache Kafka project.

From class ConsumerProtocol, method deserializeSubscription.

public static Subscription deserializeSubscription(final ByteBuffer buffer, short version) {
    version = checkSubscriptionVersion(version);
    try {
        ConsumerProtocolSubscription data = new ConsumerProtocolSubscription(new ByteBufferAccessor(buffer), version);
        List<TopicPartition> ownedPartitions = new ArrayList<>();
        for (ConsumerProtocolSubscription.TopicPartition tp : data.ownedPartitions()) {
            for (Integer partition : tp.partitions()) {
                ownedPartitions.add(new TopicPartition(tp.topic(), partition));
            }
        }
        return new Subscription(data.topics(), data.userData() != null ? data.userData().duplicate() : null, ownedPartitions);
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing consumer protocol's subscription", e);
    }
}
Also used : SchemaException(org.apache.kafka.common.protocol.types.SchemaException) TopicPartition(org.apache.kafka.common.TopicPartition) ConsumerProtocolSubscription(org.apache.kafka.common.message.ConsumerProtocolSubscription) ArrayList(java.util.ArrayList) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) BufferUnderflowException(java.nio.BufferUnderflowException)
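
A minimal round trip through this method, as a sketch: `ConsumerProtocol.serializeSubscription` is the matching writer in the same class, but the exact overloads and the `HIGHEST_SUPPORTED_VERSION` constant should be checked against the Kafka version in use.

// Build a subscription with one owned partition and no user data.
Subscription subscription = new Subscription(
    Arrays.asList("topic0", "topic1"),
    null,
    Collections.singletonList(new TopicPartition("topic0", 0)));
// Serialize, then parse back with the deserializer shown above.
ByteBuffer buffer = ConsumerProtocol.serializeSubscription(subscription);
Subscription parsed = ConsumerProtocol.deserializeSubscription(
    buffer, ConsumerProtocolSubscription.HIGHEST_SUPPORTED_VERSION);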

Example 27 with ByteBufferAccessor

Use of org.apache.kafka.common.protocol.ByteBufferAccessor in the Apache Kafka project.

From class RequestResponseTest, method testFetchRequestCompat.

@Test
public void testFetchRequestCompat() {
    Map<TopicPartition, FetchRequest.PartitionData> fetchData = new HashMap<>();
    fetchData.put(new TopicPartition("test", 0), new FetchRequest.PartitionData(Uuid.ZERO_UUID, 100, 2, 100, Optional.of(42)));
    FetchRequest req = FetchRequest.Builder.forConsumer((short) 2, 100, 100, fetchData)
        .metadata(new FetchMetadata(10, 20))
        .isolationLevel(IsolationLevel.READ_COMMITTED)
        .build((short) 2);
    FetchRequestData data = req.data();
    ObjectSerializationCache cache = new ObjectSerializationCache();
    int size = data.size(cache, (short) 2);
    ByteBufferAccessor writer = new ByteBufferAccessor(ByteBuffer.allocate(size));
    data.write(writer, cache, (short) 2);
}
Also used : ObjectSerializationCache(org.apache.kafka.common.protocol.ObjectSerializationCache) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequestData(org.apache.kafka.common.message.FetchRequestData) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) UpdateMetadataEndpoint(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) Test(org.junit.jupiter.api.Test)
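
The buffer written by the test can be read back through the generated class's readable constructor; a sketch of the reverse path (assuming `ByteBufferAccessor.buffer()` exposes the underlying buffer, as it does in current Kafka versions):

// Flip the freshly written buffer and deserialize it at the same version.
ByteBuffer buffer = writer.buffer();
buffer.flip();
FetchRequestData roundTrip = new FetchRequestData(new ByteBufferAccessor(buffer), (short) 2);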

Example 28 with ByteBufferAccessor

Use of org.apache.kafka.common.protocol.ByteBufferAccessor in the Apache Kafka project.

From class LeaderAndIsrRequestTest, method testVersionLogic.

/**
 * Verifies the logic we have in LeaderAndIsrRequest to present a unified interface across the various versions
 * works correctly. For example, `LeaderAndIsrPartitionState.topicName` is not serialized/deserialized in
 * recent versions, but we set it manually so that we can always present the ungrouped partition states
 * independently of the version.
 */
@Test
public void testVersionLogic() {
    for (short version : LEADER_AND_ISR.allVersions()) {
        List<LeaderAndIsrPartitionState> partitionStates = asList(
            new LeaderAndIsrPartitionState().setTopicName("topic0").setPartitionIndex(0).setControllerEpoch(2)
                .setLeader(0).setLeaderEpoch(10).setIsr(asList(0, 1)).setZkVersion(10)
                .setReplicas(asList(0, 1, 2)).setAddingReplicas(asList(3)).setRemovingReplicas(asList(2)),
            new LeaderAndIsrPartitionState().setTopicName("topic0").setPartitionIndex(1).setControllerEpoch(2)
                .setLeader(1).setLeaderEpoch(11).setIsr(asList(1, 2, 3)).setZkVersion(11)
                .setReplicas(asList(1, 2, 3)).setAddingReplicas(emptyList()).setRemovingReplicas(emptyList()),
            new LeaderAndIsrPartitionState().setTopicName("topic1").setPartitionIndex(0).setControllerEpoch(2)
                .setLeader(2).setLeaderEpoch(11).setIsr(asList(2, 3, 4)).setZkVersion(11)
                .setReplicas(asList(2, 3, 4)).setAddingReplicas(emptyList()).setRemovingReplicas(emptyList()));
        List<Node> liveNodes = asList(new Node(0, "host0", 9090), new Node(1, "host1", 9091));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put("topic0", Uuid.randomUuid());
        topicIds.put("topic1", Uuid.randomUuid());
        LeaderAndIsrRequest request = new LeaderAndIsrRequest.Builder(version, 1, 2, 3, partitionStates, topicIds, liveNodes).build();
        List<LeaderAndIsrLiveLeader> liveLeaders = liveNodes.stream()
            .map(n -> new LeaderAndIsrLiveLeader().setBrokerId(n.id()).setHostName(n.host()).setPort(n.port()))
            .collect(Collectors.toList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveLeaders, request.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        LeaderAndIsrRequest deserializedRequest = new LeaderAndIsrRequest(new LeaderAndIsrRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Adding/removing replicas are only supported from version 3, so the deserialized request won't have
        // them for earlier versions.
        if (version < 3) {
            partitionStates.get(0).setAddingReplicas(emptyList()).setRemovingReplicas(emptyList());
        }
        // Prior to version 2 there were no TopicStates, so the map of topic ids built from the list of
        // TopicStates is an empty map.
        if (version < 2) {
            topicIds = new HashMap<>();
        }
        // In versions 2-4 there are TopicStates but no topic ids, so deserialized requests will have
        // Zero Uuids in place.
        if (version > 1 && version < 5) {
            topicIds.put("topic0", Uuid.ZERO_UUID);
            topicIds.put("topic1", Uuid.ZERO_UUID);
        }
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(topicIds, deserializedRequest.topicIds());
        assertEquals(liveLeaders, deserializedRequest.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
    }
}
Also used : Uuid(org.apache.kafka.common.Uuid) LeaderAndIsrPartitionState(org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) LeaderAndIsrRequestData(org.apache.kafka.common.message.LeaderAndIsrRequestData) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) LeaderAndIsrTopicError(org.apache.kafka.common.message.LeaderAndIsrResponseData.LeaderAndIsrTopicError) HashMap(java.util.HashMap) LEADER_AND_ISR(org.apache.kafka.common.protocol.ApiKeys.LEADER_AND_ISR) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Arrays.asList(java.util.Arrays.asList) Map(java.util.Map) StreamSupport(java.util.stream.StreamSupport) LeaderAndIsrPartitionError(org.apache.kafka.common.message.LeaderAndIsrResponseData.LeaderAndIsrPartitionError) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TopicPartition(org.apache.kafka.common.TopicPartition) TestUtils(org.apache.kafka.test.TestUtils) Collections.emptyList(java.util.Collections.emptyList) Set(java.util.Set) Collectors(java.util.stream.Collectors) Test(org.junit.jupiter.api.Test) List(java.util.List) LeaderAndIsrLiveLeader(org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrLiveLeader) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) Collections(java.util.Collections)
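
The `iterableToSet` helper is not part of the excerpt; given the `StreamSupport` and `Collectors` imports listed above, it is presumably along these lines:

private static <T> Set<T> iterableToSet(Iterable<T> iterable) {
    // Materialize an Iterable into a Set via the Stream API.
    return StreamSupport.stream(iterable.spliterator(), false).collect(Collectors.toSet());
}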

Example 29 with ByteBufferAccessor

Use of org.apache.kafka.common.protocol.ByteBufferAccessor in the Apache Kafka project.

From class OffsetFetchResponseTest, method testStructBuild.

/**
 * Test behavior changes over the versions. Refer to resources.common.messages.OffsetFetchResponse.json
 */
@Test
public void testStructBuild() {
    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version < 8) {
            partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData(offset, leaderEpochTwo, metadata, Errors.GROUP_AUTHORIZATION_FAILED));
            OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap);
            OffsetFetchResponseData data = new OffsetFetchResponseData(new ByteBufferAccessor(latestResponse.serialize(version)), version);
            OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version);
            if (version <= 1) {
                assertEquals(Errors.NONE.code(), data.errorCode());
                // Partition level error populated in older versions.
                assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, oldResponse.error());
                assertEquals(Utils.mkMap(Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 2), Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), oldResponse.errorCounts());
            } else {
                assertEquals(Errors.NONE.code(), data.errorCode());
                assertEquals(Errors.NONE, oldResponse.error());
                assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NONE, 1), Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1), Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), oldResponse.errorCounts());
            }
            if (version <= 2) {
                assertEquals(DEFAULT_THROTTLE_TIME, oldResponse.throttleTimeMs());
            } else {
                assertEquals(throttleTimeMs, oldResponse.throttleTimeMs());
            }
            Map<TopicPartition, PartitionData> expectedDataMap = new HashMap<>();
            for (Map.Entry<TopicPartition, PartitionData> entry : partitionDataMap.entrySet()) {
                PartitionData partitionData = entry.getValue();
                expectedDataMap.put(entry.getKey(), new PartitionData(partitionData.offset, version <= 4 ? Optional.empty() : partitionData.leaderEpoch, partitionData.metadata, partitionData.error));
            }
            Map<TopicPartition, PartitionData> responseData = oldResponse.responseDataV0ToV7();
            assertEquals(expectedDataMap, responseData);
            responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError()));
        } else {
            partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData(offset, leaderEpochTwo, metadata, Errors.GROUP_AUTHORIZATION_FAILED));
            OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs, Collections.singletonMap(groupOne, Errors.NONE), Collections.singletonMap(groupOne, partitionDataMap));
            OffsetFetchResponseData data = new OffsetFetchResponseData(new ByteBufferAccessor(latestResponse.serialize(version)), version);
            OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version);
            assertEquals(Errors.NONE.code(), data.groups().get(0).errorCode());
            assertEquals(Errors.NONE, oldResponse.groupLevelError(groupOne));
            assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NONE, 1), Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1), Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), oldResponse.errorCounts());
            assertEquals(throttleTimeMs, oldResponse.throttleTimeMs());
            Map<TopicPartition, PartitionData> expectedDataMap = new HashMap<>();
            for (Map.Entry<TopicPartition, PartitionData> entry : partitionDataMap.entrySet()) {
                PartitionData partitionData = entry.getValue();
                expectedDataMap.put(entry.getKey(), new PartitionData(partitionData.offset, partitionData.leaderEpoch, partitionData.metadata, partitionData.error));
            }
            Map<TopicPartition, PartitionData> responseData = oldResponse.partitionDataMap(groupOne);
            assertEquals(expectedDataMap, responseData);
            responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError()));
        }
    }
}
Also used : OffsetFetchResponseData(org.apache.kafka.common.message.OffsetFetchResponseData) PartitionData(org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) Map(java.util.Map) Test(org.junit.jupiter.api.Test)
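
The `latestResponse.serialize(version)` call wraps the same size/write sequence seen in the other examples. A minimal sketch of that round trip for a generated message, assuming a populated `OffsetFetchResponseData` named `data`:

// Compute the serialized size, write the bytes, then parse them back.
ObjectSerializationCache cache = new ObjectSerializationCache();
ByteBuffer buffer = ByteBuffer.allocate(data.size(cache, version));
data.write(new ByteBufferAccessor(buffer), cache, version);
buffer.flip();
OffsetFetchResponseData parsed = new OffsetFetchResponseData(new ByteBufferAccessor(buffer), version);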

Example 30 with ByteBufferAccessor

Use of org.apache.kafka.common.protocol.ByteBufferAccessor in the Apache Kafka project.

From class RequestHeaderTest, method parseHeaderWithNullClientId.

@Test
public void parseHeaderWithNullClientId() {
    RequestHeaderData headerData = new RequestHeaderData()
        .setClientId(null)
        .setCorrelationId(123)
        .setRequestApiKey(ApiKeys.FIND_COORDINATOR.id)
        .setRequestApiVersion((short) 10);
    ObjectSerializationCache serializationCache = new ObjectSerializationCache();
    ByteBuffer buffer = ByteBuffer.allocate(headerData.size(serializationCache, (short) 2));
    headerData.write(new ByteBufferAccessor(buffer), serializationCache, (short) 2);
    buffer.flip();
    RequestHeader parsed = RequestHeader.parse(buffer);
    assertEquals("", parsed.clientId());
    assertEquals(123, parsed.correlationId());
    assertEquals(ApiKeys.FIND_COORDINATOR, parsed.apiKey());
    assertEquals((short) 10, parsed.apiVersion());
}
Also used : ObjectSerializationCache(org.apache.kafka.common.protocol.ObjectSerializationCache) RequestHeaderData(org.apache.kafka.common.message.RequestHeaderData) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)
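
For contrast, the same write-then-parse path with an explicit client id, reusing only the calls from the test above ("my-client" is an arbitrary example value; `RequestHeader.parse` is what maps a null client id to the empty string seen in the assertion):

// Round-trip a header whose client id is set, rather than null.
RequestHeaderData headerData = new RequestHeaderData()
    .setClientId("my-client")
    .setCorrelationId(124)
    .setRequestApiKey(ApiKeys.FIND_COORDINATOR.id)
    .setRequestApiVersion((short) 10);
ObjectSerializationCache cache = new ObjectSerializationCache();
ByteBuffer buffer = ByteBuffer.allocate(headerData.size(cache, (short) 2));
headerData.write(new ByteBufferAccessor(buffer), cache, (short) 2);
buffer.flip();
assertEquals("my-client", RequestHeader.parse(buffer).clientId());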

Aggregations

ByteBufferAccessor (org.apache.kafka.common.protocol.ByteBufferAccessor) 39
ByteBuffer (java.nio.ByteBuffer) 24
Test (org.junit.jupiter.api.Test) 23
ObjectSerializationCache (org.apache.kafka.common.protocol.ObjectSerializationCache) 13
TopicPartition (org.apache.kafka.common.TopicPartition) 6
ArrayList (java.util.ArrayList) 5
HashMap (java.util.HashMap) 4
Uuid (org.apache.kafka.common.Uuid) 4
UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException) 4
Collections (java.util.Collections) 3
List (java.util.List) 3
Map (java.util.Map) 3
LeaderChangeMessage (org.apache.kafka.common.message.LeaderChangeMessage) 3
UpdateMetadataEndpoint (org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) 3
Send (org.apache.kafka.common.network.Send) 3
BufferUnderflowException (java.nio.BufferUnderflowException) 2
Arrays.asList (java.util.Arrays.asList) 2
Collections.emptyList (java.util.Collections.emptyList) 2
HashSet (java.util.HashSet) 2
Set (java.util.Set) 2