Example 11 with ByteBufferAccessor

use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.

the class RequestResponseTest method verifyFetchResponseFullWrite.

private void verifyFetchResponseFullWrite(short version, FetchResponse fetchResponse) throws Exception {
    int correlationId = 15;
    short responseHeaderVersion = FETCH.responseHeaderVersion(version);
    Send send = fetchResponse.toSend(new ResponseHeader(correlationId, responseHeaderVersion), version);
    ByteBufferChannel channel = new ByteBufferChannel(send.size());
    send.writeTo(channel);
    channel.close();
    ByteBuffer buf = channel.buffer();
    // read the size
    int size = buf.getInt();
    assertTrue(size > 0);
    // read the header
    ResponseHeader responseHeader = ResponseHeader.parse(channel.buffer(), responseHeaderVersion);
    assertEquals(correlationId, responseHeader.correlationId());
    assertEquals(fetchResponse.serialize(version), buf);
    FetchResponseData deserialized = new FetchResponseData(new ByteBufferAccessor(buf), version);
    ObjectSerializationCache serializationCache = new ObjectSerializationCache();
    assertEquals(size, responseHeader.size(serializationCache) + deserialized.size(serializationCache, version));
}
Also used : ObjectSerializationCache(org.apache.kafka.common.protocol.ObjectSerializationCache) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) ByteBuffer(java.nio.ByteBuffer) UpdateMetadataEndpoint(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) Send(org.apache.kafka.common.network.Send)
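
The helper above checks the full wire framing: a 4-byte size prefix, then the response header, then the body, with the final assertion tying the prefix to the sum of the two serialized sizes. As a minimal, self-contained sketch of just the ByteBufferAccessor part (a simplified frame with no ResponseHeader, assuming kafka-clients is on the classpath and that version 12 is a supported Fetch version):

import java.nio.ByteBuffer;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;

public class SizePrefixedFrameSketch {
    public static void main(String[] args) {
        short version = 12;
        FetchResponseData body = new FetchResponseData(); // default field values only
        ObjectSerializationCache cache = new ObjectSerializationCache();
        int bodySize = body.size(cache, version);
        ByteBuffer buf = ByteBuffer.allocate(4 + bodySize);
        // write the size prefix, then the serialized body
        buf.putInt(bodySize);
        body.write(new ByteBufferAccessor(buf), cache, version);
        buf.flip();
        // consume the prefix, then rehydrate the body from the remaining bytes
        int size = buf.getInt();
        FetchResponseData parsed = new FetchResponseData(new ByteBufferAccessor(buf), version);
        System.out.println(size + " body bytes, throttleTimeMs=" + parsed.throttleTimeMs());
    }
}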

Example 12 with ByteBufferAccessor

use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.

the class UpdateMetadataRequestTest method testVersionLogic.

/**
 * Verifies that the logic UpdateMetadataRequest uses to present a unified interface across the
 * various versions works correctly. For example, `UpdateMetadataPartitionState.topicName` is not
 * serialized/deserialized in recent versions, but we set it manually so that we can always present
 * the ungrouped partition states independently of the version.
 */
@Test
public void testVersionLogic() {
    String topic0 = "topic0";
    String topic1 = "topic1";
    for (short version : UPDATE_METADATA.allVersions()) {
        List<UpdateMetadataPartitionState> partitionStates = asList(
            new UpdateMetadataPartitionState()
                .setTopicName(topic0)
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(0)
                .setLeaderEpoch(10)
                .setIsr(asList(0, 1))
                .setZkVersion(10)
                .setReplicas(asList(0, 1, 2))
                .setOfflineReplicas(asList(2)),
            new UpdateMetadataPartitionState()
                .setTopicName(topic0)
                .setPartitionIndex(1)
                .setControllerEpoch(2)
                .setLeader(1)
                .setLeaderEpoch(11)
                .setIsr(asList(1, 2, 3))
                .setZkVersion(11)
                .setReplicas(asList(1, 2, 3))
                .setOfflineReplicas(emptyList()),
            new UpdateMetadataPartitionState()
                .setTopicName(topic1)
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(2)
                .setLeaderEpoch(11)
                .setIsr(asList(2, 3))
                .setZkVersion(11)
                .setReplicas(asList(2, 3, 4))
                .setOfflineReplicas(emptyList()));
        List<UpdateMetadataEndpoint> broker0Endpoints = new ArrayList<>();
        broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id));
        // Non plaintext endpoints are only supported from version 1
        if (version >= 1) {
            broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9091).setSecurityProtocol(SecurityProtocol.SSL.id));
        }
        // Custom listeners are only supported from version 3
        if (version >= 3) {
            broker0Endpoints.get(0).setListener("listener0");
            broker0Endpoints.get(1).setListener("listener1");
        }
        List<UpdateMetadataBroker> liveBrokers = asList(
            new UpdateMetadataBroker()
                .setId(0)
                .setRack("rack0")
                .setEndpoints(broker0Endpoints),
            new UpdateMetadataBroker()
                .setId(1)
                .setEndpoints(asList(new UpdateMetadataEndpoint()
                    .setHost("host1")
                    .setPort(9090)
                    .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
                    .setListener("PLAINTEXT"))));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put(topic0, Uuid.randomUuid());
        topicIds.put(topic1, Uuid.randomUuid());
        UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(version, 1, 2, 3, partitionStates, liveBrokers, topicIds).build();
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveBrokers, request.liveBrokers());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        UpdateMetadataRequest deserializedRequest = new UpdateMetadataRequest(new UpdateMetadataRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Rack is only supported from version 2
        if (version < 2) {
            for (UpdateMetadataBroker liveBroker : liveBrokers) liveBroker.setRack("");
        }
        // Non plaintext listener name is only supported from version 3
        if (version < 3) {
            for (UpdateMetadataBroker liveBroker : liveBrokers) {
                for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
                    SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
                    endpoint.setListener(ListenerName.forSecurityProtocol(securityProtocol).value());
                }
            }
        }
        // Offline replicas are only supported from version 4
        if (version < 4)
            partitionStates.get(0).setOfflineReplicas(emptyList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(liveBrokers, deserializedRequest.liveBrokers());
        assertEquals(1, deserializedRequest.controllerId());
        assertEquals(2, deserializedRequest.controllerEpoch());
        // Broker epoch is only supported from version 5
        if (version >= 5)
            assertEquals(3, deserializedRequest.brokerEpoch());
        else
            assertEquals(-1, deserializedRequest.brokerEpoch());
        long topicIdCount = deserializedRequest.data().topicStates().stream()
            .map(UpdateMetadataRequestData.UpdateMetadataTopicState::topicId)
            .filter(topicId -> topicId != Uuid.ZERO_UUID)
            .count();
        if (version >= 7)
            assertEquals(2, topicIdCount);
        else
            assertEquals(0, topicIdCount);
    }
}
Also used : Uuid(org.apache.kafka.common.Uuid) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) HashMap(java.util.HashMap) UpdateMetadataBroker(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ListenerName(org.apache.kafka.common.network.ListenerName) Arrays.asList(java.util.Arrays.asList) Map(java.util.Map) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState) StreamSupport(java.util.stream.StreamSupport) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TopicPartition(org.apache.kafka.common.TopicPartition) UpdateMetadataRequestData(org.apache.kafka.common.message.UpdateMetadataRequestData) TestUtils(org.apache.kafka.test.TestUtils) Collections.emptyList(java.util.Collections.emptyList) Set(java.util.Set) UPDATE_METADATA(org.apache.kafka.common.protocol.ApiKeys.UPDATE_METADATA) Collectors(java.util.stream.Collectors) UpdateMetadataEndpoint(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) Test(org.junit.jupiter.api.Test) List(java.util.List) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Errors(org.apache.kafka.common.protocol.Errors) Collections(java.util.Collections)
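
Stripped of the version-specific fixups, the serialize/deserialize step at the core of this test is small. A sketch under the same assumptions (kafka-clients on the classpath; the controller and epoch values are illustrative, and version 7 is used because that is where the test expects topic IDs to round-trip):

import java.nio.ByteBuffer;
import org.apache.kafka.common.message.UpdateMetadataRequestData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;

public class UpdateMetadataRoundTripSketch {
    public static void main(String[] args) {
        short version = 7; // topic IDs are only serialized from version 7
        UpdateMetadataRequestData out = new UpdateMetadataRequestData()
            .setControllerId(1)
            .setControllerEpoch(2)
            .setBrokerEpoch(3);
        ObjectSerializationCache cache = new ObjectSerializationCache();
        ByteBuffer buf = ByteBuffer.allocate(out.size(cache, version));
        out.write(new ByteBufferAccessor(buf), cache, version);
        buf.flip();
        UpdateMetadataRequestData in =
            new UpdateMetadataRequestData(new ByteBufferAccessor(buf), version);
        System.out.println(in.controllerId() + " " + in.brokerEpoch()); // 1 3
    }
}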

Example 13 with ByteBufferAccessor

use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.

the class MessageTest method testByteBufferRoundTrip.

private void testByteBufferRoundTrip(short version, Message message, Message expected) throws Exception {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    int size = message.size(cache, version);
    ByteBuffer buf = ByteBuffer.allocate(size);
    ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(buf);
    message.write(byteBufferAccessor, cache, version);
    assertEquals(size, buf.position(), "The result of the size function does not match the number of bytes " + "written for version " + version);
    Message message2 = message.getClass().getConstructor().newInstance();
    buf.flip();
    message2.read(byteBufferAccessor, version);
    assertEquals(size, buf.position(), "The result of the size function does not match the number of bytes " + "read back in for version " + version);
    assertEquals(expected, message2, "The message object created after a round trip did not match for " + "version " + version);
    assertEquals(expected.hashCode(), message2.hashCode());
    assertEquals(expected.toString(), message2.toString());
}
Also used : ObjectSerializationCache(org.apache.kafka.common.protocol.ObjectSerializationCache) Message(org.apache.kafka.common.protocol.Message) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) ByteBuffer(java.nio.ByteBuffer)
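
One design detail worth noting: size(cache, version) and write(byteBufferAccessor, cache, version) must share the same ObjectSerializationCache, because the generated size() code caches each string field's UTF-8 bytes there and write() retrieves them from the cache instead of re-encoding. The buf.flip() before read() is likewise essential; it resets the position so the reader starts at byte 0. A hypothetical invocation of the helper, using Message.duplicate() so the expected object cannot be affected by the round trip:

testByteBufferRoundTrip(version, message, message.duplicate());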

Example 14 with ByteBufferAccessor

use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.

the class MessageTest method verifyWriteSucceeds.

private void verifyWriteSucceeds(short version, Message message) {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    int size = message.size(cache, version);
    ByteBuffer buf = ByteBuffer.allocate(size * 2);
    ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(buf);
    message.write(byteBufferAccessor, cache, version);
    assertEquals(size, buf.position(), "Expected the serialized size to be " + size + ", but it was " + buf.position());
}
Also used : ObjectSerializationCache(org.apache.kafka.common.protocol.ObjectSerializationCache) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) ByteBuffer(java.nio.ByteBuffer)
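
The buffer here is deliberately allocated at twice the computed size: the final assertion then verifies that write() emits exactly size(cache, version) bytes of its own accord, rather than merely stopping because it ran out of room.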

Example 15 with ByteBufferAccessor

use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.

the class MessageTest method verifyWriteRaisesNpe.

private void verifyWriteRaisesNpe(short version, Message message) {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    assertThrows(NullPointerException.class, () -> {
        int size = message.size(cache, version);
        ByteBuffer buf = ByteBuffer.allocate(size);
        ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(buf);
        message.write(byteBufferAccessor, cache, version);
    });
}
Also used : ObjectSerializationCache(org.apache.kafka.common.protocol.ObjectSerializationCache) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) ByteBuffer(java.nio.ByteBuffer)
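
An easy way to hit this NPE path is a null value in a non-nullable field. A hypothetical trigger, assuming kafka-clients and that ApiVersionsRequestData.clientSoftwareName is a non-nullable string (null is rejected while the field's UTF-8 size is computed):

import java.nio.ByteBuffer;
import org.apache.kafka.common.message.ApiVersionsRequestData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;

public class NullFieldSketch {
    public static void main(String[] args) {
        short version = 3; // clientSoftwareName is serialized from version 3
        ApiVersionsRequestData data = new ApiVersionsRequestData()
            .setClientSoftwareName(null); // non-nullable field set to null
        ObjectSerializationCache cache = new ObjectSerializationCache();
        try {
            ByteBuffer buf = ByteBuffer.allocate(data.size(cache, version));
            data.write(new ByteBufferAccessor(buf), cache, version);
        } catch (NullPointerException e) {
            System.out.println("null rejected during serialization, as the test expects");
        }
    }
}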

Aggregations

ByteBufferAccessor (org.apache.kafka.common.protocol.ByteBufferAccessor): 39 usages
ByteBuffer (java.nio.ByteBuffer): 24 usages
Test (org.junit.jupiter.api.Test): 23 usages
ObjectSerializationCache (org.apache.kafka.common.protocol.ObjectSerializationCache): 13 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 6 usages
ArrayList (java.util.ArrayList): 5 usages
HashMap (java.util.HashMap): 4 usages
Uuid (org.apache.kafka.common.Uuid): 4 usages
UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException): 4 usages
Collections (java.util.Collections): 3 usages
List (java.util.List): 3 usages
Map (java.util.Map): 3 usages
LeaderChangeMessage (org.apache.kafka.common.message.LeaderChangeMessage): 3 usages
UpdateMetadataEndpoint (org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint): 3 usages
Send (org.apache.kafka.common.network.Send): 3 usages
BufferUnderflowException (java.nio.BufferUnderflowException): 2 usages
Arrays.asList (java.util.Arrays.asList): 2 usages
Collections.emptyList (java.util.Collections.emptyList): 2 usages
HashSet (java.util.HashSet): 2 usages
Set (java.util.Set): 2 usages