Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
The class RequestResponseTest, method verifyFetchResponseFullWrite.
private void verifyFetchResponseFullWrite(short version, FetchResponse fetchResponse) throws Exception {
    int correlationId = 15;
    short responseHeaderVersion = FETCH.responseHeaderVersion(version);
    Send send = fetchResponse.toSend(new ResponseHeader(correlationId, responseHeaderVersion), version);
    ByteBufferChannel channel = new ByteBufferChannel(send.size());
    send.writeTo(channel);
    channel.close();
    ByteBuffer buf = channel.buffer();
    // read the size prefix
    int size = buf.getInt();
    assertTrue(size > 0);
    // read the header (channel.buffer() is the same buffer as buf, so this advances buf's position)
    ResponseHeader responseHeader = ResponseHeader.parse(channel.buffer(), responseHeaderVersion);
    assertEquals(correlationId, responseHeader.correlationId());
    // the remaining bytes must be exactly the serialized response body
    assertEquals(fetchResponse.serialize(version), buf);
    FetchResponseData deserialized = new FetchResponseData(new ByteBufferAccessor(buf), version);
    ObjectSerializationCache serializationCache = new ObjectSerializationCache();
    // the size prefix must account for the header plus the body
    assertEquals(size, responseHeader.size(serializationCache) + deserialized.size(serializationCache, version));
}
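The helper above exercises the full wire format: a 4-byte size prefix, then the response header, then the FetchResponseData body. When only the body round trip matters, ByteBufferAccessor can be used directly against the generated message class. A minimal sketch, assuming a default-constructed FetchResponseData is serializable at the latest fetch version:

    short sketchVersion = ApiKeys.FETCH.latestVersion();
    FetchResponseData data = new FetchResponseData();
    ObjectSerializationCache cache = new ObjectSerializationCache();
    // size() must be computed with the same cache that is later passed to write()
    ByteBuffer buffer = ByteBuffer.allocate(data.size(cache, sketchVersion));
    data.write(new ByteBufferAccessor(buffer), cache, sketchVersion);
    buffer.flip();
    FetchResponseData roundTripped = new FetchResponseData(new ByteBufferAccessor(buffer), sketchVersion);
    assertEquals(data, roundTripped);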
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
The class UpdateMetadataRequestTest, method testVersionLogic.
/**
 * Verifies that the logic we have in UpdateMetadataRequest to present a unified interface
 * across the various versions works correctly. For example, `UpdateMetadataPartitionState.topicName`
 * is not serialized/deserialized in recent versions, but we set it manually so that we can always
 * present the ungrouped partition states independently of the version.
 */
@Test
public void testVersionLogic() {
    String topic0 = "topic0";
    String topic1 = "topic1";
    for (short version : UPDATE_METADATA.allVersions()) {
        List<UpdateMetadataPartitionState> partitionStates = asList(
            new UpdateMetadataPartitionState()
                .setTopicName(topic0)
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(0)
                .setLeaderEpoch(10)
                .setIsr(asList(0, 1))
                .setZkVersion(10)
                .setReplicas(asList(0, 1, 2))
                .setOfflineReplicas(asList(2)),
            new UpdateMetadataPartitionState()
                .setTopicName(topic0)
                .setPartitionIndex(1)
                .setControllerEpoch(2)
                .setLeader(1)
                .setLeaderEpoch(11)
                .setIsr(asList(1, 2, 3))
                .setZkVersion(11)
                .setReplicas(asList(1, 2, 3))
                .setOfflineReplicas(emptyList()),
            new UpdateMetadataPartitionState()
                .setTopicName(topic1)
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(2)
                .setLeaderEpoch(11)
                .setIsr(asList(2, 3))
                .setZkVersion(11)
                .setReplicas(asList(2, 3, 4))
                .setOfflineReplicas(emptyList()));
        List<UpdateMetadataEndpoint> broker0Endpoints = new ArrayList<>();
        broker0Endpoints.add(new UpdateMetadataEndpoint()
            .setHost("host0")
            .setPort(9090)
            .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id));
        // Non-plaintext endpoints are only supported from version 1
        if (version >= 1) {
            broker0Endpoints.add(new UpdateMetadataEndpoint()
                .setHost("host0")
                .setPort(9091)
                .setSecurityProtocol(SecurityProtocol.SSL.id));
        }
        // Custom listeners are only supported from version 3
        if (version >= 3) {
            broker0Endpoints.get(0).setListener("listener0");
            broker0Endpoints.get(1).setListener("listener1");
        }
        List<UpdateMetadataBroker> liveBrokers = asList(
            new UpdateMetadataBroker()
                .setId(0)
                .setRack("rack0")
                .setEndpoints(broker0Endpoints),
            new UpdateMetadataBroker()
                .setId(1)
                .setEndpoints(asList(new UpdateMetadataEndpoint()
                    .setHost("host1")
                    .setPort(9090)
                    .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
                    .setListener("PLAINTEXT"))));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put(topic0, Uuid.randomUuid());
        topicIds.put(topic1, Uuid.randomUuid());
        UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(version, 1, 2, 3,
            partitionStates, liveBrokers, topicIds).build();
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveBrokers, request.liveBrokers());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        UpdateMetadataRequest deserializedRequest = new UpdateMetadataRequest(
            new UpdateMetadataRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Rack is only supported from version 2
        if (version < 2) {
            for (UpdateMetadataBroker liveBroker : liveBrokers)
                liveBroker.setRack("");
        }
        // Non-plaintext listener names are only supported from version 3
        if (version < 3) {
            for (UpdateMetadataBroker liveBroker : liveBrokers) {
                for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
                    SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
                    endpoint.setListener(ListenerName.forSecurityProtocol(securityProtocol).value());
                }
            }
        }
        // Offline replicas are only supported from version 4
        if (version < 4)
            partitionStates.get(0).setOfflineReplicas(emptyList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(liveBrokers, deserializedRequest.liveBrokers());
        assertEquals(1, deserializedRequest.controllerId());
        assertEquals(2, deserializedRequest.controllerEpoch());
        // Broker epoch is only supported from version 5
        if (version >= 5)
            assertEquals(3, deserializedRequest.brokerEpoch());
        else
            assertEquals(-1, deserializedRequest.brokerEpoch());
        // Topic IDs are only supported from version 7
        long topicIdCount = deserializedRequest.data().topicStates().stream()
            .map(UpdateMetadataRequestData.UpdateMetadataTopicState::topicId)
            .filter(topicId -> topicId != Uuid.ZERO_UUID)
            .count();
        if (version >= 7)
            assertEquals(2, topicIdCount);
        else
            assertEquals(0, topicIdCount);
    }
}
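The iterableToSet helper referenced above is not part of this snippet. A plausible reconstruction that matches its usage (an assumption, not necessarily the verbatim Kafka helper):

    import java.util.Set;
    import java.util.stream.Collectors;
    import java.util.stream.StreamSupport;

    // Hypothetical helper: collects an Iterable into a Set so the assertions compare
    // contents without depending on iteration order across versions.
    private static <T> Set<T> iterableToSet(Iterable<T> iterable) {
        return StreamSupport.stream(iterable.spliterator(), false).collect(Collectors.toSet());
    }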
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
The class MessageTest, method testByteBufferRoundTrip.
private void testByteBufferRoundTrip(short version, Message message, Message expected) throws Exception {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    int size = message.size(cache, version);
    ByteBuffer buf = ByteBuffer.allocate(size);
    ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(buf);
    message.write(byteBufferAccessor, cache, version);
    assertEquals(size, buf.position(), "The result of the size function does not match the number of bytes " +
        "written for version " + version);
    Message message2 = message.getClass().getConstructor().newInstance();
    buf.flip();
    message2.read(byteBufferAccessor, version);
    assertEquals(size, buf.position(), "The result of the size function does not match the number of bytes " +
        "read back in for version " + version);
    assertEquals(expected, message2, "The message object created after a round trip did not match for " +
        "version " + version);
    assertEquals(expected.hashCode(), message2.hashCode());
    assertEquals(expected.toString(), message2.toString());
}
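A hypothetical caller of this helper, assuming a default-constructed ApiVersionsRequestData round-trips unchanged at every version (all of its fields have defaults):

    // Round-trip a default ApiVersionsRequestData across all supported versions.
    for (short version : ApiKeys.API_VERSIONS.allVersions()) {
        ApiVersionsRequestData request = new ApiVersionsRequestData();
        testByteBufferRoundTrip(version, request, request);
    }

Passing the same object as both message and expected works because read() populates a fresh instance created via the no-arg constructor; the original message is only written from, never mutated.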
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
The class MessageTest, method verifyWriteSucceeds.
private void verifyWriteSucceeds(short version, Message message) {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    int size = message.size(cache, version);
    // Allocate extra headroom so that an overrun fails the size assertion below
    // instead of throwing BufferOverflowException during the write.
    ByteBuffer buf = ByteBuffer.allocate(size * 2);
    ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(buf);
    message.write(byteBufferAccessor, cache, version);
    assertEquals(size, buf.position(), "Expected the serialized size to be " + size + ", but it was " + buf.position());
}
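A hypothetical use of this helper is checking that an ignorable field, set at a version that does not serialize it, still writes cleanly. This sketch assumes FetchRequestData.rackId is marked ignorable in the Fetch schema, so the generated code silently drops it at older versions:

    // rackId was added in a later fetch version; at v6 an ignorable field is simply not written.
    verifyWriteSucceeds((short) 6, new FetchRequestData().setRackId("rack"));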
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
The class MessageTest, method verifyWriteRaisesNpe.
private void verifyWriteRaisesNpe(short version, Message message) {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    assertThrows(NullPointerException.class, () -> {
        int size = message.size(cache, version);
        ByteBuffer buf = ByteBuffer.allocate(size);
        ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(buf);
        message.write(byteBufferAccessor, cache, version);
    });
}
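A hypothetical usage, assuming the topics field of MetadataRequestData is non-nullable at version 0 (it only became nullable in later versions):

    // Serializing a null topic list at a version where it is non-nullable should
    // raise a NullPointerException from the generated size()/write() path.
    verifyWriteRaisesNpe((short) 0, new MetadataRequestData().setTopics(null));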