use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
the class ConsumerProtocol method deserializeSubscription.
public static Subscription deserializeSubscription(final ByteBuffer buffer, short version) {
    version = checkSubscriptionVersion(version);
    try {
        ConsumerProtocolSubscription data = new ConsumerProtocolSubscription(new ByteBufferAccessor(buffer), version);
        List<TopicPartition> ownedPartitions = new ArrayList<>();
        for (ConsumerProtocolSubscription.TopicPartition tp : data.ownedPartitions()) {
            for (Integer partition : tp.partitions()) {
                ownedPartitions.add(new TopicPartition(tp.topic(), partition));
            }
        }
        return new Subscription(data.topics(), data.userData() != null ? data.userData().duplicate() : null, ownedPartitions);
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing consumer protocol's subscription", e);
    }
}
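For contrast, the write side follows the usual generated-message pattern: size the payload with an ObjectSerializationCache, then write it through a ByteBufferAccessor. A minimal sketch, assuming the generated setters setTopics and setUserData on ConsumerProtocolSubscription:

    // Sketch: serializing a ConsumerProtocolSubscription into a ByteBuffer.
    // The setters used here are assumptions based on Kafka's generated message classes.
    short version = 1;
    ConsumerProtocolSubscription data = new ConsumerProtocolSubscription()
        .setTopics(Arrays.asList("topic0", "topic1"))
        .setUserData(null); // userData is nullable
    ObjectSerializationCache cache = new ObjectSerializationCache();
    int size = data.size(cache, version);
    ByteBuffer buffer = ByteBuffer.allocate(size);
    data.write(new ByteBufferAccessor(buffer), cache, version);
    buffer.flip(); // buffer is now ready to be read back by deserializeSubscription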
use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
the class RequestResponseTest method testFetchRequestCompat.
@Test
public void testFetchRequestCompat() {
    Map<TopicPartition, FetchRequest.PartitionData> fetchData = new HashMap<>();
    fetchData.put(new TopicPartition("test", 0),
        new FetchRequest.PartitionData(Uuid.ZERO_UUID, 100, 2, 100, Optional.of(42)));
    FetchRequest req = FetchRequest.Builder
        .forConsumer((short) 2, 100, 100, fetchData)
        .metadata(new FetchMetadata(10, 20))
        .isolationLevel(IsolationLevel.READ_COMMITTED)
        .build((short) 2);
    FetchRequestData data = req.data();
    ObjectSerializationCache cache = new ObjectSerializationCache();
    int size = data.size(cache, (short) 2);
    ByteBufferAccessor writer = new ByteBufferAccessor(ByteBuffer.allocate(size));
    data.write(writer, cache, (short) 2);
}
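The test above only exercises the write path. A natural extension, sketched here with the same variables and calls, is to keep a reference to the underlying buffer and parse the bytes back at the same version to confirm the round trip:

    // Sketch: hold on to the buffer, write into it, then read the data back.
    ByteBuffer buffer = ByteBuffer.allocate(size);
    ByteBufferAccessor writer = new ByteBufferAccessor(buffer);
    data.write(writer, cache, (short) 2);
    buffer.flip();
    FetchRequestData roundTrip = new FetchRequestData(new ByteBufferAccessor(buffer), (short) 2);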
use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
the class LeaderAndIsrRequestTest method testVersionLogic.
/**
 * Verifies that the logic in LeaderAndIsrRequest to present a unified interface across the various versions
 * works correctly. For example, `LeaderAndIsrPartitionState.topicName` is not serialized/deserialized in
 * recent versions, but we set it manually so that we can always present the ungrouped partition states
 * independently of the version.
 */
@Test
public void testVersionLogic() {
    for (short version : LEADER_AND_ISR.allVersions()) {
        List<LeaderAndIsrPartitionState> partitionStates = asList(
            new LeaderAndIsrPartitionState()
                .setTopicName("topic0")
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(0)
                .setLeaderEpoch(10)
                .setIsr(asList(0, 1))
                .setZkVersion(10)
                .setReplicas(asList(0, 1, 2))
                .setAddingReplicas(asList(3))
                .setRemovingReplicas(asList(2)),
            new LeaderAndIsrPartitionState()
                .setTopicName("topic0")
                .setPartitionIndex(1)
                .setControllerEpoch(2)
                .setLeader(1)
                .setLeaderEpoch(11)
                .setIsr(asList(1, 2, 3))
                .setZkVersion(11)
                .setReplicas(asList(1, 2, 3))
                .setAddingReplicas(emptyList())
                .setRemovingReplicas(emptyList()),
            new LeaderAndIsrPartitionState()
                .setTopicName("topic1")
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(2)
                .setLeaderEpoch(11)
                .setIsr(asList(2, 3, 4))
                .setZkVersion(11)
                .setReplicas(asList(2, 3, 4))
                .setAddingReplicas(emptyList())
                .setRemovingReplicas(emptyList()));
        List<Node> liveNodes = asList(new Node(0, "host0", 9090), new Node(1, "host1", 9091));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put("topic0", Uuid.randomUuid());
        topicIds.put("topic1", Uuid.randomUuid());
        LeaderAndIsrRequest request = new LeaderAndIsrRequest.Builder(version, 1, 2, 3, partitionStates, topicIds, liveNodes).build();
        List<LeaderAndIsrLiveLeader> liveLeaders = liveNodes.stream()
            .map(n -> new LeaderAndIsrLiveLeader()
                .setBrokerId(n.id())
                .setHostName(n.host())
                .setPort(n.port()))
            .collect(Collectors.toList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveLeaders, request.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        LeaderAndIsrRequest deserializedRequest = new LeaderAndIsrRequest(
            new LeaderAndIsrRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Adding/removing replicas is only supported from version 3, so the deserialized request won't have
        // them for earlier versions.
        if (version < 3) {
            partitionStates.get(0).setAddingReplicas(emptyList()).setRemovingReplicas(emptyList());
        }
        // Prior to version 2 there were no TopicStates, so the deserialized map of topic IDs is an empty map.
        if (version < 2) {
            topicIds = new HashMap<>();
        }
        // Versions 2-4 serialize TopicStates but not topic IDs, so deserialized requests have zero UUIDs in place.
        if (version > 1 && version < 5) {
            topicIds.put("topic0", Uuid.ZERO_UUID);
            topicIds.put("topic1", Uuid.ZERO_UUID);
        }
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(topicIds, deserializedRequest.topicIds());
        assertEquals(liveLeaders, deserializedRequest.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
    }
}
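The assertions rely on an iterableToSet helper defined elsewhere in the test class. A plausible sketch of such a helper (hypothetical, for illustration; assumes java.util.stream imports):

    // Hypothetical helper: collect an Iterable into a Set for order-insensitive comparison.
    private static <T> Set<T> iterableToSet(Iterable<T> iterable) {
        return StreamSupport.stream(iterable.spliterator(), false).collect(Collectors.toSet());
    }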
use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
the class OffsetFetchResponseTest method testStructBuild.
/**
* Test behavior changes over the versions. Refer to resources.common.messages.OffsetFetchResponse.json
*/
@Test
public void testStructBuild() {
    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version < 8) {
            partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo),
                new PartitionData(offset, leaderEpochTwo, metadata, Errors.GROUP_AUTHORIZATION_FAILED));
            OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap);
            OffsetFetchResponseData data = new OffsetFetchResponseData(
                new ByteBufferAccessor(latestResponse.serialize(version)), version);
            OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version);
            if (version <= 1) {
                assertEquals(Errors.NONE.code(), data.errorCode());
                // Partition-level error populated in older versions.
                assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, oldResponse.error());
                assertEquals(Utils.mkMap(
                    Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 2),
                    Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), oldResponse.errorCounts());
            } else {
                assertEquals(Errors.NONE.code(), data.errorCode());
                assertEquals(Errors.NONE, oldResponse.error());
                assertEquals(Utils.mkMap(
                    Utils.mkEntry(Errors.NONE, 1),
                    Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1),
                    Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), oldResponse.errorCounts());
            }
            if (version <= 2) {
                assertEquals(DEFAULT_THROTTLE_TIME, oldResponse.throttleTimeMs());
            } else {
                assertEquals(throttleTimeMs, oldResponse.throttleTimeMs());
            }
            Map<TopicPartition, PartitionData> expectedDataMap = new HashMap<>();
            for (Map.Entry<TopicPartition, PartitionData> entry : partitionDataMap.entrySet()) {
                PartitionData partitionData = entry.getValue();
                expectedDataMap.put(entry.getKey(), new PartitionData(partitionData.offset,
                    version <= 4 ? Optional.empty() : partitionData.leaderEpoch,
                    partitionData.metadata, partitionData.error));
            }
            Map<TopicPartition, PartitionData> responseData = oldResponse.responseDataV0ToV7();
            assertEquals(expectedDataMap, responseData);
            responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError()));
        } else {
            partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo),
                new PartitionData(offset, leaderEpochTwo, metadata, Errors.GROUP_AUTHORIZATION_FAILED));
            OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs,
                Collections.singletonMap(groupOne, Errors.NONE),
                Collections.singletonMap(groupOne, partitionDataMap));
            OffsetFetchResponseData data = new OffsetFetchResponseData(
                new ByteBufferAccessor(latestResponse.serialize(version)), version);
            OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version);
            assertEquals(Errors.NONE.code(), data.groups().get(0).errorCode());
            assertEquals(Errors.NONE, oldResponse.groupLevelError(groupOne));
            assertEquals(Utils.mkMap(
                Utils.mkEntry(Errors.NONE, 1),
                Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1),
                Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), oldResponse.errorCounts());
            assertEquals(throttleTimeMs, oldResponse.throttleTimeMs());
            Map<TopicPartition, PartitionData> expectedDataMap = new HashMap<>();
            for (Map.Entry<TopicPartition, PartitionData> entry : partitionDataMap.entrySet()) {
                PartitionData partitionData = entry.getValue();
                expectedDataMap.put(entry.getKey(), new PartitionData(partitionData.offset,
                    partitionData.leaderEpoch, partitionData.metadata, partitionData.error));
            }
            Map<TopicPartition, PartitionData> responseData = oldResponse.partitionDataMap(groupOne);
            assertEquals(expectedDataMap, responseData);
            responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError()));
        }
    }
}
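Stripped of the version-specific assertions, the core ByteBufferAccessor pattern this test exercises is a serialize-reparse round trip at a chosen version. A distilled sketch, reusing only calls already shown above:

    // Round-trip a response through its wire format at a specific version.
    short version = 7;
    ByteBuffer serialized = latestResponse.serialize(version);
    OffsetFetchResponseData reparsed = new OffsetFetchResponseData(new ByteBufferAccessor(serialized), version);
    OffsetFetchResponse downgraded = new OffsetFetchResponse(reparsed, version);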
use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
the class RequestHeaderTest method parseHeaderWithNullClientId.
@Test
public void parseHeaderWithNullClientId() {
    RequestHeaderData headerData = new RequestHeaderData()
        .setClientId(null)
        .setCorrelationId(123)
        .setRequestApiKey(ApiKeys.FIND_COORDINATOR.id)
        .setRequestApiVersion((short) 10);
    ObjectSerializationCache serializationCache = new ObjectSerializationCache();
    ByteBuffer buffer = ByteBuffer.allocate(headerData.size(serializationCache, (short) 2));
    headerData.write(new ByteBufferAccessor(buffer), serializationCache, (short) 2);
    buffer.flip();
    RequestHeader parsed = RequestHeader.parse(buffer);
    assertEquals("", parsed.clientId());
    assertEquals(123, parsed.correlationId());
    assertEquals(ApiKeys.FIND_COORDINATOR, parsed.apiKey());
    assertEquals((short) 10, parsed.apiVersion());
}
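The same write-flip-parse sequence recurs across these examples. A minimal sketch factoring it into a helper (a hypothetical method, not part of Kafka's API), using only the calls shown above:

    // Hypothetical helper: serialize a RequestHeaderData at a given header version,
    // then parse the bytes back into a RequestHeader.
    private static RequestHeader roundTrip(RequestHeaderData headerData, short headerVersion) {
        ObjectSerializationCache cache = new ObjectSerializationCache();
        ByteBuffer buffer = ByteBuffer.allocate(headerData.size(cache, headerVersion));
        headerData.write(new ByteBufferAccessor(buffer), cache, headerVersion);
        buffer.flip();
        return RequestHeader.parse(buffer);
    }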