Use of org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData in project kafka by apache.
From the class OffsetFetchRequestTest, method testConstructor:
@Test
public void testConstructor() {
    List<TopicPartition> partitions = Arrays.asList(
        new TopicPartition(topicOne, partitionOne),
        new TopicPartition(topicTwo, partitionTwo));
    int throttleTimeMs = 10;

    Map<TopicPartition, PartitionData> expectedData = new HashMap<>();
    for (TopicPartition partition : partitions) {
        expectedData.put(partition, new PartitionData(
            OffsetFetchResponse.INVALID_OFFSET,
            Optional.empty(),
            OffsetFetchResponse.NO_METADATA,
            Errors.NONE));
    }

    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version < 8) {
            builder = new OffsetFetchRequest.Builder(group1, false, partitions, false);
            assertFalse(builder.isAllTopicPartitions());
            OffsetFetchRequest request = builder.build(version);
            assertFalse(request.isAllPartitions());
            assertEquals(group1, request.groupId());
            assertEquals(partitions, request.partitions());

            OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE);
            assertEquals(Errors.NONE, response.error());
            assertFalse(response.hasError());
            assertEquals(Collections.singletonMap(Errors.NONE, version <= (short) 1 ? 3 : 1),
                response.errorCounts(), "Incorrect error count for version " + version);

            if (version <= 1) {
                assertEquals(expectedData, response.responseDataV0ToV7());
            }
            if (version >= 3) {
                assertEquals(throttleTimeMs, response.throttleTimeMs());
            } else {
                assertEquals(DEFAULT_THROTTLE_TIME, response.throttleTimeMs());
            }
        } else {
            builder = new OffsetFetchRequest.Builder(Collections.singletonMap(group1, partitions), false, false);
            OffsetFetchRequest request = builder.build(version);
            Map<String, List<TopicPartition>> groupToPartitionMap = request.groupIdsToPartitions();
            Map<String, List<OffsetFetchRequestTopics>> groupToTopicMap = request.groupIdsToTopics();
            assertFalse(request.isAllPartitionsForGroup(group1));
            assertTrue(groupToPartitionMap.containsKey(group1) && groupToTopicMap.containsKey(group1));
            assertEquals(partitions, groupToPartitionMap.get(group1));

            OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE);
            assertEquals(Errors.NONE, response.groupLevelError(group1));
            assertFalse(response.groupHasError(group1));
            assertEquals(Collections.singletonMap(Errors.NONE, 1), response.errorCounts(),
                "Incorrect error count for version " + version);
            assertEquals(throttleTimeMs, response.throttleTimeMs());
        }
    }
}
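Since this page tracks usages of OffsetFetchResponse.PartitionData, a brief hedged sketch of the value type itself may help: each entry pairs a committed offset with an optional leader epoch, a metadata string, and a per-partition error, and the INVALID_OFFSET/NO_METADATA placeholders used above stand in for "no committed offset". The concrete values below are illustrative, not part of the original test.

// Hedged sketch; offset, epoch, and metadata values are made up for illustration.
PartitionData committed = new PartitionData(42L, Optional.of(5), "client-metadata", Errors.NONE);
PartitionData missing = new PartitionData(OffsetFetchResponse.INVALID_OFFSET, Optional.empty(),
    OffsetFetchResponse.NO_METADATA, Errors.NONE);

assertFalse(committed.hasError());
assertFalse(missing.hasError());   // an absent offset is reported with Errors.NONE, not a partition error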
Use of org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData in project kafka by apache.
From the class ListConsumerGroupOffsetsHandlerTest, method buildResponse:
private OffsetFetchResponse buildResponse(Errors error) {
    Map<TopicPartition, PartitionData> responseData = new HashMap<>();
    return new OffsetFetchResponse(error, responseData);
}
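A hedged usage note for this helper (the assertions are an assumption about how a test might consume it, not part of the handler test): because only the top-level error is set and the partition map stays empty, the group-level accessors on the returned response should reflect that error.

OffsetFetchResponse errorResponse = buildResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS);
assertTrue(errorResponse.hasError());
assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, errorResponse.error());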
Use of org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData in project kafka by apache.
From the class ListConsumerGroupOffsetsHandlerTest, method buildResponseWithPartitionError:
private OffsetFetchResponse buildResponseWithPartitionError(Errors error) {
    Map<TopicPartition, PartitionData> responseData = new HashMap<>();
    responseData.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE));
    responseData.put(t0p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error));
    return new OffsetFetchResponse(Errors.NONE, responseData);
}
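In contrast to buildResponse above, this helper keeps the group-level error at NONE and attaches the error to a single partition. A hedged sketch of how that difference surfaces through the single-group accessors (t0p0 and t0p1 are the test fixtures; assuming responseDataV0ToV7() applies to responses built this way, as in the OffsetFetchResponseTest examples below):

OffsetFetchResponse response = buildResponseWithPartitionError(Errors.UNKNOWN_TOPIC_OR_PARTITION);
assertFalse(response.hasError());                        // no group-level error
Map<TopicPartition, PartitionData> data = response.responseDataV0ToV7();
assertFalse(data.get(t0p0).hasError());                  // clean partition
assertTrue(data.get(t0p1).hasError());                   // partition carrying the injected error
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, data.get(t0p1).error);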
Use of org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData in project kafka by apache.
From the class OffsetFetchResponseTest, method testStructBuild:
/**
 * Test behavior changes over the versions. Refer to resources.common.messages.OffsetFetchResponse.json.
 */
@Test
public void testStructBuild() {
    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version < 8) {
            partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo),
                new PartitionData(offset, leaderEpochTwo, metadata, Errors.GROUP_AUTHORIZATION_FAILED));

            OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap);
            OffsetFetchResponseData data = new OffsetFetchResponseData(
                new ByteBufferAccessor(latestResponse.serialize(version)), version);
            OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version);

            if (version <= 1) {
                assertEquals(Errors.NONE.code(), data.errorCode());
                // Partition level error populated in older versions.
                assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, oldResponse.error());
                assertEquals(Utils.mkMap(
                    Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 2),
                    Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)),
                    oldResponse.errorCounts());
            } else {
                assertEquals(Errors.NONE.code(), data.errorCode());
                assertEquals(Errors.NONE, oldResponse.error());
                assertEquals(Utils.mkMap(
                    Utils.mkEntry(Errors.NONE, 1),
                    Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1),
                    Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)),
                    oldResponse.errorCounts());
            }

            if (version <= 2) {
                assertEquals(DEFAULT_THROTTLE_TIME, oldResponse.throttleTimeMs());
            } else {
                assertEquals(throttleTimeMs, oldResponse.throttleTimeMs());
            }

            Map<TopicPartition, PartitionData> expectedDataMap = new HashMap<>();
            for (Map.Entry<TopicPartition, PartitionData> entry : partitionDataMap.entrySet()) {
                PartitionData partitionData = entry.getValue();
                expectedDataMap.put(entry.getKey(), new PartitionData(
                    partitionData.offset,
                    version <= 4 ? Optional.empty() : partitionData.leaderEpoch,
                    partitionData.metadata,
                    partitionData.error));
            }

            Map<TopicPartition, PartitionData> responseData = oldResponse.responseDataV0ToV7();
            assertEquals(expectedDataMap, responseData);
            responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError()));
        } else {
            partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo),
                new PartitionData(offset, leaderEpochTwo, metadata, Errors.GROUP_AUTHORIZATION_FAILED));

            OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs,
                Collections.singletonMap(groupOne, Errors.NONE),
                Collections.singletonMap(groupOne, partitionDataMap));
            OffsetFetchResponseData data = new OffsetFetchResponseData(
                new ByteBufferAccessor(latestResponse.serialize(version)), version);
            OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version);

            assertEquals(Errors.NONE.code(), data.groups().get(0).errorCode());
            assertEquals(Errors.NONE, oldResponse.groupLevelError(groupOne));
            assertEquals(Utils.mkMap(
                Utils.mkEntry(Errors.NONE, 1),
                Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1),
                Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)),
                oldResponse.errorCounts());
            assertEquals(throttleTimeMs, oldResponse.throttleTimeMs());

            Map<TopicPartition, PartitionData> expectedDataMap = new HashMap<>();
            for (Map.Entry<TopicPartition, PartitionData> entry : partitionDataMap.entrySet()) {
                PartitionData partitionData = entry.getValue();
                expectedDataMap.put(entry.getKey(), new PartitionData(
                    partitionData.offset,
                    partitionData.leaderEpoch,
                    partitionData.metadata,
                    partitionData.error));
            }

            Map<TopicPartition, PartitionData> responseData = oldResponse.partitionDataMap(groupOne);
            assertEquals(expectedDataMap, responseData);
            responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError()));
        }
    }
}
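The serialize-then-reparse round trip is the core of the test above; a condensed hedged sketch for a single old wire version follows (the chosen version and the expectation are illustrative, reusing the fixtures from the test):

short version = 1;   // illustrative: an old wire version without a throttle-time field
OffsetFetchResponse latest = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap);
OffsetFetchResponseData wire =
    new OffsetFetchResponseData(new ByteBufferAccessor(latest.serialize(version)), version);
OffsetFetchResponse reparsed = new OffsetFetchResponse(wire, version);

// v0/v1 cannot carry a throttle time, so the re-parsed response falls back to the default.
assertEquals(DEFAULT_THROTTLE_TIME, reparsed.throttleTimeMs());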
Use of org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData in project kafka by apache.
From the class OffsetFetchResponseTest, method testUseDefaultLeaderEpochV0ToV7:
@Test
public void testUseDefaultLeaderEpochV0ToV7() {
    final Optional<Integer> emptyLeaderEpoch = Optional.empty();
    partitionDataMap.clear();
    partitionDataMap.put(new TopicPartition(topicOne, partitionOne),
        new PartitionData(offset, emptyLeaderEpoch, metadata, Errors.UNKNOWN_TOPIC_OR_PARTITION));

    OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.NOT_COORDINATOR, partitionDataMap);
    OffsetFetchResponseData expectedData = new OffsetFetchResponseData()
        .setErrorCode(Errors.NOT_COORDINATOR.code())
        .setThrottleTimeMs(throttleTimeMs)
        .setTopics(Collections.singletonList(
            new OffsetFetchResponseTopic()
                .setName(topicOne)
                .setPartitions(Collections.singletonList(
                    new OffsetFetchResponsePartition()
                        .setPartitionIndex(partitionOne)
                        .setCommittedOffset(offset)
                        .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH)
                        .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
                        .setMetadata(metadata)))));
    assertEquals(expectedData, response.data());
}
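As a hedged complement to the test above (not from the original class): when a leader epoch is supplied, the same constructor should carry it through instead of substituting NO_PARTITION_LEADER_EPOCH. The epoch value is illustrative, and the getter chain is an assumption about the generated OffsetFetchResponseData accessors.

partitionDataMap.clear();
partitionDataMap.put(new TopicPartition(topicOne, partitionOne),
    new PartitionData(offset, Optional.of(5), metadata, Errors.NONE));   // epoch 5 is illustrative
OffsetFetchResponse withEpoch = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap);
assertEquals(5, withEpoch.data().topics().get(0).partitions().get(0).committedLeaderEpoch());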