Usage example of org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember from the Apache Kafka project: class MessageTest, method testThrottleTimeIgnorableInDescribeGroupsResponse.
@Test
public void testThrottleTimeIgnorableInDescribeGroupsResponse() throws Exception {
    // Build a response carrying a non-zero throttle time (a v1+ field).
    DescribedGroupMember member = new DescribedGroupMember()
        .setMemberId(memberId);
    DescribedGroup group = new DescribedGroup()
        .setGroupId("group")
        .setGroupState("Stable")
        .setErrorCode(Errors.NONE.code())
        .setMembers(Collections.singletonList(member))
        .setProtocolType("consumer");
    DescribeGroupsResponseData responseWithGroupInstanceId = new DescribeGroupsResponseData()
        .setGroups(Collections.singletonList(group))
        .setThrottleTimeMs(10);

    // ThrottleTimeMs is ignorable before v1, so round-tripping at older
    // versions must silently drop it back to its default of 0.
    DescribeGroupsResponseData expectedResponse = responseWithGroupInstanceId.duplicate();
    expectedResponse.setThrottleTimeMs(0);

    testAllMessageRoundTripsBeforeVersion((short) 1, responseWithGroupInstanceId, expectedResponse);
}
Usage example of org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember from the Apache Kafka project: class MessageTest, method testDescribeGroupsResponseVersions.
@Test
public void testDescribeGroupsResponseVersions() throws Exception {
    // These message objects are mutated in place between round trips:
    // each mutation adds a field introduced at a later protocol version.
    DescribedGroupMember member = new DescribedGroupMember().setMemberId(memberId);
    DescribedGroup group = new DescribedGroup()
        .setGroupId("group")
        .setGroupState("Stable")
        .setErrorCode(Errors.NONE.code())
        .setMembers(Collections.singletonList(member))
        .setProtocolType("consumer");
    DescribeGroupsResponseData response = new DescribeGroupsResponseData()
        .setGroups(Collections.singletonList(group));

    // The base response round-trips at every version.
    testAllMessageRoundTrips(response);
    // ThrottleTimeMs was introduced in v1.
    testAllMessageRoundTripsFromVersion((short) 1, response.setThrottleTimeMs(10));
    // AuthorizedOperations was introduced in v3.
    group.setAuthorizedOperations(1);
    testAllMessageRoundTripsFromVersion((short) 3, response);
    // Member GroupInstanceId was introduced in v4.
    member.setGroupInstanceId(instanceId);
    testAllMessageRoundTripsFromVersion((short) 4, response);
}
Usage example of org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember from the Apache Kafka project: class MessageTest, method testGroupInstanceIdIgnorableInDescribeGroupsResponse.
@Test
public void testGroupInstanceIdIgnorableInDescribeGroupsResponse() throws Exception {
    // Build a response whose member carries a GroupInstanceId (a v4+ field).
    DescribedGroupMember member = new DescribedGroupMember()
        .setMemberId(memberId)
        .setGroupInstanceId(instanceId);
    DescribedGroup group = new DescribedGroup()
        .setGroupId("group")
        .setGroupState("Stable")
        .setErrorCode(Errors.NONE.code())
        .setMembers(Collections.singletonList(member))
        .setProtocolType("consumer");
    DescribeGroupsResponseData responseWithGroupInstanceId = new DescribeGroupsResponseData()
        .setGroups(Collections.singletonList(group));

    // GroupInstanceId is ignorable before v4, so round-tripping at older
    // versions must silently drop it back to null.
    DescribeGroupsResponseData expectedResponse = responseWithGroupInstanceId.duplicate();
    expectedResponse.groups().get(0).members().get(0).setGroupInstanceId(null);

    testAllMessageRoundTripsBeforeVersion((short) 4, responseWithGroupInstanceId, expectedResponse);
}
Usage example of org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember from the Apache Kafka project: class KafkaAdminClientTest, method prepareDescribeGroupsResponseData.
/**
 * Builds a successful DescribeGroups response for {@code groupId} with one consumer
 * member per entry in {@code groupInstances}, each carrying the serialized assignment
 * of {@code topicPartitions}.
 *
 * @param groupId         the group to describe
 * @param groupInstances  one group instance id per member to create
 * @param topicPartitions the partitions serialized into each member's assignment
 * @return a response containing a single NONE-error group with the constructed members
 */
private static DescribeGroupsResponseData prepareDescribeGroupsResponseData(String groupId, List<String> groupInstances, List<TopicPartition> topicPartitions) {
    final ByteBuffer memberAssignment = ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(topicPartitions));
    // Copy the serialized assignment out of the buffer once and share it across
    // members. Previously a zero-filled array of matching length was passed,
    // silently discarding the serialized data (contrast with
    // testDescribeConsumerGroups, which copies via memberAssignment.get(...)).
    final byte[] memberAssignmentBytes = new byte[memberAssignment.remaining()];
    memberAssignment.get(memberAssignmentBytes);
    List<DescribedGroupMember> describedGroupMembers = groupInstances.stream()
        .map(groupInstance -> DescribeGroupsResponse.groupMember(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstance, "clientId0", "clientHost", memberAssignmentBytes, null))
        .collect(Collectors.toList());
    DescribeGroupsResponseData data = new DescribeGroupsResponseData();
    data.groups().add(DescribeGroupsResponse.groupMetadata(groupId, Errors.NONE, "", ConsumerProtocol.PROTOCOL_TYPE, "", describedGroupMembers, Collections.emptySet()));
    return data;
}
Usage example of org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember from the Apache Kafka project: class KafkaAdminClientTest, method testDescribeConsumerGroups.
@Test
// Verifies that describeConsumerGroups() retries through a sequence of retriable
// coordinator errors and finally returns the expected group description. The mock
// client consumes the prepared responses strictly in queue order, so the ordering
// of the prepareResponse(...) calls below is load-bearing.
public void testDescribeConsumerGroups() throws Exception {
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// Retriable FindCoordinatorResponse errors should be retried
env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
// Coordinator lookup finally succeeds.
env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
DescribeGroupsResponseData data = new DescribeGroupsResponseData();
// Retriable errors should be retried
data.groups().add(DescribeGroupsResponse.groupMetadata(GROUP_ID, Errors.COORDINATOR_LOAD_IN_PROGRESS, "", "", "", Collections.emptyList(), Collections.emptySet()));
env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
/*
 * We need to return two responses here, one with NOT_COORDINATOR error when calling describe consumer group
 * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a
 * FindCoordinatorResponse.
 *
 * And the same reason for COORDINATOR_NOT_AVAILABLE error response
 */
data = new DescribeGroupsResponseData();
data.groups().add(DescribeGroupsResponse.groupMetadata(GROUP_ID, Errors.NOT_COORDINATOR, "", "", "", Collections.emptyList(), Collections.emptySet()));
env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
data = new DescribeGroupsResponseData();
data.groups().add(DescribeGroupsResponse.groupMetadata(GROUP_ID, Errors.COORDINATOR_NOT_AVAILABLE, "", "", "", Collections.emptyList(), Collections.emptySet()));
env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
// Final, successful response: a two-member consumer group whose members are
// both assigned the three partitions of "my_topic".
data = new DescribeGroupsResponseData();
TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
final List<TopicPartition> topicPartitions = new ArrayList<>();
topicPartitions.add(0, myTopicPartition0);
topicPartitions.add(1, myTopicPartition1);
topicPartitions.add(2, myTopicPartition2);
// Serialize the assignment and copy the bytes out of the buffer so both
// members carry a parseable consumer assignment.
final ByteBuffer memberAssignment = ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(topicPartitions));
byte[] memberAssignmentBytes = new byte[memberAssignment.remaining()];
memberAssignment.get(memberAssignmentBytes);
DescribedGroupMember memberOne = DescribeGroupsResponse.groupMember("0", "instance1", "clientId0", "clientHost", memberAssignmentBytes, null);
DescribedGroupMember memberTwo = DescribeGroupsResponse.groupMember("1", "instance2", "clientId1", "clientHost", memberAssignmentBytes, null);
List<MemberDescription> expectedMemberDescriptions = new ArrayList<>();
expectedMemberDescriptions.add(convertToMemberDescriptions(memberOne, new MemberAssignment(new HashSet<>(topicPartitions))));
expectedMemberDescriptions.add(convertToMemberDescriptions(memberTwo, new MemberAssignment(new HashSet<>(topicPartitions))));
data.groups().add(DescribeGroupsResponse.groupMetadata(GROUP_ID, Errors.NONE, "", ConsumerProtocol.PROTOCOL_TYPE, "", asList(memberOne, memberTwo), Collections.emptySet()));
env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
// Despite the retries above, the caller sees a single successful description.
final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID));
final ConsumerGroupDescription groupDescription = result.describedGroups().get(GROUP_ID).get();
assertEquals(1, result.describedGroups().size());
assertEquals(GROUP_ID, groupDescription.groupId());
assertEquals(2, groupDescription.members().size());
assertEquals(expectedMemberDescriptions, groupDescription.members());
}
}
Aggregations