Use of org.apache.kafka.common.requests.SyncGroupRequest in project kafka by apache.
The class ConsumerCoordinatorTest, method testPatternJoinGroupLeader.
@Test
public void testPatternJoinGroupLeader() {
    final String consumerId = "leader";
    final List<TopicPartition> assigned = Arrays.asList(t1p, t2p);
    final List<TopicPartition> owned = Collections.emptyList();
    subscriptions.subscribe(Pattern.compile("test.*"), rebalanceListener);
    // partially update the metadata with one topic first,
    // then let the leader refresh metadata during assignment
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // normal join group
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    partitionAssignor.prepare(singletonMap(consumerId, assigned));
    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
            sync.data().generationId() == 1 &&
            sync.groupAssignments().containsKey(consumerId);
    }, syncGroupResponse(assigned, Errors.NONE));
    // expect the client to force a metadata update; if it does, give it both topics
    client.prepareMetadataUpdate(metadataResponse);
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(2, subscriptions.numAssignedPartitions());
    assertEquals(2, subscriptions.metadataTopics().size());
    assertEquals(2, subscriptions.subscription().size());
    // the revocation callback is not triggered at all since there is nothing to revoke
    assertEquals(0, rebalanceListener.revokedCount);
    assertNull(rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
}
Use of org.apache.kafka.common.requests.SyncGroupRequest in project kafka by apache.
The class ConsumerCoordinatorTest, method testPatternJoinGroupFollower.
@Test
public void testPatternJoinGroupFollower() {
    final Set<String> subscription = Utils.mkSet(topic1, topic2);
    final List<TopicPartition> owned = Collections.emptyList();
    final List<TopicPartition> assigned = Arrays.asList(t1p, t2p);
    subscriptions.subscribe(Pattern.compile("test.*"), rebalanceListener);
    // partially update the metadata with one topic first,
    // then let the leader refresh metadata during assignment
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // normal join group
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
            sync.data().generationId() == 1 &&
            sync.groupAssignments().isEmpty();
    }, syncGroupResponse(assigned, Errors.NONE));
    // expect the client to force a metadata update; if it does, give it both topics
    client.prepareMetadataUpdate(metadataResponse);
    coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(assigned.size(), subscriptions.numAssignedPartitions());
    assertEquals(subscription, subscriptions.subscription());
    assertEquals(0, rebalanceListener.revokedCount);
    assertNull(rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
}
Use of org.apache.kafka.common.requests.SyncGroupRequest in project kafka by apache.
The class ConsumerCoordinatorTest, method testUpdateMetadataDuringRebalance.
@Test
public void testUpdateMetadataDuringRebalance() {
    final String topic1 = "topic1";
    final String topic2 = "topic2";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    final String consumerId = "leader";
    List<String> topics = Arrays.asList(topic1, topic2);
    subscriptions.subscribe(new HashSet<>(topics), rebalanceListener);
    // we only have metadata for one topic initially
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // prepare initial rebalance
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, topics);
    partitionAssignor.prepare(singletonMap(consumerId, Arrays.asList(tp1)));
    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        if (sync.data().memberId().equals(consumerId) &&
            sync.data().generationId() == 1 &&
            sync.groupAssignments().containsKey(consumerId)) {
            // trigger the metadata update including both topics after the sync group request has been sent
            Map<String, Integer> topicPartitionCounts = new HashMap<>();
            topicPartitionCounts.put(topic1, 1);
            topicPartitionCounts.put(topic2, 1);
            client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, topicPartitionCounts));
            return true;
        }
        return false;
    }, syncGroupResponse(Collections.singletonList(tp1), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    // the metadata update should trigger a second rebalance
    client.prepareResponse(joinGroupLeaderResponse(2, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(syncGroupResponse(Arrays.asList(tp1, tp2), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(new HashSet<>(Arrays.asList(tp1, tp2)), subscriptions.assignedPartitions());
}
Use of org.apache.kafka.common.requests.SyncGroupRequest in project kafka by apache.
The class ConsumerCoordinatorTest, method prepareJoinAndSyncResponse.
private void prepareJoinAndSyncResponse(String consumerId, int generation, List<String> subscription, List<TopicPartition> assignment) {
    partitionAssignor.prepare(singletonMap(consumerId, assignment));
    client.prepareResponse(joinGroupLeaderResponse(generation, consumerId, singletonMap(consumerId, subscription), Errors.NONE));
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
            sync.data().generationId() == generation &&
            sync.groupAssignments().containsKey(consumerId);
    }, syncGroupResponse(assignment, Errors.NONE));
}
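A minimal sketch of how this helper might be called from a test body. This is not taken from ConsumerCoordinatorTest itself; it assumes the same fixtures used in the tests above (subscriptions, client, coordinator, metadataResponse, node, consumerId, topic1, t1p), and the generation number and single-topic subscription are illustrative only.

// Hypothetical usage sketch: stage a leader join and the matching SyncGroupRequest
// check for generation 1, then drive the coordinator through the rebalance.
subscriptions.subscribe(singleton(topic1), rebalanceListener);
client.updateMetadata(metadataResponse);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
prepareJoinAndSyncResponse(consumerId, 1, singletonList(topic1), singletonList(t1p));
coordinator.poll(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(singleton(t1p), subscriptions.assignedPartitions());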
Use of org.apache.kafka.common.requests.SyncGroupRequest in project kafka by apache.
The class ConsumerCoordinatorTest, method testCommitOffsetRebalanceInProgress.
@Test
public void testCommitOffsetRebalanceInProgress() {
    // we cannot retry if a rebalance occurs before the commit completes
    final String consumerId = "leader";
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    // ensure metadata is up to date for the leader
    client.updateMetadata(metadataResponse);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // normal join group
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));
    coordinator.ensureActiveGroup(time.timer(0L));
    assertTrue(coordinator.rejoinNeededOrPending());
    assertNull(coordinator.generationIfStable());
    // while the state is REBALANCING, the request is not even sent; the commit fails immediately
    assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(
        singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
    final Node coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.respondFrom(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE), coordinatorNode);
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
            sync.data().generationId() == 1 &&
            sync.groupAssignments().containsKey(consumerId);
    }, syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    AbstractCoordinator.Generation expectedGeneration = new AbstractCoordinator.Generation(1, consumerId, partitionAssignor.name());
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(expectedGeneration, coordinator.generationIfStable());
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
    assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(
        singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
    assertTrue(coordinator.rejoinNeededOrPending());
    assertEquals(expectedGeneration, coordinator.generationIfStable());
}
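The same SyncGroupRequest check recurs across the tests above. As a hedged sketch that is not part of ConsumerCoordinatorTest, the check could be factored into a small helper returning a request matcher, assuming the MockClient.RequestMatcher callback from the Kafka clients test utilities (the same callback type the lambdas above satisfy); the helper name and the expectLeader flag are illustrative.

// Hypothetical helper, assuming org.apache.kafka.clients.MockClient.RequestMatcher.
// Matches only a SyncGroupRequest carrying the expected member id and generation,
// with an assignment entry for that member when sent by the leader, or an empty
// assignment map when sent by a follower.
private MockClient.RequestMatcher syncGroupRequestMatcher(String memberId, int generation, boolean expectLeader) {
    return body -> {
        if (!(body instanceof SyncGroupRequest))
            return false;
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(memberId)
            && sync.data().generationId() == generation
            && (expectLeader
                ? sync.groupAssignments().containsKey(memberId)
                : sync.groupAssignments().isEmpty());
    };
}

With such a helper, each inline lambda above would collapse to a single call such as client.prepareResponse(syncGroupRequestMatcher(consumerId, 1, true), syncGroupResponse(assigned, Errors.NONE)).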