Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription in project kafka by apache.
From the class ConsumerCoordinator, method performAssignment:
@Override
protected Map<String, ByteBuffer> performAssignment(String leaderId,
                                                     String assignmentStrategy,
                                                     Map<String, ByteBuffer> allSubscriptions) {
    PartitionAssignor assignor = lookupAssignor(assignmentStrategy);
    if (assignor == null)
        throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);

    Set<String> allSubscribedTopics = new HashSet<>();
    Map<String, Subscription> subscriptions = new HashMap<>();
    for (Map.Entry<String, ByteBuffer> subscriptionEntry : allSubscriptions.entrySet()) {
        Subscription subscription = ConsumerProtocol.deserializeSubscription(subscriptionEntry.getValue());
        subscriptions.put(subscriptionEntry.getKey(), subscription);
        allSubscribedTopics.addAll(subscription.topics());
    }

    // The leader will begin watching for changes to any of the topics the group is interested in,
    // which ensures that all metadata changes will eventually be seen.
    this.subscriptions.groupSubscribe(allSubscribedTopics);
    metadata.setTopics(this.subscriptions.groupSubscription());

    // Update metadata (if needed) and keep track of the metadata used for assignment so that
    // we can check after rebalance completion whether anything has changed.
    client.ensureFreshMetadata();
    isLeader = true;

    log.debug("Performing assignment for group {} using strategy {} with subscriptions {}",
            groupId, assignor.name(), subscriptions);
    Map<String, Assignment> assignment = assignor.assign(metadata.fetch(), subscriptions);

    // A user-customized assignor may have created some topics that are not in the subscription list
    // and assigned their partitions to the members; in this case we would like to update the leader's
    // own metadata with the newly added topics so that it will not trigger a subsequent rebalance
    // when these topics get updated from a metadata refresh.
    //
    // TODO: this is a hack and not something we want to support long-term unless we push regex into the protocol;
    //       we may need to modify the PartitionAssignor API to better support this case.
    Set<String> assignedTopics = new HashSet<>();
    for (Assignment assigned : assignment.values()) {
        for (TopicPartition tp : assigned.partitions())
            assignedTopics.add(tp.topic());
    }

    if (!assignedTopics.containsAll(allSubscribedTopics)) {
        Set<String> notAssignedTopics = new HashSet<>(allSubscribedTopics);
        notAssignedTopics.removeAll(assignedTopics);
        log.warn("The following subscribed topics are not assigned to any members in the group {} : {} ",
                groupId, notAssignedTopics);
    }

    if (!allSubscribedTopics.containsAll(assignedTopics)) {
        Set<String> newlyAddedTopics = new HashSet<>(assignedTopics);
        newlyAddedTopics.removeAll(allSubscribedTopics);
        log.info("The following not-subscribed topics are assigned to group {}, and their metadata will be "
                + "fetched from the brokers : {}", groupId, newlyAddedTopics);

        allSubscribedTopics.addAll(assignedTopics);
        this.subscriptions.groupSubscribe(allSubscribedTopics);
        metadata.setTopics(this.subscriptions.groupSubscription());
        client.ensureFreshMetadata();
    }

    assignmentSnapshot = metadataSnapshot;

    log.debug("Finished assignment for group {}: {}", groupId, assignment);

    Map<String, ByteBuffer> groupAssignment = new HashMap<>();
    for (Map.Entry<String, Assignment> assignmentEntry : assignment.entrySet()) {
        ByteBuffer buffer = ConsumerProtocol.serializeAssignment(assignmentEntry.getValue());
        groupAssignment.put(assignmentEntry.getKey(), buffer);
    }

    return groupAssignment;
}
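The leader above works entirely on deserialized Subscription objects, one per member, keyed by member id. The following minimal sketch shows that round trip in isolation, using the same internal ConsumerProtocol helpers; the topic names are made up for illustration, and this internal API is not stable across Kafka versions.

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription;

public class SubscriptionRoundTrip {
    public static void main(String[] args) {
        // Each member builds a Subscription from the topics it wants (hypothetical topic names).
        Subscription original = new Subscription(Arrays.asList("orders", "payments"));

        // The member serializes it into the opaque bytes carried by the group protocol...
        ByteBuffer wire = ConsumerProtocol.serializeSubscription(original);

        // ...and the leader deserializes it, as performAssignment() does for every member.
        Subscription decoded = ConsumerProtocol.deserializeSubscription(wire);
        System.out.println(decoded.topics()); // [orders, payments]
    }
}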
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription in project kafka by apache.
From the class ConsumerProtocolTest, method deserializeNewSubscriptionVersion:
@Test
public void deserializeNewSubscriptionVersion() {
    // Verify that a new version which adds a field is still parseable.
    short version = 100;
    Schema subscriptionSchemaV100 = new Schema(
            new Field(ConsumerProtocol.TOPICS_KEY_NAME, new ArrayOf(Type.STRING)),
            new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES),
            new Field("foo", Type.STRING));
    Struct subscriptionV100 = new Struct(subscriptionSchemaV100);
    subscriptionV100.set(ConsumerProtocol.TOPICS_KEY_NAME, new Object[] { "topic" });
    subscriptionV100.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
    subscriptionV100.set("foo", "bar");

    Struct headerV100 = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
    headerV100.set(ConsumerProtocol.VERSION_KEY_NAME, version);

    ByteBuffer buffer = ByteBuffer.allocate(subscriptionV100.sizeOf() + headerV100.sizeOf());
    headerV100.writeTo(buffer);
    subscriptionV100.writeTo(buffer);
    buffer.flip();

    Subscription subscription = ConsumerProtocol.deserializeSubscription(buffer);
    assertEquals(Arrays.asList("topic"), subscription.topics());
}
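The test passes because deserializeSubscription reads only the fields it knows for its own schema version (the topic list and the user data); the extra "foo" field written by the hypothetical v100 schema is simply left unread in the buffer rather than causing a parse failure, which is what lets newer clients add subscription fields without breaking older group leaders.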
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription in project apache-kafka-on-k8s by banzaicloud.
From the class RangeAssignorTest, method testMultipleConsumersMixedTopics:
@Test
public void testMultipleConsumersMixedTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    String consumer3 = "consumer3";

    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 3);
    partitionsPerTopic.put(topic2, 2);

    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer3, new Subscription(topics(topic1)));

    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertAssignment(partitions(tp(topic1, 0)), assignment.get(consumer1));
    assertAssignment(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer2));
    assertAssignment(partitions(tp(topic1, 2)), assignment.get(consumer3));
}
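The range assignor works one topic at a time: it sorts the consumers subscribed to that topic, divides the partition count by the subscriber count, and gives the first (partitions % subscribers) consumers one extra partition. That is why topic1 above is split one partition per consumer (3 partitions, 3 subscribers), while both partitions of topic2 go to consumer2, its only subscriber. The sketch below reproduces that per-topic arithmetic in isolation; it is an illustration of the idea, not the actual RangeAssignor code.

import java.util.*;

public class RangeSplitSketch {
    // Split one topic's partitions into contiguous ranges across its sorted subscribers.
    static Map<String, List<Integer>> splitTopic(int numPartitions, List<String> subscribers) {
        List<String> sorted = new ArrayList<>(subscribers);
        Collections.sort(sorted);
        int perConsumer = numPartitions / sorted.size();
        int withExtra = numPartitions % sorted.size();

        Map<String, List<Integer>> assignment = new LinkedHashMap<>();
        int next = 0;
        for (int i = 0; i < sorted.size(); i++) {
            // The first 'withExtra' consumers receive one extra partition.
            int count = perConsumer + (i < withExtra ? 1 : 0);
            List<Integer> partitions = new ArrayList<>();
            for (int p = 0; p < count; p++)
                partitions.add(next++);
            assignment.put(sorted.get(i), partitions);
        }
        return assignment;
    }

    public static void main(String[] args) {
        // topic1: 3 partitions, 3 subscribers -> one partition each, matching the test above.
        System.out.println(splitTopic(3, Arrays.asList("consumer1", "consumer2", "consumer3")));
        // topic2: 2 partitions, 1 subscriber -> consumer2 takes both.
        System.out.println(splitTopic(2, Arrays.asList("consumer2")));
    }
}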
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription in project apache-kafka-on-k8s by banzaicloud.
From the class RangeAssignorTest, method testTwoConsumersTwoTopicsSixPartitions:
@Test
public void testTwoConsumersTwoTopicsSixPartitions() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";

    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 3);
    partitionsPerTopic.put(topic2, 3);

    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));

    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertAssignment(partitions(tp(topic1, 0), tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer1));
    assertAssignment(partitions(tp(topic1, 2), tp(topic2, 2)), assignment.get(consumer2));
}
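Here both consumers subscribe to both topics, so for each topic the split is 3 / 2 = 1 with remainder 1: the first consumer in sorted order takes partitions 0 and 1 and the second takes partition 2, giving the uneven 4-to-2 distribution asserted above.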
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription in project apache-kafka-on-k8s by banzaicloud.
From the class RoundRobinAssignorTest, method testTwoConsumersTwoTopicsSixPartitions:
@Test
public void testTwoConsumersTwoTopicsSixPartitions() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";

    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 3);
    partitionsPerTopic.put(topic2, 3);

    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));

    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(partitions(tp(topic1, 0), tp(topic1, 2), tp(topic2, 1)), assignment.get(consumer1));
    assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 2)), assignment.get(consumer2));
}
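The round-robin assignor, by contrast, pools all partitions across topics and deals them out in turn to the consumers in sorted order. Below is a minimal sketch of that dealing under the same conditions as the test (every consumer subscribes to every topic); the real RoundRobinAssignor additionally skips a consumer when it is not subscribed to the topic of the partition being dealt.

import java.util.*;

public class RoundRobinSketch {
    public static void main(String[] args) {
        // All partitions, sorted by topic and then partition number, mirroring the test above.
        List<String> partitions = Arrays.asList(
                "topic1-0", "topic1-1", "topic1-2",
                "topic2-0", "topic2-1", "topic2-2");
        List<String> consumers = Arrays.asList("consumer1", "consumer2");

        // Deal each partition to the next consumer in circular order.
        Map<String, List<String>> assignment = new LinkedHashMap<>();
        for (String consumer : consumers)
            assignment.put(consumer, new ArrayList<>());
        for (int i = 0; i < partitions.size(); i++)
            assignment.get(consumers.get(i % consumers.size())).add(partitions.get(i));

        // consumer1 -> [topic1-0, topic1-2, topic2-1], consumer2 -> [topic1-1, topic2-0, topic2-2],
        // the same interleaved result the test asserts.
        System.out.println(assignment);
    }
}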