Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment in the Apache Kafka project.
In the class ConsumerProtocol, the method deserializeAssignment:
public static Assignment deserializeAssignment(final ByteBuffer buffer, short version) {
    version = checkAssignmentVersion(version);
    try {
        ConsumerProtocolAssignment data = new ConsumerProtocolAssignment(new ByteBufferAccessor(buffer), version);
        List<TopicPartition> assignedPartitions = new ArrayList<>();
        for (ConsumerProtocolAssignment.TopicPartition tp : data.assignedPartitions()) {
            for (Integer partition : tp.partitions()) {
                assignedPartitions.add(new TopicPartition(tp.topic(), partition));
            }
        }
        return new Assignment(assignedPartitions, data.userData() != null ? data.userData().duplicate() : null);
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing consumer protocol's assignment", e);
    }
}
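For context, here is a minimal round-trip sketch, not taken from the Kafka sources: the topic name, partition numbers, and user data are illustrative assumptions, and ConsumerProtocol lives in the internals package, so this is purely illustrative. It shows how a buffer produced by ConsumerProtocol.serializeAssignment can be read back with deserializeAssignment.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.common.TopicPartition;

public class AssignmentRoundTripExample {
    public static void main(String[] args) {
        // Illustrative topic-partitions; any assignment produced by an assignor would do.
        List<TopicPartition> partitions = Arrays.asList(
            new TopicPartition("topic1", 0),
            new TopicPartition("topic1", 1));
        ByteBuffer userData = ByteBuffer.wrap("custom-data".getBytes(StandardCharsets.UTF_8));

        // Serialize the assignment into the consumer protocol wire format...
        ByteBuffer buffer = ConsumerProtocol.serializeAssignment(new Assignment(partitions, userData));

        // ...and parse it back; the partitions and user data survive the round trip.
        Assignment roundTrip = ConsumerProtocol.deserializeAssignment(buffer);
        System.out.println(roundTrip.partitions()); // [topic1-0, topic1-1]
    }
}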
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment in the Apache Kafka project.
In the class ConsumerCoordinator, the method onJoinComplete:
@Override
protected void onJoinComplete(int generation, String memberId, String assignmentStrategy, ByteBuffer assignmentBuffer) {
    log.debug("Executing onJoinComplete with generation {} and memberId {}", generation, memberId);

    // Only the leader is responsible for monitoring for metadata changes (i.e. partition changes)
    if (!isLeader)
        assignmentSnapshot = null;

    ConsumerPartitionAssignor assignor = lookupAssignor(assignmentStrategy);
    if (assignor == null)
        throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);

    // Give the assignor a chance to update internal state based on the received assignment
    groupMetadata = new ConsumerGroupMetadata(rebalanceConfig.groupId, generation, memberId, rebalanceConfig.groupInstanceId);

    SortedSet<TopicPartition> ownedPartitions = new TreeSet<>(COMPARATOR);
    ownedPartitions.addAll(subscriptions.assignedPartitions());

    // should at least encode the short version
    if (assignmentBuffer.remaining() < 2)
        throw new IllegalStateException("There are insufficient bytes available to read assignment from the sync-group response (" +
            "actual byte size " + assignmentBuffer.remaining() + ") , this is not expected; " +
            "it is possible that the leader's assign function is buggy and did not return any assignment for this member, " +
            "or because static member is configured and the protocol is buggy hence did not get the assignment for this member");

    Assignment assignment = ConsumerProtocol.deserializeAssignment(assignmentBuffer);

    SortedSet<TopicPartition> assignedPartitions = new TreeSet<>(COMPARATOR);
    assignedPartitions.addAll(assignment.partitions());

    if (!subscriptions.checkAssignmentMatchedSubscription(assignedPartitions)) {
        final String reason = String.format("received assignment %s does not match the current subscription %s; " +
            "it is likely that the subscription has changed since we joined the group, will re-join with current subscription",
            assignment.partitions(), subscriptions.prettyString());
        requestRejoin(reason);
        return;
    }

    final AtomicReference<Exception> firstException = new AtomicReference<>(null);
    SortedSet<TopicPartition> addedPartitions = new TreeSet<>(COMPARATOR);
    addedPartitions.addAll(assignedPartitions);
    addedPartitions.removeAll(ownedPartitions);

    if (protocol == RebalanceProtocol.COOPERATIVE) {
        SortedSet<TopicPartition> revokedPartitions = new TreeSet<>(COMPARATOR);
        revokedPartitions.addAll(ownedPartitions);
        revokedPartitions.removeAll(assignedPartitions);

        log.info("Updating assignment with\n" +
            "\tAssigned partitions: {}\n" +
            "\tCurrent owned partitions: {}\n" +
            "\tAdded partitions (assigned - owned): {}\n" +
            "\tRevoked partitions (owned - assigned): {}\n",
            assignedPartitions, ownedPartitions, addedPartitions, revokedPartitions);

        if (!revokedPartitions.isEmpty()) {
            // Revoke partitions that were previously owned but no longer assigned;
            // note that we should only change the assignment (or update the assignor's state)
            // AFTER we've triggered the revoke callback
            firstException.compareAndSet(null, invokePartitionsRevoked(revokedPartitions));

            // If revoked any partitions, need to re-join the group afterwards
            final String reason = String.format("need to revoke partitions %s as indicated " +
                "by the current assignment and re-join", revokedPartitions);
            requestRejoin(reason);
        }
    }

    // The leader may have assigned partitions which match our subscription pattern, but which
    // were not explicitly requested, so we update the joined subscription here.
    maybeUpdateJoinedSubscription(assignedPartitions);

    // Catch any exception here to make sure we could complete the user callback.
    firstException.compareAndSet(null, invokeOnAssignment(assignor, assignment));

    // Reschedule the auto commit starting from now
    if (autoCommitEnabled)
        this.nextAutoCommitTimer.updateAndReset(autoCommitIntervalMs);

    subscriptions.assignFromSubscribed(assignedPartitions);

    // Add partitions that were not previously owned but are now assigned
    firstException.compareAndSet(null, invokePartitionsAssigned(addedPartitions));

    if (firstException.get() != null) {
        if (firstException.get() instanceof KafkaException) {
            throw (KafkaException) firstException.get();
        } else {
            throw new KafkaException("User rebalance callback throws an error", firstException.get());
        }
    }
}
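The invokePartitionsRevoked and invokePartitionsAssigned calls above ultimately drive the user's ConsumerRebalanceListener, and any exception the listener throws is collected in firstException and rethrown at the end. A minimal sketch of such a listener follows; the class name and the log messages are illustrative assumptions, not part of the Kafka sources.

import java.util.Collection;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

public class LoggingRebalanceListener implements ConsumerRebalanceListener {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Invoked before the revoked partitions are removed from the assignment,
        // e.g. to commit offsets or flush per-partition state.
        System.out.println("Revoked: " + partitions);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Invoked after the new assignment has been applied,
        // e.g. to seek to externally stored offsets.
        System.out.println("Assigned: " + partitions);
    }
}

Such a listener is registered with consumer.subscribe(topics, listener); the coordinator then calls it from onJoinComplete as shown above.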
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment in the Apache Kafka project.
In the class ConsumerProtocolTest, the method serializeDeserializeAssignment:
@Test
public void serializeDeserializeAssignment() {
    List<TopicPartition> partitions = Arrays.asList(tp1, tp2);
    ByteBuffer buffer = ConsumerProtocol.serializeAssignment(new Assignment(partitions, ByteBuffer.wrap(new byte[0])));
    Assignment parsedAssignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(partitions), toSet(parsedAssignment.partitions()));
    assertEquals(0, parsedAssignment.userData().limit());
}
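A companion sketch, not copied from the project's test suite, that exercises the null userData branch handled in deserializeAssignment above; it assumes the same tp1/tp2 fixtures and toSet helper as the test above.

@Test
public void serializeDeserializeNullAssignmentUserData() {
    List<TopicPartition> partitions = Arrays.asList(tp1, tp2);
    // A null userData is permitted by the assignment schema and should round-trip as null.
    ByteBuffer buffer = ConsumerProtocol.serializeAssignment(new Assignment(partitions, null));
    Assignment parsedAssignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(partitions), toSet(parsedAssignment.partitions()));
    assertNull(parsedAssignment.userData());
}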
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment in the Apache Kafka project.
In the class StreamsPartitionAssignorTest, the method shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount:
@Test
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount() {
    builder = new CorruptedInternalTopologyBuilder();
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));

    final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder);

    final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>());
    final KTable<String, String> inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store")));
    inputTopic.groupBy(
            (k, v) -> k,
            Grouped.with("GroupName", Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
        .aggregate(() -> "", (k, v, a) -> a + k)
        .leftJoin(inputTable, v -> v, (x, y) -> x + y);
    streamsBuilder.buildAndOptimizeTopology();

    configureDefault();
    subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));

    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(), equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment in the Apache Kafka project.
In the class StreamsPartitionAssignorTest, the method testAssignBasic:
@Test
public void testAssignBasic() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");

    final List<String> topics = asList("topic1", "topic2");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);

    final Set<TaskId> prevTasks10 = mkSet(TASK_0_0);
    final Set<TaskId> prevTasks11 = mkSet(TASK_0_1);
    final Set<TaskId> prevTasks20 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks10 = EMPTY_TASKS;
    final Set<TaskId> standbyTasks11 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks20 = mkSet(TASK_0_0);

    createMockTaskManager(prevTasks10, standbyTasks10);
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
        singletonList(APPLICATION_ID + "-store-changelog"),
        singletonList(3)));
    configureDefaultPartitionAssignor();

    subscriptions.put("consumer10", new Subscription(topics, getInfo(UUID_1, prevTasks10, standbyTasks10).encode()));
    subscriptions.put("consumer11", new Subscription(topics, getInfo(UUID_1, prevTasks11, standbyTasks11).encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, prevTasks20, standbyTasks20).encode()));

    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    // check the assignment
    assertEquals(
        mkSet(mkSet(t1p0, t2p0), mkSet(t1p1, t2p1)),
        mkSet(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));
    assertEquals(mkSet(t1p2, t2p2), new HashSet<>(assignments.get("consumer20").partitions()));

    // check assignment info

    // the first consumer
    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());

    // the second consumer
    final AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks());

    assertEquals(mkSet(TASK_0_0, TASK_0_1), allActiveTasks);

    // the third consumer
    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks());

    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));

    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}