Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in the Apache Kafka project.
From the class StreamsPartitionAssignorTest, the method testAssignWithStandbyReplicas:
@Test
public void testAssignWithStandbyReplicas() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor");

    final List<String> topics = asList("topic1", "topic2");
    final Set<TopicPartition> allTopicPartitions = topics.stream()
        .map(topic -> asList(new TopicPartition(topic, 0), new TopicPartition(topic, 1), new TopicPartition(topic, 2)))
        .flatMap(Collection::stream)
        .collect(Collectors.toSet());

    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);

    final Set<TaskId> prevTasks00 = mkSet(TASK_0_0);
    final Set<TaskId> prevTasks01 = mkSet(TASK_0_1);
    final Set<TaskId> prevTasks02 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks00 = mkSet(TASK_0_0);
    final Set<TaskId> standbyTasks01 = mkSet(TASK_0_1);
    final Set<TaskId> standbyTasks02 = mkSet(TASK_0_2);

    createMockTaskManager(prevTasks00, standbyTasks01);
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
        singletonList(APPLICATION_ID + "-store1-changelog"),
        singletonList(3)));
    configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1));

    subscriptions.put("consumer10",
        new Subscription(topics, getInfo(UUID_1, prevTasks00, EMPTY_TASKS, USER_END_POINT).encode()));
    subscriptions.put("consumer11",
        new Subscription(topics, getInfo(UUID_1, prevTasks01, standbyTasks02, USER_END_POINT).encode()));
    subscriptions.put("consumer20",
        new Subscription(topics, getInfo(UUID_2, prevTasks02, standbyTasks00, OTHER_END_POINT).encode()));

    final Map<String, Assignment> assignments =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    // the first consumer
    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());
    final Set<TaskId> allStandbyTasks = new HashSet<>(info10.standbyTasks().keySet());

    // the second consumer
    final AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks());
    allStandbyTasks.addAll(info11.standbyTasks().keySet());

    assertNotEquals("same processId has same set of standby tasks", info11.standbyTasks().keySet(), info10.standbyTasks().keySet());

    // check active tasks assigned to the first client
    assertEquals(mkSet(TASK_0_0, TASK_0_1), new HashSet<>(allActiveTasks));
    assertEquals(mkSet(TASK_0_2), new HashSet<>(allStandbyTasks));

    // the third consumer
    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks());
    allStandbyTasks.addAll(info20.standbyTasks().keySet());

    // all task ids are in the active tasks and also in the standby tasks
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
    assertEquals(3, allStandbyTasks.size());
    assertEquals(allTasks, allStandbyTasks);

    // Check host partition assignments
    final Map<HostInfo, Set<TopicPartition>> partitionsByHost = info10.partitionsByHost();
    assertEquals(2, partitionsByHost.size());
    assertEquals(allTopicPartitions,
        partitionsByHost.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()));

    final Map<HostInfo, Set<TopicPartition>> standbyPartitionsByHost = info10.standbyPartitionByHost();
    assertEquals(2, standbyPartitionsByHost.size());
    assertEquals(allTopicPartitions,
        standbyPartitionsByHost.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()));

    for (final HostInfo hostInfo : partitionsByHost.keySet()) {
        assertTrue(Collections.disjoint(partitionsByHost.get(hostInfo), standbyPartitionsByHost.get(hostInfo)));
    }

    // All consumers got the same host info
    assertEquals(partitionsByHost, info11.partitionsByHost());
    assertEquals(partitionsByHost, info20.partitionsByHost());
    assertEquals(standbyPartitionsByHost, info11.standbyPartitionByHost());
    assertEquals(standbyPartitionsByHost, info20.standbyPartitionByHost());
}
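The standby count the test exercises is the same setting an application would configure. Below is a minimal sketch of enabling one standby replica per stateful task; the class name, application id, and bootstrap servers are placeholders and not taken from the test.

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class StandbyConfigExample {
    // Hypothetical application-side configuration; only NUM_STANDBY_REPLICAS_CONFIG
    // mirrors the value the test passes to configurePartitionAssignorWith(...).
    public static Properties standbyProps() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");       // placeholder id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder brokers
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);             // one standby per stateful task
        return props;
    }
}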
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in the Apache Kafka project.
From the class StreamsPartitionAssignorTest, the method shouldThrowIllegalStateExceptionIfAnyPartitionsMissingFromChangelogEndOffsets:
@Test
public void shouldThrowIllegalStateExceptionIfAnyPartitionsMissingFromChangelogEndOffsets() {
    final int changelogNumPartitions = 3;
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");

    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
        singletonList(APPLICATION_ID + "-store1-changelog"),
        singletonList(changelogNumPartitions - 1)));
    configureDefault();

    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));

    assertThrows(IllegalStateException.class,
        () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
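For reference, GroupSubscription is the public wrapper around the per-member Subscription map that assign(metadata, groupSubscription) receives. A minimal sketch using only the public consumer API; the class name, member id, and topic below are illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;

import static java.util.Collections.singletonList;

public class GroupSubscriptionExample {
    // Builds the same kind of GroupSubscription the tests pass to partitionAssignor.assign(...).
    public static GroupSubscription singleMember() {
        final Map<String, Subscription> members = new HashMap<>();
        members.put("consumer10", new Subscription(singletonList("topic1"))); // no user data attached
        return new GroupSubscription(members);
    }
}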
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in the Apache Kafka project.
From the class StreamsPartitionAssignorTest, the method testAssignWithStandbyReplicasAndStatelessTasks:
@Test
public void testAssignWithStandbyReplicasAndStatelessTasks() {
    builder.addSource(null, "source1", null, null, null, "topic1", "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");

    final List<String> topics = asList("topic1", "topic2");

    createMockTaskManager(mkSet(TASK_0_0), emptySet());
    configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1));

    subscriptions.put("consumer10", new Subscription(topics, getInfo(UUID_1, mkSet(TASK_0_0), emptySet()).encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, mkSet(TASK_0_2), emptySet()).encode()));

    final Map<String, Assignment> assignments =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    assertTrue(info10.standbyTasks().isEmpty());

    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    assertTrue(info20.standbyTasks().isEmpty());
}
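Standby tasks only shadow state stores, which is why both standbyTasks() maps stay empty here even with num.standby.replicas set to 1. A minimal sketch of a comparable stateless topology; the class name and topic names are illustrative.

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;

public class StatelessTopologyExample {
    // No state store, hence no changelog topic and nothing for a standby task to restore.
    public static Topology build() {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("topic1")
            .mapValues(value -> value)   // stateless transformation
            .to("output-topic");         // illustrative output topic
        return builder.build();
    }
}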
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in the Apache Kafka project.
From the class StreamsPartitionAssignorTest, the method shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks:
@Test
public void shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks() {
    final StreamsBuilder streamsBuilder = new StreamsBuilder();

    final KStream<Object, Object> stream1 = streamsBuilder
        .stream("topic1")
        .selectKey((key, value) -> null)
        .groupByKey()
        .count(Materialized.as("count"))
        .toStream()
        .map((KeyValueMapper<Object, Long, KeyValue<Object, Object>>) (key, value) -> null);

    streamsBuilder
        .stream("unknownTopic")
        .selectKey((key, value) -> null)
        .join(stream1,
            (ValueJoiner<Object, Object, Void>) (value1, value2) -> null,
            JoinWindows.of(ofMillis(0)));

    final String client = "client1";
    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());

    final MockInternalTopicManager mockInternalTopicManager = configureDefault();

    subscriptions.put(client, new Subscription(Collections.singletonList("unknownTopic"), defaultSubscriptionInfo.encode()));

    final Map<String, Assignment> assignment =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    assertThat(mockInternalTopicManager.readyTopics.isEmpty(), equalTo(true));
    assertThat(assignment.get(client).partitions().isEmpty(), equalTo(true));
}
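While metadata for "unknownTopic" is missing, the assignor hands out no partitions and prepares no related internal topics instead of looping. As a hypothetical operational complement, the Admin client can verify that all expected source topics exist before an application is started; the class name, method, and bootstrap servers below are placeholders.

import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class SourceTopicCheck {
    // Returns true if every expected source topic already exists on the cluster.
    public static boolean allSourceTopicsExist(final Set<String> expectedTopics) throws Exception {
        final Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            final Set<String> existing = admin.listTopics().names().get();
            return existing.containsAll(expectedTopics);
        }
    }
}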
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in the Apache Kafka project.
From the class StreamsPartitionAssignorTest, the method shouldEncodeMissingSourceTopicError:
@Test
public void shouldEncodeMissingSourceTopicError() {
    final Cluster emptyClusterMetadata = new Cluster(
        "cluster",
        Collections.singletonList(Node.noNode()),
        emptyList(),
        emptySet(),
        emptySet());

    builder.addSource(null, "source1", null, null, null, "topic1");
    configureDefault();

    subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));

    final Map<String, Assignment> assignments =
        partitionAssignor.assign(emptyClusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();

    assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(),
        equalTo(AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.code()));
}
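On the member side, an assignment carrying the INCOMPLETE_SOURCE_TOPIC_METADATA error code is reported to stream threads as a MissingSourceTopicException. A minimal sketch of reacting to it via the public uncaught-exception handler API, assuming `streams` is an already-built KafkaStreams instance (the wrapper class and handler policy are illustrative, not from the test):

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.MissingSourceTopicException;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;

public class MissingSourceTopicHandler {
    // Registers a handler (before streams.start()) that logs missing source topics
    // and shuts the Streams client down.
    public static void register(final KafkaStreams streams) {
        streams.setUncaughtExceptionHandler(exception -> {
            if (exception instanceof MissingSourceTopicException) {
                System.err.println("Missing source topic: " + exception.getMessage());
            }
            return StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
        });
    }
}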