Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.
From the class StreamsPartitionAssignorTest, the method shouldTriggerImmediateRebalanceOnTasksRevoked.
@Test
public void shouldTriggerImmediateRebalanceOnTasksRevoked() {
    builder.addSource(null, "source1", null, null, null, "topic1");

    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    final List<TopicPartition> allPartitions = asList(t1p0, t1p1, t1p2);
    subscriptions.put(CONSUMER_1,
        new Subscription(
            Collections.singletonList("topic1"),
            getInfo(UUID_1, allTasks, EMPTY_TASKS).encode(),
            allPartitions));
    subscriptions.put(CONSUMER_2,
        new Subscription(
            Collections.singletonList("topic1"),
            getInfo(UUID_1, EMPTY_TASKS, allTasks).encode(),
            emptyList()));

    createMockTaskManager(allTasks, allTasks);
    configurePartitionAssignorWith(singletonMap(StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG, 0L));

    final Map<String, Assignment> assignment =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    // Verify at least one partition was revoked
    assertThat(assignment.get(CONSUMER_1).partitions(), not(allPartitions));
    assertThat(assignment.get(CONSUMER_2).partitions(), equalTo(emptyList()));

    // Verify that stateless revoked tasks would not be assigned as standbys
    assertThat(AssignmentInfo.decode(assignment.get(CONSUMER_2).userData()).activeTasks(), equalTo(emptyList()));
    assertThat(AssignmentInfo.decode(assignment.get(CONSUMER_2).userData()).standbyTasks(), equalTo(emptyMap()));

    partitionAssignor.onAssignment(assignment.get(CONSUMER_2), null);

    // A nextScheduledRebalanceMs of 0 means a follow-up rebalance was requested immediately
    assertThat(referenceContainer.nextScheduledRebalanceMs.get(), is(0L));
}
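All of these examples share the same entry point: one Subscription per group member is collected into a map, wrapped in a GroupSubscription, and handed to ConsumerPartitionAssignor.assign(), which returns a GroupAssignment keyed by member. A minimal sketch of that flow, assuming an already-configured assignor and cluster metadata (the member names, topic, and userData parameters are illustrative, not taken from the test):

import static java.util.Collections.singletonList;

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;
import org.apache.kafka.common.Cluster;

public final class GroupSubscriptionFlow {

    // Wrap one Subscription per group member in a GroupSubscription, run the
    // assignor, and unpack the per-member Assignments from the GroupAssignment.
    static Map<String, Assignment> assignGroup(final ConsumerPartitionAssignor assignor,
                                               final Cluster metadata,
                                               final ByteBuffer memberAUserData,
                                               final ByteBuffer memberBUserData) {
        final Map<String, Subscription> subscriptions = new HashMap<>();
        subscriptions.put("memberA", new Subscription(singletonList("topic1"), memberAUserData));
        subscriptions.put("memberB", new Subscription(singletonList("topic1"), memberBUserData));
        return assignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    }
}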
From the class StreamsPartitionAssignorTest, the method shouldRequestCommittedOffsetsForPreexistingSourceChangelogs.
@Test
public void shouldRequestCommittedOffsetsForPreexistingSourceChangelogs() {
    final Set<TopicPartition> changelogs = mkSet(
        new TopicPartition("topic1", 0),
        new TopicPartition("topic1", 1),
        new TopicPartition("topic1", 2));

    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.table("topic1", Materialized.as("store"));

    final Properties props = new Properties();
    props.putAll(configProps());
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build(props));
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(props));

    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));

    createDefaultMockTaskManager();
    configurePartitionAssignorWith(singletonMap(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE));
    overwriteInternalTopicManagerWithMock(false);

    final Consumer<byte[], byte[]> consumerClient = referenceContainer.mainConsumer;
    EasyMock.expect(consumerClient.committed(EasyMock.eq(changelogs)))
        .andReturn(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> new OffsetAndMetadata(Long.MAX_VALUE))))
        .once();
    EasyMock.replay(consumerClient);

    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));

    EasyMock.verify(consumerClient);
}
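The committed() expectation above mirrors a real Consumer API call. With topology optimization enabled, the source topic itself serves as the table's changelog, so the assignor gauges restore progress against the group's committed offsets instead of changelog end offsets. A hedged sketch of the underlying call (the class and method names here are illustrative):

import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class SourceChangelogOffsets {

    // Sketch: for source-topic changelogs, the group's committed offset stands
    // in for the changelog "end" when the assignor computes task lag.
    static Map<TopicPartition, OffsetAndMetadata> committedOffsets(final Consumer<byte[], byte[]> mainConsumer,
                                                                   final Set<TopicPartition> sourceChangelogs) {
        return mainConsumer.committed(sourceChangelogs);
    }
}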
From the class StreamsPartitionAssignorTest, the method shouldRequestEndOffsetsForPreexistingChangelogs.
@Test
public void shouldRequestEndOffsetsForPreexistingChangelogs() {
    final Set<TopicPartition> changelogs = mkSet(
        new TopicPartition(APPLICATION_ID + "-store-changelog", 0),
        new TopicPartition(APPLICATION_ID + "-store-changelog", 1),
        new TopicPartition(APPLICATION_ID + "-store-changelog", 2));

    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(Long.MAX_VALUE);
        EasyMock.replay(info);
        return info;
    })));
    final Capture<Map<TopicPartition, OffsetSpec>> capturedChangelogs = EasyMock.newCapture();

    expect(adminClient.listOffsets(EasyMock.capture(capturedChangelogs))).andReturn(result).once();
    expect(result.all()).andReturn(allFuture);

    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor1");

    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));

    EasyMock.replay(result);
    configureDefault();
    overwriteInternalTopicManagerWithMock(false);

    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));

    EasyMock.verify(adminClient);
    assertThat(capturedChangelogs.getValue().keySet(), equalTo(changelogs));
}
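For regular (non-source) changelogs the assignor instead asks the Admin client for log-end offsets, which is the interaction the mock captures above. A hedged sketch of that call, assuming a real Admin client (the class and method names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

class ChangelogEndOffsets {

    // Sketch: request the log-end offset of each preexisting changelog
    // partition, as the assignor does when computing per-client task lags.
    static Map<TopicPartition, ListOffsetsResultInfo> endOffsets(final Admin adminClient,
                                                                 final Set<TopicPartition> changelogs)
            throws ExecutionException, InterruptedException {
        final Map<TopicPartition, OffsetSpec> request = new HashMap<>();
        for (final TopicPartition changelog : changelogs) {
            request.put(changelog, OffsetSpec.latest());
        }
        return adminClient.listOffsets(request).all().get();
    }
}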
From the class StreamsPartitionAssignorTest, the method shouldMapUserEndPointToTopicPartitions.
@Test
public void shouldMapUserEndPointToTopicPartitions() {
    builder.addSource(null, "source", null, null, null, "topic1");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source");
    builder.addSink("sink", "output", null, null, null, "processor");

    final List<String> topics = Collections.singletonList("topic1");

    createDefaultMockTaskManager();
    configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, USER_END_POINT));

    subscriptions.put("consumer1", new Subscription(topics, getInfo(UUID_1, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode()));

    final Map<String, Assignment> assignments =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final Assignment consumerAssignment = assignments.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumerAssignment.userData());
    final Set<TopicPartition> topicPartitions = assignmentInfo.partitionsByHost().get(new HostInfo("localhost", 8080));

    assertEquals(
        mkSet(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1), new TopicPartition("topic1", 2)),
        topicPartitions);
}
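The assertion implies USER_END_POINT is "localhost:8080": the value of StreamsConfig.APPLICATION_SERVER_CONFIG is a plain host:port string that the assignor parses and broadcasts in the assignment userData, so every member can build the partitions-by-host routing table used for interactive queries. An illustrative parse (not Kafka's actual implementation):

import org.apache.kafka.streams.state.HostInfo;

class EndpointParsing {

    // Illustrative only: split "localhost:8080" at the last ':' into the
    // HostInfo("localhost", 8080) key looked up in partitionsByHost() above.
    static HostInfo parseEndpoint(final String endpoint) {
        final int sep = endpoint.lastIndexOf(':');
        return new HostInfo(endpoint.substring(0, sep), Integer.parseInt(endpoint.substring(sep + 1)));
    }
}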
From the class StreamsPartitionAssignorTest, the method shouldGenerateTasksForAllCreatedPartitions.
@Test
public void shouldGenerateTasksForAllCreatedPartitions() {
    final StreamsBuilder streamsBuilder = new StreamsBuilder();

    // KStream with 3 partitions
    final KStream<Object, Object> stream1 = streamsBuilder
        .stream("topic1")
        .map((KeyValueMapper<Object, Object, KeyValue<Object, Object>>) KeyValue::new);

    // KTable with 4 partitions
    final KTable<Object, Long> table1 = streamsBuilder
        .table("topic3")
        .groupBy(KeyValue::new)
        .count();

    // Joining the stream and the table triggers the enforceCopartitioning() routine
    // in the StreamsPartitionAssignor, forcing the stream.map() output to be
    // repartitioned to a topic with four partitions.
    stream1.join(table1, (ValueJoiner<Object, Object, Void>) (value1, value2) -> null);

    final String client = "client1";
    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
        asList(APPLICATION_ID + "-topic3-STATE-STORE-0000000002-changelog",
               APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog"),
        asList(4, 4)));
    final MockInternalTopicManager mockInternalTopicManager = configureDefault();
    subscriptions.put(client, new Subscription(asList("topic1", "topic3"), defaultSubscriptionInfo.encode()));

    final Map<String, Assignment> assignment =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    final Map<String, Integer> expectedCreatedInternalTopics = new HashMap<>();
    expectedCreatedInternalTopics.put(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 4);
    expectedCreatedInternalTopics.put(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog", 4);
    expectedCreatedInternalTopics.put(APPLICATION_ID + "-topic3-STATE-STORE-0000000002-changelog", 4);
    expectedCreatedInternalTopics.put(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 4);

    // Check that all internal topics were created as expected
    assertThat(mockInternalTopicManager.readyTopics, equalTo(expectedCreatedInternalTopics));

    final List<TopicPartition> expectedAssignment = asList(
        new TopicPartition("topic1", 0), new TopicPartition("topic1", 1), new TopicPartition("topic1", 2),
        new TopicPartition("topic3", 0), new TopicPartition("topic3", 1),
        new TopicPartition("topic3", 2), new TopicPartition("topic3", 3),
        new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 0),
        new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 1),
        new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 2),
        new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 3),
        new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 0),
        new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 1),
        new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 2),
        new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 3));

    // Check that a task was created for every expected topic partition
    assertThat(new HashSet<>(assignment.get(client).partitions()), equalTo(new HashSet<>(expectedAssignment)));
}
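Why four partitions for every internal topic? Copartitioning enforcement sizes the repartition topics of a copartition group to the largest partition count among the group's topics, here max(3, 4). A toy illustration of that rule (not Kafka's actual implementation):

class CopartitionSizing {

    // Toy illustration: repartition topics in a copartitioned group are sized
    // to the group's maximum partition count, so max(3, 4) == 4 here.
    static int enforcedPartitionCount(final int... partitionCounts) {
        int max = 0;
        for (final int count : partitionCounts) {
            max = Math.max(max, count);
        }
        return max;
    }
}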