Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in project kafka by apache.
From the class AbstractStickyAssignorTest, method testTwoConsumersOneTopicTwoPartitions.
@Test
public void testTwoConsumersOneTopicTwoPartitions() {
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 2);
    subscriptions.put(consumer1, new Subscription(topics(topic)));
    subscriptions.put(consumer2, new Subscription(topics(topic)));

    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions);
    assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1));
    assertEquals(partitions(tp(topic, 1)), assignment.get(consumer2));

    assertTrue(assignor.partitionsTransferringOwnership.isEmpty());
    verifyValidityAndBalance(subscriptions, assignment, partitionsPerTopic);
    assertTrue(isFullyBalanced(assignment));
}
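Outside the test harness, the same pieces can be exercised through the public ConsumerPartitionAssignor API. The following is a minimal sketch, not taken from the test class: it describes a two-partition topic in a placeholder Cluster, subscribes two hypothetical members ("member-a", "member-b") to it, and lets a CooperativeStickyAssignor (a concrete AbstractStickyAssignor) produce the group assignment.

import java.util.*;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;
import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class StickyAssignSketch {
    public static void main(String[] args) {
        // One illustrative topic ("payments") with two partitions on a placeholder node.
        List<PartitionInfo> partitions = Arrays.asList(
            new PartitionInfo("payments", 0, Node.noNode(), new Node[0], new Node[0]),
            new PartitionInfo("payments", 1, Node.noNode(), new Node[0], new Node[0]));
        Cluster cluster = new Cluster("sketch-cluster",
            Collections.singletonList(Node.noNode()),
            partitions, Collections.emptySet(), Collections.emptySet());

        // Two members, each subscribed to the same topic.
        Map<String, Subscription> subscriptions = new HashMap<>();
        subscriptions.put("member-a", new Subscription(Collections.singletonList("payments")));
        subscriptions.put("member-b", new Subscription(Collections.singletonList("payments")));

        // The sticky assignor balances the two partitions across the two members.
        GroupAssignment group = new CooperativeStickyAssignor()
            .assign(cluster, new GroupSubscription(subscriptions));
        for (Map.Entry<String, Assignment> e : group.groupAssignment().entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue().partitions());
        }
    }
}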
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in project kafka by apache.
From the class StreamsPartitionAssignorTest, method shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount.
@Test
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount() {
    builder = new CorruptedInternalTopologyBuilder();
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));

    final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder);

    final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>());
    final KTable<String, String> inputTable = streamsBuilder.table(
        "topic2",
        new ConsumedInternal<>(),
        new MaterializedInternal<>(Materialized.as("store")));
    inputTopic
        .groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
        .aggregate(() -> "", (k, v, a) -> a + k)
        .leftJoin(inputTable, v -> v, (x, y) -> x + y);
    streamsBuilder.buildAndOptimizeTopology();

    configureDefault();

    subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));
    final Map<String, Assignment> assignments =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    assertThat(
        AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(),
        equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
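The assignment error above travels back to the members inside the opaque userData bytes that accompany each Subscription and Assignment; AssignmentInfo, SubscriptionInfo and AssignorError are Streams internals, but the userData carrier itself is part of the public protocol. A small sketch of attaching and reading back custom userData on a Subscription (the payload and topic name here are invented for illustration):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;

public class UserDataSketch {
    public static void main(String[] args) {
        // Hypothetical payload; Kafka Streams encodes a versioned SubscriptionInfo here instead.
        ByteBuffer payload = ByteBuffer.wrap("errCode=0".getBytes(StandardCharsets.UTF_8));

        // A Subscription carries the topic list plus the opaque userData bytes.
        Subscription subscription = new Subscription(Collections.singletonList("topic1"), payload);

        // On the group-leader side, the assignor reads the bytes back and decodes them itself.
        ByteBuffer received = subscription.userData();
        byte[] bytes = new byte[received.remaining()];
        received.get(bytes);
        System.out.println(new String(bytes, StandardCharsets.UTF_8));
    }
}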
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in project kafka by apache.
From the class StreamsPartitionAssignorTest, method testAssignBasic.
@Test
public void testAssignBasic() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");

    final List<String> topics = asList("topic1", "topic2");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);

    final Set<TaskId> prevTasks10 = mkSet(TASK_0_0);
    final Set<TaskId> prevTasks11 = mkSet(TASK_0_1);
    final Set<TaskId> prevTasks20 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks10 = EMPTY_TASKS;
    final Set<TaskId> standbyTasks11 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks20 = mkSet(TASK_0_0);

    createMockTaskManager(prevTasks10, standbyTasks10);
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
        singletonList(APPLICATION_ID + "-store-changelog"),
        singletonList(3)));
    configureDefaultPartitionAssignor();

    subscriptions.put("consumer10", new Subscription(topics, getInfo(UUID_1, prevTasks10, standbyTasks10).encode()));
    subscriptions.put("consumer11", new Subscription(topics, getInfo(UUID_1, prevTasks11, standbyTasks11).encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, prevTasks20, standbyTasks20).encode()));

    final Map<String, Assignment> assignments =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();

    // check the assignment
    assertEquals(
        mkSet(mkSet(t1p0, t2p0), mkSet(t1p1, t2p1)),
        mkSet(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));
    assertEquals(mkSet(t1p2, t2p2), new HashSet<>(assignments.get("consumer20").partitions()));

    // check assignment info
    // the first consumer
    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());

    // the second consumer
    final AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks());
    assertEquals(mkSet(TASK_0_0, TASK_0_1), allActiveTasks);

    // the third consumer
    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks());
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}
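StreamsPartitionAssignor participates in the rebalance through the same ConsumerPartitionAssignor contract the test drives here. As a rough illustration of that contract only, not of the Streams algorithm (which also weighs the previous active and standby tasks carried in the encoded subscription info), a toy assignor that round-robins every subscribed partition across members could be written as follows:

import java.util.*;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.TopicPartition;

// Toy example only: ignores userData, per-member subscriptions, rack awareness, and stickiness.
public class RoundRobinToyAssignor implements ConsumerPartitionAssignor {

    @Override
    public String name() {
        return "round-robin-toy";
    }

    @Override
    public GroupAssignment assign(final Cluster metadata, final GroupSubscription groupSubscription) {
        final List<String> members = new ArrayList<>(groupSubscription.groupSubscription().keySet());
        Collections.sort(members);

        // Collect every partition of every subscribed topic.
        final SortedSet<String> topics = new TreeSet<>();
        for (final Subscription subscription : groupSubscription.groupSubscription().values()) {
            topics.addAll(subscription.topics());
        }
        final List<TopicPartition> allPartitions = new ArrayList<>();
        for (final String topic : topics) {
            final Integer count = metadata.partitionCountForTopic(topic);
            for (int p = 0; count != null && p < count; p++) {
                allPartitions.add(new TopicPartition(topic, p));
            }
        }

        // Deal the partitions out to the members in turn.
        final Map<String, List<TopicPartition>> assigned = new HashMap<>();
        for (final String member : members) {
            assigned.put(member, new ArrayList<>());
        }
        for (int i = 0; i < allPartitions.size(); i++) {
            assigned.get(members.get(i % members.size())).add(allPartitions.get(i));
        }

        final Map<String, Assignment> result = new HashMap<>();
        for (final Map.Entry<String, List<TopicPartition>> entry : assigned.entrySet()) {
            result.put(entry.getKey(), new Assignment(entry.getValue()));
        }
        return new GroupAssignment(result);
    }
}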
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in project kafka by apache.
From the class StreamsPartitionAssignorTest, method shouldAssignEvenlyAcrossConsumersOneClientMultipleThreads.
@Test
public void shouldAssignEvenlyAcrossConsumersOneClientMultipleThreads() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
    builder.addProcessor("processorII", new MockApiProcessorSupplier<>(), "source2");

    final List<PartitionInfo> localInfos = asList(
        new PartitionInfo("topic1", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 2, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 3, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 2, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 3, Node.noNode(), new Node[0], new Node[0]));
    final Cluster localMetadata = new Cluster(
        "cluster",
        Collections.singletonList(Node.noNode()),
        localInfos,
        emptySet(),
        emptySet());

    final List<String> topics = asList("topic1", "topic2");
    configureDefault();

    subscriptions.put("consumer10", new Subscription(topics, defaultSubscriptionInfo.encode()));
    subscriptions.put("consumer11", new Subscription(topics, defaultSubscriptionInfo.encode()));

    final Map<String, Assignment> assignments =
        partitionAssignor.assign(localMetadata, new GroupSubscription(subscriptions)).groupAssignment();

    // check assigned partitions
    assertEquals(
        mkSet(mkSet(t2p2, t1p0, t1p2, t2p0), mkSet(t1p1, t2p1, t1p3, t2p3)),
        mkSet(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));

    // the first consumer
    final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    final List<TaskId> expectedInfo10TaskIds = asList(TASK_0_0, TASK_0_2, TASK_1_0, TASK_1_2);
    assertEquals(expectedInfo10TaskIds, info10.activeTasks());

    // the second consumer
    final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    final List<TaskId> expectedInfo11TaskIds = asList(TASK_0_1, TASK_0_3, TASK_1_1, TASK_1_3);
    assertEquals(expectedInfo11TaskIds, info11.activeTasks());
}
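The evenness asserted above is the property the assignor tests capture with helpers such as isFullyBalanced: no member may own more than one partition beyond any other. A hypothetical stand-alone version of that check, operating directly on a member-to-partitions map, might look like this:

import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public final class BalanceCheck {

    // An assignment is "fully balanced" when no member owns more than one
    // partition beyond what any other member owns.
    public static boolean isFullyBalanced(final Map<String, List<TopicPartition>> assignment) {
        int min = Integer.MAX_VALUE;
        int max = 0;
        for (final Collection<TopicPartition> partitions : assignment.values()) {
            min = Math.min(min, partitions.size());
            max = Math.max(max, partitions.size());
        }
        return max - min <= 1;
    }

    private BalanceCheck() {
    }
}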
Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in project kafka by apache.
From the class StreamsPartitionAssignorTest, method shouldThrowTimeoutExceptionWhenCreatingChangelogTopicsTimesOut.
@Test
public void shouldThrowTimeoutExceptionWhenCreatingChangelogTopicsTimesOut() {
    final StreamsConfig config = new StreamsConfig(configProps());
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.table("topic1", Materialized.as("store"));

    final String client = "client1";
    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
    topologyMetadata = new TopologyMetadata(builder, config);

    createDefaultMockTaskManager();
    EasyMock.replay(taskManager);
    partitionAssignor.configure(configProps());

    final MockInternalTopicManager mockInternalTopicManager =
        new MockInternalTopicManager(time, config, mockClientSupplier.restoreConsumer, false) {
            @Override
            public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
                if (topics.isEmpty()) {
                    return emptySet();
                }
                throw new TimeoutException("KABOOM!");
            }
        };
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);

    subscriptions.put(client, new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    assertThrows(
        TimeoutException.class,
        () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
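In a real deployment the internal changelog topics are created through the Admin client rather than a MockInternalTopicManager, and a broker that never responds surfaces in a comparable way: the creation future times out and the caller rethrows Kafka's TimeoutException. A rough sketch under assumed settings (topic name, partition count, replication factor and bootstrap address are placeholders):

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TimeoutException;

public class ChangelogCreationSketch {
    public static void main(String[] args) throws Exception {
        final Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address

        try (Admin admin = Admin.create(props)) {
            // Placeholder changelog topic: 3 partitions, replication factor 1.
            final NewTopic changelog = new NewTopic("my-app-store-changelog", 3, (short) 1);
            try {
                // Block for a bounded time; give up if the brokers do not respond.
                admin.createTopics(Collections.singleton(changelog)).all().get(30, TimeUnit.SECONDS);
            } catch (final java.util.concurrent.TimeoutException | ExecutionException e) {
                // Mirror what the test asserts: surface the failure as Kafka's TimeoutException.
                throw new TimeoutException("Could not create changelog topic in time", e);
            }
        }
    }
}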