Use of org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology in project kafka by apache.
Class ChangelogTopicsTest, method shouldOnlyContainPreExistingNonSourceBasedChangelogs:
@Test
public void shouldOnlyContainPreExistingNonSourceBasedChangelogs() {
    // The subtopology's only changelog is a dedicated internal topic, so it is handed to
    // the internal topic manager; the stub simply returns an empty set.
    expect(internalTopicManager.makeReady(mkMap(mkEntry(CHANGELOG_TOPIC_NAME1, CHANGELOG_TOPIC_CONFIG)))).andStubReturn(Collections.emptySet());
    final Map<Subtopology, TopicsInfo> topicGroups = mkMap(mkEntry(SUBTOPOLOGY_0, TOPICS_INFO1));
    final Set<TaskId> tasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(mkEntry(SUBTOPOLOGY_0, tasks));
    replay(internalTopicManager);

    final ChangelogTopics changelogTopics = new ChangelogTopics(internalTopicManager, topicGroups, tasksForTopicGroup, "[test] ");
    changelogTopics.setup();

    verify(internalTopicManager);
    assertThat(CHANGELOG_TOPIC_CONFIG.numberOfPartitions().orElse(Integer.MIN_VALUE), is(3));
    // Each task owns the changelog partition matching its own partition number, and no
    // source-topic-based changelog partitions are reported.
    final TopicPartition changelogPartition0 = new TopicPartition(CHANGELOG_TOPIC_NAME1, 0);
    final TopicPartition changelogPartition1 = new TopicPartition(CHANGELOG_TOPIC_NAME1, 1);
    final TopicPartition changelogPartition2 = new TopicPartition(CHANGELOG_TOPIC_NAME1, 2);
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_0), is(mkSet(changelogPartition0)));
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_1), is(mkSet(changelogPartition1)));
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_2), is(mkSet(changelogPartition2)));
    assertThat(changelogTopics.preExistingSourceTopicBasedPartitions(), is(Collections.emptySet()));
    assertThat(changelogTopics.preExistingNonSourceTopicBasedPartitions(), is(mkSet(changelogPartition0, changelogPartition1, changelogPartition2)));
}
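For context, a non-source-topic-based changelog is the common case: a stateful operation gets its own dedicated internal changelog topic. A minimal DSL sketch that produces such a changelog; topic and store names are illustrative and are not the fixtures used in the test above:

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Materialized;

// Illustrative only: counting per key materializes a store whose changelog is a
// dedicated internal topic named "<application.id>-counts-changelog".
private static Topology topologyWithDedicatedChangelog() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
           .groupByKey()
           .count(Materialized.as("counts"));
    return builder.build();
}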
Use of org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology in project kafka by apache.
Class ChangelogTopicsTest, method shouldOnlyContainPreExistingSourceBasedChangelogs:
@Test
public void shouldOnlyContainPreExistingSourceBasedChangelogs() {
    // The subtopology's only changelog is backed by its source topic, so there is nothing
    // for the internal topic manager to create: makeReady() is expected with an empty map.
    expect(internalTopicManager.makeReady(Collections.emptyMap())).andStubReturn(Collections.emptySet());
    final Map<Subtopology, TopicsInfo> topicGroups = mkMap(mkEntry(SUBTOPOLOGY_0, TOPICS_INFO3));
    final Set<TaskId> tasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(mkEntry(SUBTOPOLOGY_0, tasks));
    replay(internalTopicManager);

    final ChangelogTopics changelogTopics = new ChangelogTopics(internalTopicManager, topicGroups, tasksForTopicGroup, "[test] ");
    changelogTopics.setup();

    verify(internalTopicManager);
    // Each task owns the matching partition of the source topic, and no
    // non-source-topic-based changelog partitions are reported.
    final TopicPartition changelogPartition0 = new TopicPartition(SOURCE_TOPIC_NAME, 0);
    final TopicPartition changelogPartition1 = new TopicPartition(SOURCE_TOPIC_NAME, 1);
    final TopicPartition changelogPartition2 = new TopicPartition(SOURCE_TOPIC_NAME, 2);
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_0), is(mkSet(changelogPartition0)));
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_1), is(mkSet(changelogPartition1)));
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_2), is(mkSet(changelogPartition2)));
    assertThat(changelogTopics.preExistingSourceTopicBasedPartitions(), is(mkSet(changelogPartition0, changelogPartition1, changelogPartition2)));
    assertThat(changelogTopics.preExistingNonSourceTopicBasedPartitions(), is(Collections.emptySet()));
}
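A source-topic-based changelog typically arises from a table read directly off a topic with the source-topic-reuse optimization enabled, so the source topic itself serves as the changelog. A minimal sketch under that assumption; the names are illustrative and whether reuse actually applies is decided by the optimizer, not by this snippet:

import java.util.Properties;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Materialized;

// Illustrative only: with optimization enabled, the table's store can be restored from
// "input-topic" directly instead of from a dedicated changelog topic.
private static Topology topologyWithSourceBackedChangelog() {
    final Properties props = new Properties();
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table("input-topic", Materialized.as("table-store"));
    return builder.build(props);
}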
Use of org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology in project kafka by apache.
Class ChangelogTopicsTest, method shouldContainBothTypesOfPreExistingChangelogs:
@Test
public void shouldContainBothTypesOfPreExistingChangelogs() {
    // TOPICS_INFO4 yields both a source-topic-based changelog and a dedicated changelog
    // topic; only the dedicated one is handed to the internal topic manager.
    expect(internalTopicManager.makeReady(mkMap(mkEntry(CHANGELOG_TOPIC_NAME1, CHANGELOG_TOPIC_CONFIG)))).andStubReturn(Collections.emptySet());
    final Map<Subtopology, TopicsInfo> topicGroups = mkMap(mkEntry(SUBTOPOLOGY_0, TOPICS_INFO4));
    final Set<TaskId> tasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(mkEntry(SUBTOPOLOGY_0, tasks));
    replay(internalTopicManager);

    final ChangelogTopics changelogTopics = new ChangelogTopics(internalTopicManager, topicGroups, tasksForTopicGroup, "[test] ");
    changelogTopics.setup();

    verify(internalTopicManager);
    assertThat(CHANGELOG_TOPIC_CONFIG.numberOfPartitions().orElse(Integer.MIN_VALUE), is(3));
    final TopicPartition changelogPartition0 = new TopicPartition(CHANGELOG_TOPIC_NAME1, 0);
    final TopicPartition changelogPartition1 = new TopicPartition(CHANGELOG_TOPIC_NAME1, 1);
    final TopicPartition changelogPartition2 = new TopicPartition(CHANGELOG_TOPIC_NAME1, 2);
    final TopicPartition sourcePartition0 = new TopicPartition(SOURCE_TOPIC_NAME, 0);
    final TopicPartition sourcePartition1 = new TopicPartition(SOURCE_TOPIC_NAME, 1);
    final TopicPartition sourcePartition2 = new TopicPartition(SOURCE_TOPIC_NAME, 2);
    // Each task owns the matching partition of both topics, and the two partition sets
    // are reported separately by the source-based and non-source-based accessors.
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_0), is(mkSet(sourcePartition0, changelogPartition0)));
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_1), is(mkSet(sourcePartition1, changelogPartition1)));
    assertThat(changelogTopics.preExistingPartitionsFor(TASK_0_2), is(mkSet(sourcePartition2, changelogPartition2)));
    assertThat(changelogTopics.preExistingSourceTopicBasedPartitions(), is(mkSet(sourcePartition0, sourcePartition1, sourcePartition2)));
    assertThat(changelogTopics.preExistingNonSourceTopicBasedPartitions(), is(mkSet(changelogPartition0, changelogPartition1, changelogPartition2)));
}
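The pattern across all three tests is that a task's partition index selects the same-numbered partition of every pre-existing changelog of its subtopology. A minimal sketch of that expectation, reusing the constants above; the helper itself is hypothetical and not part of the test class:

// Hypothetical helper: builds the partitions expected for a task whose subtopology has
// both a source-backed and a dedicated changelog.
private static Set<TopicPartition> expectedPreExistingPartitionsFor(final TaskId task) {
    return mkSet(
        new TopicPartition(SOURCE_TOPIC_NAME, task.partition()),
        new TopicPartition(CHANGELOG_TOPIC_NAME1, task.partition()));
}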
Use of org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology in project kafka by apache.
Class StreamsPartitionAssignor, method populateTasksForMaps:
/**
* Populates the taskForPartition and tasksForTopicGroup maps, and checks that partitions are assigned to exactly
* one task.
*
* @param taskForPartition a map from partition to the corresponding task. Populated here.
* @param tasksForTopicGroup a map from the topicGroupId to the set of corresponding tasks. Populated here.
* @param allSourceTopics a set of all source topics in the topology
* @param partitionsForTask a map from task to the set of input partitions
* @param fullMetadata the cluster metadata
*/
private void populateTasksForMaps(final Map<TopicPartition, TaskId> taskForPartition,
                                  final Map<Subtopology, Set<TaskId>> tasksForTopicGroup,
                                  final Set<String> allSourceTopics,
                                  final Map<TaskId, Set<TopicPartition>> partitionsForTask,
                                  final Cluster fullMetadata) {
    // check that all partitions are assigned and that no partition appears in more than one task
    final Set<TopicPartition> allAssignedPartitions = new HashSet<>();
    for (final Map.Entry<TaskId, Set<TopicPartition>> entry : partitionsForTask.entrySet()) {
        final TaskId id = entry.getKey();
        final Set<TopicPartition> partitions = entry.getValue();
        for (final TopicPartition partition : partitions) {
            taskForPartition.put(partition, id);
            if (allAssignedPartitions.contains(partition)) {
                log.warn("Partition {} is assigned to more than one task: {}", partition, partitionsForTask);
            }
        }
        allAssignedPartitions.addAll(partitions);
        tasksForTopicGroup.computeIfAbsent(new Subtopology(id.subtopology(), id.topologyName()), k -> new HashSet<>()).add(id);
    }
    checkAllPartitions(allSourceTopics, partitionsForTask, allAssignedPartitions, fullMetadata);
}
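To make the inversion concrete, here is a stripped-down, self-contained sketch of the same idea over plain collections. This is an illustration only, not the assignor's actual code path, and the method name is made up:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Invert a task -> partitions map into partition -> task, flagging any partition that
// shows up under more than one task.
static <T, P> Map<P, T> invertAssignment(final Map<T, Set<P>> partitionsForTask) {
    final Map<P, T> taskForPartition = new HashMap<>();
    for (final Map.Entry<T, Set<P>> entry : partitionsForTask.entrySet()) {
        for (final P partition : entry.getValue()) {
            final T previous = taskForPartition.put(partition, entry.getKey());
            if (previous != null && !previous.equals(entry.getKey())) {
                System.err.println("Partition " + partition + " is assigned to more than one task");
            }
        }
    }
    return taskForPartition;
}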
Use of org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology in project kafka by apache.
Class StreamsPartitionAssignorTest, method tasksForState:
private static Set<TaskId> tasksForState(final String storeName,
                                         final List<TaskId> tasks,
                                         final Map<Subtopology, InternalTopologyBuilder.TopicsInfo> topicGroups) {
    final String changelogTopic = ProcessorStateManager.storeChangelogTopic(APPLICATION_ID, storeName, null);
    final Set<TaskId> ids = new HashSet<>();
    // Find every subtopology whose TopicsInfo lists this store's changelog topic, then
    // collect all tasks that belong to those subtopologies.
    for (final Map.Entry<Subtopology, InternalTopologyBuilder.TopicsInfo> entry : topicGroups.entrySet()) {
        final Set<String> stateChangelogTopics = entry.getValue().stateChangelogTopics.keySet();
        if (stateChangelogTopics.contains(changelogTopic)) {
            for (final TaskId id : tasks) {
                if (id.subtopology() == entry.getKey().nodeGroupId) {
                    ids.add(id);
                }
            }
        }
    }
    return ids;
}
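The lookup hinges on the changelog naming convention. A minimal sketch of the name derivation, assuming the default pattern and no named topology (which is why the third argument to storeChangelogTopic() above is null); the helper and values are illustrative:

// Hypothetical helper mirroring the assumed default "<application.id>-<storeName>-changelog" pattern.
static String defaultChangelogTopicName(final String applicationId, final String storeName) {
    return applicationId + "-" + storeName + "-changelog";
}
// e.g. defaultChangelogTopicName("test-app", "count-store") == "test-app-count-store-changelog",
// which is the string matched against the keySet of TopicsInfo.stateChangelogTopics above.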