
Example 36 with AssignmentInfo

Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.

From class StreamsPartitionAssignor, method onAssignment.

@Override
public void onAssignment(final Assignment assignment, final ConsumerGroupMetadata metadata) {
    final List<TopicPartition> partitions = new ArrayList<>(assignment.partitions());
    partitions.sort(PARTITION_COMPARATOR);
    final AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
    if (info.errCode() != AssignorError.NONE.code()) {
        // set flag to shutdown streams app
        assignmentErrorCode.set(info.errCode());
        return;
    }
    /*
     * latestCommonlySupportedVersion belongs to [usedSubscriptionMetadataVersion, LATEST_SUPPORTED_VERSION]
     * receivedAssignmentMetadataVersion belongs to [EARLIEST_PROBEABLE_VERSION, usedSubscriptionMetadataVersion]
     *
     * usedSubscriptionMetadataVersion will be downgraded to receivedAssignmentMetadataVersion during a rolling
     * bounce upgrade with version probing.
     *
     * usedSubscriptionMetadataVersion will be upgraded to latestCommonlySupportedVersion when all members have
     * been bounced and it is safe to use the latest version.
     */
    final int receivedAssignmentMetadataVersion = info.version();
    final int latestCommonlySupportedVersion = info.commonlySupportedVersion();
    validateMetadataVersions(receivedAssignmentMetadataVersion, latestCommonlySupportedVersion);
    // version 1 field
    final Map<TaskId, Set<TopicPartition>> activeTasks;
    // version 2 fields
    final Map<TopicPartition, PartitionInfo> topicToPartitionInfo;
    final Map<HostInfo, Set<TopicPartition>> partitionsByHost;
    final Map<HostInfo, Set<TopicPartition>> standbyPartitionsByHost;
    final long encodedNextScheduledRebalanceMs;
    switch (receivedAssignmentMetadataVersion) {
        case 1:
            validateActiveTaskEncoding(partitions, info, logPrefix);
            activeTasks = getActiveTasks(partitions, info);
            partitionsByHost = Collections.emptyMap();
            standbyPartitionsByHost = Collections.emptyMap();
            topicToPartitionInfo = Collections.emptyMap();
            encodedNextScheduledRebalanceMs = Long.MAX_VALUE;
            break;
        case 2:
        case 3:
        case 4:
        case 5:
            validateActiveTaskEncoding(partitions, info, logPrefix);
            activeTasks = getActiveTasks(partitions, info);
            partitionsByHost = info.partitionsByHost();
            standbyPartitionsByHost = Collections.emptyMap();
            topicToPartitionInfo = getTopicPartitionInfo(partitionsByHost);
            encodedNextScheduledRebalanceMs = Long.MAX_VALUE;
            break;
        case 6:
            validateActiveTaskEncoding(partitions, info, logPrefix);
            activeTasks = getActiveTasks(partitions, info);
            partitionsByHost = info.partitionsByHost();
            standbyPartitionsByHost = info.standbyPartitionByHost();
            topicToPartitionInfo = getTopicPartitionInfo(partitionsByHost);
            encodedNextScheduledRebalanceMs = Long.MAX_VALUE;
            break;
        case 7:
        case 8:
        case 9:
        case 10:
            validateActiveTaskEncoding(partitions, info, logPrefix);
            activeTasks = getActiveTasks(partitions, info);
            partitionsByHost = info.partitionsByHost();
            standbyPartitionsByHost = info.standbyPartitionByHost();
            topicToPartitionInfo = getTopicPartitionInfo(partitionsByHost);
            encodedNextScheduledRebalanceMs = info.nextRebalanceMs();
            break;
        default:
            throw new IllegalStateException("This code should never be reached." + " Please file a bug report at https://issues.apache.org/jira/projects/KAFKA/");
    }
    maybeScheduleFollowupRebalance(encodedNextScheduledRebalanceMs, receivedAssignmentMetadataVersion, latestCommonlySupportedVersion, partitionsByHost.keySet());
    final Cluster fakeCluster = Cluster.empty().withPartitions(topicToPartitionInfo);
    streamsMetadataState.onChange(partitionsByHost, standbyPartitionsByHost, fakeCluster);
    // we do not catch any exceptions here but let any exception from consumer.poll be thrown directly,
    // since when the stream thread catches it, we either close all tasks as dirty or close the thread
    taskManager.handleAssignment(activeTasks, info.standbyTasks());
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) SortedSet(java.util.SortedSet) Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo) HostInfo(org.apache.kafka.streams.state.HostInfo)
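To make the decode path above concrete, here is a minimal round-trip sketch. It only uses calls shown in these examples (the seven-argument AssignmentInfo constructor from Example 37, encode(), decode(), version(), commonlySupportedVersion(), errCode()); the class name and the choice of metadata version 10 are illustrative assumptions, and the internal assignment classes are assumed to be accessible on the classpath.

// Hypothetical sketch (not part of the Kafka sources): encode an AssignmentInfo and read back
// the fields that onAssignment checks first. Metadata version 10 is assumed to be supported,
// matching the "case 10" branch above.
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo;
import org.apache.kafka.streams.processor.internals.assignment.AssignorError;

public class AssignmentInfoRoundTripSketch {
    public static void main(final String[] args) {
        final AssignmentInfo original = new AssignmentInfo(
            10,                              // used metadata version
            10,                              // commonly supported version
            new ArrayList<>(),               // active task ids, ordered like the assigned partitions
            Collections.emptyMap(),          // standby tasks
            Collections.emptyMap(),          // partitions by host
            Collections.emptyMap(),          // standby partitions by host
            AssignorError.NONE.code());
        final ByteBuffer userData = original.encode();

        // The consumer side decodes userData exactly as onAssignment does with assignment.userData().
        final AssignmentInfo decoded = AssignmentInfo.decode(userData);
        System.out.println(decoded.version());                  // 10
        System.out.println(decoded.commonlySupportedVersion()); // 10
        System.out.println(decoded.errCode() == AssignorError.NONE.code()); // true, so no shutdown flag is set
    }
}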

Example 37 with AssignmentInfo

Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.

From class StreamsPartitionAssignor, method addClientAssignments.

/**
 * Adds the encoded assignment for each StreamThread consumer in the client to the overall assignment map
 * @return true if a followup rebalance will be required due to revoked tasks
 */
private boolean addClientAssignments(final Set<TaskId> statefulTasks, final Map<String, Assignment> assignment, final ClientMetadata clientMetadata, final Map<TaskId, Set<TopicPartition>> partitionsForTask, final Map<HostInfo, Set<TopicPartition>> partitionsByHostState, final Map<HostInfo, Set<TopicPartition>> standbyPartitionsByHost, final Set<TopicPartition> allOwnedPartitions, final Map<String, List<TaskId>> activeTaskAssignments, final Map<String, List<TaskId>> standbyTaskAssignments, final int minUserMetadataVersion, final int minSupportedMetadataVersion, final boolean probingRebalanceNeeded) {
    boolean followupRebalanceRequiredForRevokedTasks = false;
    // We only want to encode a scheduled probing rebalance for a single member in this client
    boolean shouldEncodeProbingRebalance = probingRebalanceNeeded;
    // Loop through the consumers and build their assignment
    for (final String consumer : clientMetadata.consumers) {
        final List<TaskId> activeTasksForConsumer = activeTaskAssignments.get(consumer);
        // These will be filled in by populateActiveTaskAndPartitionsLists below
        final List<TopicPartition> activePartitionsList = new ArrayList<>();
        final List<TaskId> assignedActiveList = new ArrayList<>();
        final Set<TaskId> activeTasksRemovedPendingRevokation = populateActiveTaskAndPartitionsLists(activePartitionsList, assignedActiveList, consumer, clientMetadata.state, activeTasksForConsumer, partitionsForTask, allOwnedPartitions);
        final Map<TaskId, Set<TopicPartition>> standbyTaskMap = buildStandbyTaskMap(consumer, standbyTaskAssignments.get(consumer), activeTasksRemovedPendingRevokation, statefulTasks, partitionsForTask, clientMetadata.state);
        final AssignmentInfo info = new AssignmentInfo(minUserMetadataVersion, minSupportedMetadataVersion, assignedActiveList, standbyTaskMap, partitionsByHostState, standbyPartitionsByHost, AssignorError.NONE.code());
        if (!activeTasksRemovedPendingRevokation.isEmpty()) {
            // TODO: once KAFKA-10078 is resolved we can leave it to the client to trigger this rebalance
            log.info("Requesting followup rebalance be scheduled immediately by {} due to tasks changing ownership.", consumer);
            info.setNextRebalanceTime(0L);
            followupRebalanceRequiredForRevokedTasks = true;
            // Don't bother to schedule a probing rebalance if an immediate one is already scheduled
            shouldEncodeProbingRebalance = false;
        } else if (shouldEncodeProbingRebalance) {
            final long nextRebalanceTimeMs = time.milliseconds() + probingRebalanceIntervalMs();
            log.info("Requesting followup rebalance be scheduled by {} for {} ms to probe for caught-up replica tasks.", consumer, nextRebalanceTimeMs);
            info.setNextRebalanceTime(nextRebalanceTimeMs);
            shouldEncodeProbingRebalance = false;
        }
        assignment.put(consumer, new Assignment(activePartitionsList, info.encode()));
    }
    return followupRebalanceRequiredForRevokedTasks;
}
Also used : AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) TaskId(org.apache.kafka.streams.processor.TaskId) SortedSet(java.util.SortedSet) Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList)
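The nextRebalanceTime field set here is what onAssignment (Example 36) reads back as nextRebalanceMs() for metadata versions 7 and above. A minimal sketch of that round trip, under the same assumptions as the sketch after Example 36 (illustrative class name, metadata version 10):

// Hypothetical sketch: request an immediate followup rebalance by encoding nextRebalanceTime = 0,
// then read it back the way onAssignment does for versions >= 7.
import java.util.ArrayList;
import java.util.Collections;
import org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo;
import org.apache.kafka.streams.processor.internals.assignment.AssignorError;

public class NextRebalanceTimeSketch {
    public static void main(final String[] args) {
        final AssignmentInfo info = new AssignmentInfo(
            10, 10,
            new ArrayList<>(),      // active tasks
            Collections.emptyMap(), // standby tasks
            Collections.emptyMap(), // partitions by host
            Collections.emptyMap(), // standby partitions by host
            AssignorError.NONE.code());
        info.setNextRebalanceTime(0L); // same call addClientAssignments uses when tasks were revoked

        final AssignmentInfo decoded = AssignmentInfo.decode(info.encode());
        System.out.println(decoded.nextRebalanceMs()); // 0, i.e. schedule the followup rebalance immediately
    }
}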

Example 38 with AssignmentInfo

Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.

From class StreamsPartitionAssignor, method getActiveTasks.

// protected for upgrade test
protected static Map<TaskId, Set<TopicPartition>> getActiveTasks(final List<TopicPartition> partitions, final AssignmentInfo info) {
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    for (int i = 0; i < partitions.size(); i++) {
        final TopicPartition partition = partitions.get(i);
        final TaskId id = info.activeTasks().get(i);
        activeTasks.computeIfAbsent(id, k1 -> new HashSet<>()).add(partition);
    }
    return activeTasks;
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Map(java.util.Map) HashMap(java.util.HashMap) Set(java.util.Set) HashSet(java.util.HashSet) List(java.util.List) TopicPartition(org.apache.kafka.common.TopicPartition) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo)
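getActiveTasks relies on a positional contract: the i-th sorted partition in the assignment belongs to the i-th task id in info.activeTasks(), which is why onAssignment sorts the partitions with PARTITION_COMPARATOR and calls validateActiveTaskEncoding before pairing them up. A small self-contained sketch of that pairing, using plain lists instead of an AssignmentInfo and a hypothetical class name:

// Hypothetical illustration of the index-pairing contract used by getActiveTasks above:
// the i-th assigned partition is grouped under the i-th task id.
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.TaskId;

public class ActiveTaskPairingSketch {
    public static void main(final String[] args) {
        final List<TopicPartition> partitions =
            Arrays.asList(new TopicPartition("topic", 0), new TopicPartition("topic", 1));
        final List<TaskId> activeTaskIds =
            Arrays.asList(new TaskId(0, 0), new TaskId(0, 1));

        // Same loop shape as getActiveTasks, but over plain lists instead of an AssignmentInfo.
        final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
        for (int i = 0; i < partitions.size(); i++) {
            activeTasks.computeIfAbsent(activeTaskIds.get(i), k -> new HashSet<>()).add(partitions.get(i));
        }
        System.out.println(activeTasks); // e.g. {0_0=[topic-0], 0_1=[topic-1]}
    }
}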

Example 39 with AssignmentInfo

Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.

From class StreamsAssignmentScaleTest, method completeLargeAssignment.

private void completeLargeAssignment(final int numPartitions, final int numClients, final int numThreadsPerClient, final int numStandbys, final Class<? extends TaskAssignor> taskAssignor) {
    final List<String> topic = singletonList("topic");
    final Map<TopicPartition, Long> changelogEndOffsets = new HashMap<>();
    for (int p = 0; p < numPartitions; ++p) {
        changelogEndOffsets.put(new TopicPartition(APPLICATION_ID + "-store-changelog", p), 100_000L);
    }
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    for (int p = 0; p < numPartitions; ++p) {
        partitionInfos.add(new PartitionInfo("topic", p, Node.noNode(), new Node[0], new Node[0]));
    }
    final Cluster clusterMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), partitionInfos, emptySet(), emptySet());
    final Map<String, Object> configMap = new HashMap<>();
    configMap.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID);
    configMap.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8080");
    final InternalTopologyBuilder builder = new InternalTopologyBuilder();
    builder.addSource(null, "source", null, null, null, "topic");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");
    final TopologyMetadata topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configMap));
    topologyMetadata.buildAndRewriteTopology();
    final Consumer<byte[], byte[]> mainConsumer = EasyMock.createNiceMock(Consumer.class);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.topologyMetadata()).andStubReturn(topologyMetadata);
    expect(mainConsumer.committed(new HashSet<>())).andStubReturn(Collections.emptyMap());
    final AdminClient adminClient = createMockAdminClientForAssignor(changelogEndOffsets);
    final ReferenceContainer referenceContainer = new ReferenceContainer();
    referenceContainer.mainConsumer = mainConsumer;
    referenceContainer.adminClient = adminClient;
    referenceContainer.taskManager = taskManager;
    referenceContainer.streamsMetadataState = EasyMock.createNiceMock(StreamsMetadataState.class);
    referenceContainer.time = new MockTime();
    configMap.put(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR, referenceContainer);
    configMap.put(InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, taskAssignor.getName());
    configMap.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, numStandbys);
    final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(new MockTime(), new StreamsConfig(configMap), new MockClientSupplier().restoreConsumer, false);
    EasyMock.replay(taskManager, adminClient, mainConsumer);
    final StreamsPartitionAssignor partitionAssignor = new StreamsPartitionAssignor();
    partitionAssignor.configure(configMap);
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    final Map<String, Subscription> subscriptions = new HashMap<>();
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            subscriptions.put(getConsumerName(i, client), new Subscription(topic, getInfo(uuidForInt(client), EMPTY_TASKS, EMPTY_TASKS).encode()));
        }
    }
    final long firstAssignmentStartMs = System.currentTimeMillis();
    final Map<String, Assignment> firstAssignments = partitionAssignor.assign(clusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    final long firstAssignmentEndMs = System.currentTimeMillis();
    final long firstAssignmentDuration = firstAssignmentEndMs - firstAssignmentStartMs;
    if (firstAssignmentDuration > MAX_ASSIGNMENT_DURATION) {
        throw new AssertionError("The first assignment took too long to complete at " + firstAssignmentDuration + "ms.");
    } else {
        log.info("First assignment took {}ms.", firstAssignmentDuration);
    }
    // Use the assignment to generate the subscriptions' prev task data for the next rebalance
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            final String consumer = getConsumerName(i, client);
            final Assignment assignment = firstAssignments.get(consumer);
            final AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
            subscriptions.put(consumer, new Subscription(topic, getInfo(uuidForInt(client), new HashSet<>(info.activeTasks()), info.standbyTasks().keySet()).encode(), assignment.partitions()));
        }
    }
    final long secondAssignmentStartMs = System.currentTimeMillis();
    final Map<String, Assignment> secondAssignments = partitionAssignor.assign(clusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    final long secondAssignmentEndMs = System.currentTimeMillis();
    final long secondAssignmentDuration = secondAssignmentEndMs - secondAssignmentStartMs;
    if (secondAssignmentDuration > MAX_ASSIGNMENT_DURATION) {
        throw new AssertionError("The second assignment took too long to complete at " + secondAssignmentDuration + "ms.");
    } else {
        log.info("Second assignment took {}ms.", secondAssignmentDuration);
    }
    assertThat(secondAssignments.size(), is(numClients * numThreadsPerClient));
}
Also used : ReferenceContainer(org.apache.kafka.streams.processor.internals.assignment.ReferenceContainer) HashMap(java.util.HashMap) MockInternalTopicManager(org.apache.kafka.test.MockInternalTopicManager) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) PartitionInfo(org.apache.kafka.common.PartitionInfo) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) HashSet(java.util.HashSet) Cluster(org.apache.kafka.common.Cluster) TopicPartition(org.apache.kafka.common.TopicPartition) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Example 40 with AssignmentInfo

Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.

From class StreamsPartitionAssignorTest, method testAssignWithStandbyReplicasAndLoggingDisabled.

@Test
public void testAssignWithStandbyReplicasAndLoggingDisabled() {
    builder.addSource(null, "source1", null, null, null, "topic1", "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false).withLoggingDisabled(), "processor");
    final List<String> topics = asList("topic1", "topic2");
    createMockTaskManager(mkSet(TASK_0_0), emptySet());
    configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1));
    subscriptions.put("consumer10", new Subscription(topics, getInfo(UUID_1, mkSet(TASK_0_0), emptySet()).encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, mkSet(TASK_0_2), emptySet()).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    assertTrue(info10.standbyTasks().isEmpty());
    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    assertTrue(info20.standbyTasks().isEmpty());
}
Also used : Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) Test(org.junit.Test)

Aggregations

AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) 46
TaskId (org.apache.kafka.streams.processor.TaskId) 41
HashSet (java.util.HashSet) 38
Test (org.junit.Test) 35
HashMap (java.util.HashMap) 29
TopicPartition (org.apache.kafka.common.TopicPartition) 24
UUID (java.util.UUID) 21
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor) 21
SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo) 20
HostInfo (org.apache.kafka.streams.state.HostInfo) 17
Set (java.util.Set) 16
MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager) 16
MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier) 16
Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) 15
GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) 14
Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) 14
ArrayList (java.util.ArrayList) 12
Cluster (org.apache.kafka.common.Cluster) 12
PartitionInfo (org.apache.kafka.common.PartitionInfo) 10
StreamsBuilderTest (org.apache.kafka.streams.StreamsBuilderTest) 10