Search in sources :

Example 21 with HostInfo

use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

From the class StreamsPartitionAssignorTest, the method testOnAssignment:

@Test
public void testOnAssignment() {
    taskManager = EasyMock.createStrictMock(TaskManager.class);

    // One host ("localhost:9090") actively hosting two partitions of topic t3.
    final Map<HostInfo, Set<TopicPartition>> partitionsByHost =
        Collections.singletonMap(new HostInfo("localhost", 9090), mkSet(t3p0, t3p3));

    // Expected task-to-partition assignment handed to the task manager.
    final Map<TaskId, Set<TopicPartition>> expectedActiveTasks = new HashMap<>();
    expectedActiveTasks.put(TASK_0_0, mkSet(t3p0));
    expectedActiveTasks.put(TASK_0_3, mkSet(t3p3));
    final Map<TaskId, Set<TopicPartition>> expectedStandbyTasks = new HashMap<>();
    expectedStandbyTasks.put(TASK_0_1, mkSet(t3p1));
    expectedStandbyTasks.put(TASK_0_2, mkSet(t3p2));

    taskManager.handleAssignment(expectedActiveTasks, expectedStandbyTasks);
    EasyMock.expectLastCall();

    // The metadata state must be updated with the host mapping and a rebuilt Cluster,
    // which we capture for the assertions below.
    streamsMetadataState = EasyMock.createStrictMock(StreamsMetadataState.class);
    final Capture<Cluster> clusterCapture = EasyMock.newCapture();
    streamsMetadataState.onChange(EasyMock.eq(partitionsByHost), EasyMock.anyObject(), EasyMock.capture(clusterCapture));
    EasyMock.expectLastCall();
    EasyMock.replay(streamsMetadataState);

    configureDefaultPartitionAssignor();

    // Encode the assignment exactly as the group leader would and feed it back in.
    final AssignmentInfo assignmentInfo = new AssignmentInfo(
        LATEST_SUPPORTED_VERSION,
        asList(TASK_0_0, TASK_0_3),
        expectedStandbyTasks,
        partitionsByHost,
        emptyMap(),
        0);
    partitionAssignor.onAssignment(new Assignment(asList(t3p0, t3p3), assignmentInfo.encode()), null);

    EasyMock.verify(streamsMetadataState);
    EasyMock.verify(taskManager);

    // The captured cluster should reflect only topic t3, with both of its partitions.
    assertEquals(singleton(t3p0.topic()), clusterCapture.getValue().topics());
    assertEquals(2, clusterCapture.getValue().partitionsForTopic(t3p0.topic()).size());
}
Also used : Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) Utils.mkSortedSet(org.apache.kafka.common.utils.Utils.mkSortedSet) SortedSet(java.util.SortedSet) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) TaskId(org.apache.kafka.streams.processor.TaskId) HashMap(java.util.HashMap) Cluster(org.apache.kafka.common.Cluster) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) HostInfo(org.apache.kafka.streams.state.HostInfo) Test(org.junit.Test)

Example 22 with HostInfo

use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

From the class StreamsMetadataStateTest, the method shouldGetInstancesForStoreName:

@Test
public void shouldGetInstancesForStoreName() {
    // Metadata expected for the two hosts that host "table-one" as an active store.
    final StreamsMetadata expectedHostOne = new StreamsMetadataImpl(
        hostOne,
        mkSet(globalTable, "table-one", "table-two", "merged-table"),
        mkSet(topic1P0, topic2P1, topic4P0),
        mkSet("table-one", "table-two", "merged-table"),
        mkSet(topic2P0, topic1P1));
    final StreamsMetadata expectedHostTwo = new StreamsMetadataImpl(
        hostTwo,
        mkSet(globalTable, "table-two", "table-one", "merged-table"),
        mkSet(topic2P0, topic1P1),
        mkSet("table-three"),
        mkSet(topic3P0));

    final Collection<StreamsMetadata> metadataForStore = metadataState.getAllMetadataForStore("table-one");
    // Index the results by host so we can inspect hostThree's standby stores directly.
    final Map<HostInfo, StreamsMetadata> metadataByHost =
        metadataForStore.stream().collect(Collectors.toMap(StreamsMetadata::hostInfo, Function.identity()));

    assertEquals(3, metadataForStore.size());
    assertTrue("expected " + metadataForStore + " to contain " + expectedHostOne, metadataForStore.contains(expectedHostOne));
    assertTrue("expected " + metadataForStore + " to contain " + expectedHostTwo, metadataForStore.contains(expectedHostTwo));
    // hostThree is included because it hosts "table-one" as a standby.
    assertTrue("expected " + hostThree + " to contain as standby",
        metadataByHost.get(hostThree).standbyStateStoreNames().contains("table-one"));
}
Also used : StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) HostInfo(org.apache.kafka.streams.state.HostInfo) Test(org.junit.Test)

Example 23 with HostInfo

use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

From the class StreamsMetadataStateTest, the method shouldGetAllStreamsInstancesWithNoStores:

@Test
public void shouldGetAllStreamsInstancesWithNoStores() {
    // Add a stateless sub-topology that only filters and forwards.
    builder.stream("topic-five").filter((key, value) -> true).to("some-other-topic");

    final TopicPartition topicFivePartition = new TopicPartition("topic-five", 1);
    final HostInfo statelessHost = new HostInfo("host-four", 8080);
    hostToActivePartitions.put(statelessHost, mkSet(topicFivePartition));

    // Push the new host mapping plus cluster metadata for the new partition.
    metadataState.onChange(
        hostToActivePartitions,
        Collections.emptyMap(),
        cluster.withPartitions(Collections.singletonMap(
            topicFivePartition,
            new PartitionInfo("topic-five", 1, null, null, null))));

    // The stateless host should still appear in the metadata, carrying only the
    // global table (no local state stores at all).
    final StreamsMetadata expected = new StreamsMetadataImpl(
        statelessHost,
        Collections.singleton(globalTable),
        Collections.singleton(topicFivePartition),
        Collections.emptySet(),
        Collections.emptySet());

    final Collection<StreamsMetadata> allMetadata = metadataState.getAllMetadata();
    assertTrue("expected " + allMetadata + " to contain " + expected, allMetadata.contains(expected));
}
Also used : Arrays(java.util.Arrays) HostInfo(org.apache.kafka.streams.state.HostInfo) Assert.assertThrows(org.junit.Assert.assertThrows) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) Function(java.util.function.Function) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) LogContext(org.apache.kafka.common.utils.LogContext) TopologyWrapper(org.apache.kafka.streams.TopologyWrapper) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) Before(org.junit.Before) TopicPartition(org.apache.kafka.common.TopicPartition) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StreamPartitioner(org.apache.kafka.streams.processor.StreamPartitioner) Consumed(org.apache.kafka.streams.kstream.Consumed) Assert.assertNotNull(org.junit.Assert.assertNotNull) Collection(java.util.Collection) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) PartitionInfo(org.apache.kafka.common.PartitionInfo) Collectors(java.util.stream.Collectors) List(java.util.List) Assert.assertNull(org.junit.Assert.assertNull) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) Serializer(org.apache.kafka.common.serialization.Serializer) Assert.assertFalse(org.junit.Assert.assertFalse) Materialized(org.apache.kafka.streams.kstream.Materialized) Node(org.apache.kafka.common.Node) DummyStreamsConfig(org.apache.kafka.streams.processor.internals.testutil.DummyStreamsConfig) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)

Example 24 with HostInfo

use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

From the class StreamsMetadataState, the method rebuildMetadataForSingleTopology:

/**
 * Rebuilds the per-host {@link StreamsMetadata} for an application with a single
 * (unnamed) topology, given the active and standby partition-to-host mappings.
 * Also refreshes {@code localMetadata} when this instance's host is encountered.
 */
private List<StreamsMetadata> rebuildMetadataForSingleTopology(final Map<HostInfo, Set<TopicPartition>> activePartitionHostMap, final Map<HostInfo, Set<TopicPartition>> standbyPartitionHostMap) {
    final List<StreamsMetadata> rebuiltMetadata = new ArrayList<>();
    final Map<String, List<String>> storeToSourceTopics = topologyMetadata.stateStoreNameToSourceTopics();

    // Collect every known host (active or standby), deduplicated and sorted by
    // host name then port so the rebuilt metadata has a deterministic order.
    final List<HostInfo> allHosts = new ArrayList<>();
    Stream.concat(activePartitionHostMap.keySet().stream(), standbyPartitionHostMap.keySet().stream())
        .distinct()
        .sorted(Comparator.comparing(HostInfo::host).thenComparingInt(HostInfo::port))
        .forEach(allHosts::add);

    for (final HostInfo host : allHosts) {
        final Set<TopicPartition> activePartitions = new HashSet<>();
        final Set<String> activeStores = new HashSet<>();
        if (activePartitionHostMap.containsKey(host)) {
            activePartitions.addAll(activePartitionHostMap.get(host));
            activeStores.addAll(getStoresOnHost(storeToSourceTopics, activePartitions));
        }
        // Global stores are present on every instance.
        activeStores.addAll(globalStores);

        final Set<TopicPartition> standbyPartitions = new HashSet<>();
        final Set<String> standbyStores = new HashSet<>();
        if (standbyPartitionHostMap.containsKey(host)) {
            standbyPartitions.addAll(standbyPartitionHostMap.get(host));
            standbyStores.addAll(getStoresOnHost(storeToSourceTopics, standbyPartitions));
        }

        final StreamsMetadata metadata =
            new StreamsMetadataImpl(host, activeStores, activePartitions, standbyStores, standbyPartitions);
        rebuiltMetadata.add(metadata);
        if (host.equals(thisHost)) {
            localMetadata.set(metadata);
        }
    }
    return rebuiltMetadata;
}
Also used : StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) HostInfo(org.apache.kafka.streams.state.HostInfo) HashSet(java.util.HashSet)

Example 25 with HostInfo

use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

From the class StreamsMetadataState, the method rebuildMetadataForNamedTopologies:

/**
 * Rebuilds the per-host, per-named-topology {@link StreamsMetadata} from the active and
 * standby partition-to-host mappings. One metadata entry is produced for each
 * (host, topology) pair that has at least one store or partition; empty pairs are skipped.
 * After the per-topology entries are built, {@code localMetadata} is set to a single
 * entry aggregating all topologies on this instance's host.
 */
private List<StreamsMetadata> rebuildMetadataForNamedTopologies(final Map<HostInfo, Set<TopicPartition>> activePartitionHostMap, final Map<HostInfo, Set<TopicPartition>> standbyPartitionHostMap) {
    final List<StreamsMetadata> rebuiltMetadata = new ArrayList<>();
    // Iterate every known host (active or standby), deduplicated and sorted by
    // host name then port for a deterministic result order.
    Stream.concat(activePartitionHostMap.keySet().stream(), standbyPartitionHostMap.keySet().stream()).distinct().sorted(Comparator.comparing(HostInfo::host).thenComparingInt(HostInfo::port)).forEach(hostInfo -> {
        for (final String topologyName : topologyMetadata.namedTopologiesView()) {
            final Map<String, List<String>> storeToSourceTopics = topologyMetadata.stateStoreNameToSourceTopicsForTopology(topologyName);
            final Set<TopicPartition> activePartitionsOnHost = new HashSet<>();
            final Set<String> activeStoresOnHost = new HashSet<>();
            if (activePartitionHostMap.containsKey(hostInfo)) {
                // filter out partitions for topics that are not connected to this topology
                activePartitionsOnHost.addAll(activePartitionHostMap.get(hostInfo).stream().filter(tp -> topologyMetadata.fullSourceTopicNamesForTopology(topologyName).contains(tp.topic())).collect(Collectors.toSet()));
                activeStoresOnHost.addAll(getStoresOnHost(storeToSourceTopics, activePartitionsOnHost));
            }
            // TODO KAFKA-13281: when we add support for global stores with named topologies we will
            // need to add the global stores to the activeStoresOnHost set
            final Set<TopicPartition> standbyPartitionsOnHost = new HashSet<>();
            final Set<String> standbyStoresOnHost = new HashSet<>();
            if (standbyPartitionHostMap.containsKey(hostInfo)) {
                // Same topology-scoped filtering as for the active partitions above.
                standbyPartitionsOnHost.addAll(standbyPartitionHostMap.get(hostInfo).stream().filter(tp -> topologyMetadata.fullSourceTopicNamesForTopology(topologyName).contains(tp.topic())).collect(Collectors.toSet()));
                standbyStoresOnHost.addAll(getStoresOnHost(storeToSourceTopics, standbyPartitionsOnHost));
            }
            // Only emit metadata for (host, topology) pairs that actually own something.
            if (!(activeStoresOnHost.isEmpty() && activePartitionsOnHost.isEmpty() && standbyStoresOnHost.isEmpty() && standbyPartitionsOnHost.isEmpty())) {
                final StreamsMetadata metadata = new StreamsMetadataImpl(hostInfo, activeStoresOnHost, activePartitionsOnHost, standbyStoresOnHost, standbyPartitionsOnHost, topologyName);
                rebuiltMetadata.add(metadata);
                // NOTE(review): this per-topology localMetadata.set is always overwritten by the
                // aggregate set at the end of this lambda — it looks redundant; confirm intent.
                if (hostInfo.equals(thisHost)) {
                    localMetadata.set(metadata);
                }
            } else {
                log.debug("Host {} has no tasks for topology {} at the moment, this metadata will not be built", hostInfo, topologyName);
            }
        }
        // Construct metadata across all topologies on this host for the `localMetadata` field
        final Map<String, List<String>> storeToSourceTopics = topologyMetadata.stateStoreNameToSourceTopics();
        // NOTE(review): these lookups may return null when thisHost owns no active/standby
        // partitions; null then flows into getStoresOnHost and StreamsMetadataImpl — confirm
        // those callees tolerate null, otherwise this is a latent NPE.
        final Set<TopicPartition> localActivePartitions = activePartitionHostMap.get(thisHost);
        final Set<TopicPartition> localStandbyPartitions = standbyPartitionHostMap.get(thisHost);
        // NOTE(review): this runs once per host in the outer forEach but always writes the
        // same aggregate value for thisHost — hoisting it out of the loop would be equivalent.
        localMetadata.set(new StreamsMetadataImpl(thisHost, getStoresOnHost(storeToSourceTopics, localActivePartitions), localActivePartitions, getStoresOnHost(storeToSourceTopics, localStandbyPartitions), localStandbyPartitions));
    });
    return rebuiltMetadata;
}
Also used : StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) HostInfo(org.apache.kafka.streams.state.HostInfo) HashSet(java.util.HashSet)

Aggregations

HostInfo (org.apache.kafka.streams.state.HostInfo)57 TopicPartition (org.apache.kafka.common.TopicPartition)31 HashSet (java.util.HashSet)30 Test (org.junit.Test)27 Set (java.util.Set)25 HashMap (java.util.HashMap)22 TaskId (org.apache.kafka.streams.processor.TaskId)18 AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo)16 KsqlHostInfo (io.confluent.ksql.util.KsqlHostInfo)12 Map (java.util.Map)12 PartitionInfo (org.apache.kafka.common.PartitionInfo)11 ArrayList (java.util.ArrayList)10 UUID (java.util.UUID)9 Cluster (org.apache.kafka.common.Cluster)9 PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor)8 StreamsMetadata (org.apache.kafka.streams.StreamsMetadata)7 SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo)7 List (java.util.List)6 Node (org.apache.kafka.common.Node)6 KsqlNode (io.confluent.ksql.execution.streams.materialization.Locator.KsqlNode)5