
Example 16 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project apache-kafka-on-k8s by banzaicloud.

Class StreamsMetadataStateTest, method before().

@Before
public void before() throws Exception {
    builder = new StreamsBuilder();
    final KStream<Object, Object> one = builder.stream("topic-one");
    one.groupByKey().count("table-one");
    final KStream<Object, Object> two = builder.stream("topic-two");
    two.groupByKey().count("table-two");
    builder.stream("topic-three").groupByKey().count("table-three");
    one.merge(two).groupByKey().count("merged-table");
    builder.stream("topic-four").mapValues(new ValueMapper<Object, Object>() {

        @Override
        public Object apply(final Object value) {
            return value;
        }
    });
    builder.globalTable("global-topic", Consumed.with(null, null), Materialized.<Object, Object, KeyValueStore<Bytes, byte[]>>as(globalTable));
    StreamsBuilderTest.internalTopologyBuilder(builder).setApplicationId("appId");
    topic1P0 = new TopicPartition("topic-one", 0);
    topic1P1 = new TopicPartition("topic-one", 1);
    topic2P0 = new TopicPartition("topic-two", 0);
    topic2P1 = new TopicPartition("topic-two", 1);
    topic3P0 = new TopicPartition("topic-three", 0);
    topic4P0 = new TopicPartition("topic-four", 0);
    hostOne = new HostInfo("host-one", 8080);
    hostTwo = new HostInfo("host-two", 9090);
    hostThree = new HostInfo("host-three", 7070);
    hostToPartitions = new HashMap<>();
    hostToPartitions.put(hostOne, Utils.mkSet(topic1P0, topic2P1, topic4P0));
    hostToPartitions.put(hostTwo, Utils.mkSet(topic2P0, topic1P1));
    hostToPartitions.put(hostThree, Collections.singleton(topic3P0));
    partitionInfos = Arrays.asList(
        new PartitionInfo("topic-one", 0, null, null, null),
        new PartitionInfo("topic-one", 1, null, null, null),
        new PartitionInfo("topic-two", 0, null, null, null),
        new PartitionInfo("topic-two", 1, null, null, null),
        new PartitionInfo("topic-three", 0, null, null, null),
        new PartitionInfo("topic-four", 0, null, null, null));
    cluster = new Cluster(null, Collections.<Node>emptyList(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet());
    discovery = new StreamsMetadataState(StreamsBuilderTest.internalTopologyBuilder(builder), hostOne);
    discovery.onChange(hostToPartitions, cluster);
    partitioner = new StreamPartitioner<String, Object>() {

        @Override
        public Integer partition(final String key, final Object value, final int numPartitions) {
            return 1;
        }
    };
}
Also used : Node(org.apache.kafka.common.Node) Cluster(org.apache.kafka.common.Cluster) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Bytes(org.apache.kafka.common.utils.Bytes) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo) HostInfo(org.apache.kafka.streams.state.HostInfo) Before(org.junit.Before)
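
The fixture above wires three HostInfo instances into a StreamsMetadataState. Below is a minimal sketch of how a test built on this fixture might query it; shouldResolveHostsForStore is a hypothetical test name, getAllMetadataForStore and getMetadataWithKey are the lookup methods StreamsMetadataState exposes in this codebase, and the assertions are illustrative rather than copied from the real test (it additionally needs org.apache.kafka.streams.StreamsMetadata, org.apache.kafka.common.serialization.Serdes and java.util.Collection).

@Test
public void shouldResolveHostsForStore() {
    // "table-one" is fed by topic-one, whose two partitions were placed on hostOne and hostTwo
    final Collection<StreamsMetadata> metadata = discovery.getAllMetadataForStore("table-one");
    for (final StreamsMetadata streamsMetadata : metadata) {
        assertTrue(streamsMetadata.hostInfo().equals(hostOne) || streamsMetadata.hostInfo().equals(hostTwo));
    }
    // locate the single host that owns a particular key of the store
    final StreamsMetadata keyMetadata = discovery.getMetadataWithKey("table-one", "key", Serdes.String().serializer());
    assertNotNull(keyMetadata.hostInfo());
}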

Example 17 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project apache-kafka-on-k8s by banzaicloud.

Class StreamsPartitionAssignorTest, method shouldNotAddStandbyTaskPartitionsToPartitionsForHost().

@Test
public void shouldNotAddStandbyTaskPartitionsToPartitionsForHost() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final InternalTopologyBuilder internalTopologyBuilder = StreamsBuilderTest.internalTopologyBuilder(builder);
    internalTopologyBuilder.setApplicationId(applicationId);
    builder.stream("topic1").groupByKey().count();
    final UUID uuid = UUID.randomUUID();
    mockTaskManager(Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), uuid, internalTopologyBuilder);
    Map<String, Object> props = new HashMap<>();
    props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint);
    configurePartitionAssignor(props);
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamsConfig, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer1",
        new PartitionAssignor.Subscription(Collections.singletonList("topic1"),
            new SubscriptionInfo(uuid, emptyTasks, emptyTasks, userEndPoint).encode()));
    subscriptions.put("consumer2",
        new PartitionAssignor.Subscription(Collections.singletonList("topic1"),
            new SubscriptionInfo(UUID.randomUUID(), emptyTasks, emptyTasks, "other:9090").encode()));
    final Set<TopicPartition> allPartitions = Utils.mkSet(t1p0, t1p1, t1p2);
    final Map<String, PartitionAssignor.Assignment> assign = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumer1Assignment = assign.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumer1Assignment.userData());
    final Set<TopicPartition> consumer1partitions = assignmentInfo.partitionsByHost().get(new HostInfo("localhost", 8080));
    final Set<TopicPartition> consumer2Partitions = assignmentInfo.partitionsByHost().get(new HostInfo("other", 9090));
    final HashSet<TopicPartition> allAssignedPartitions = new HashSet<>(consumer1partitions);
    allAssignedPartitions.addAll(consumer2Partitions);
    assertThat(consumer1partitions, not(allPartitions));
    assertThat(consumer2Partitions, not(allPartitions));
    assertThat(allAssignedPartitions, equalTo(allPartitions));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) HashMap(java.util.HashMap) MockInternalTopicManager(org.apache.kafka.test.MockInternalTopicManager) SubscriptionInfo(org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) UUID(java.util.UUID) HostInfo(org.apache.kafka.streams.state.HostInfo) HashSet(java.util.HashSet) StreamsBuilderTest(org.apache.kafka.streams.StreamsBuilderTest) Test(org.junit.Test)
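
The test above asserts on the decoded AssignmentInfo directly. For context, a short sketch of how a running application reads the same host-to-partitions view through the public API, assuming a started KafkaStreams instance named streams; this mirrors, rather than reproduces, the assignor internals.

// needs org.apache.kafka.streams.KafkaStreams and org.apache.kafka.streams.StreamsMetadata
final Map<HostInfo, Set<TopicPartition>> partitionsByHost = new HashMap<>();
for (final StreamsMetadata streamsMetadata : streams.allMetadata()) {
    // topicPartitions() covers active-task partitions only, which is what the test above verifies
    partitionsByHost.put(streamsMetadata.hostInfo(), streamsMetadata.topicPartitions());
}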

Example 18 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project apache-kafka-on-k8s by banzaicloud.

Class AssignmentInfoTest, method testEncodeDecode().

@Test
public void testEncodeDecode() {
    List<TaskId> activeTasks = Arrays.asList(new TaskId(0, 0), new TaskId(0, 0), new TaskId(0, 1), new TaskId(1, 0));
    Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    standbyTasks.put(new TaskId(1, 1), Utils.mkSet(new TopicPartition("t1", 1), new TopicPartition("t2", 1)));
    standbyTasks.put(new TaskId(2, 0), Utils.mkSet(new TopicPartition("t3", 0), new TopicPartition("t3", 0)));
    AssignmentInfo info = new AssignmentInfo(activeTasks, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>());
    AssignmentInfo decoded = AssignmentInfo.decode(info.encode());
    assertEquals(info, decoded);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) HostInfo(org.apache.kafka.streams.state.HostInfo) Test(org.junit.Test)
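
The round trip above uses an empty partitionsByHost map. A minimal variation, using the same constructor and illustrative values, shows that HostInfo entries survive the same encode()/decode() cycle.

final Map<HostInfo, Set<TopicPartition>> partitionsByHost = new HashMap<>();
partitionsByHost.put(new HostInfo("localhost", 8080), Utils.mkSet(new TopicPartition("t1", 0)));
final AssignmentInfo withHosts = new AssignmentInfo(activeTasks, standbyTasks, partitionsByHost);
assertEquals(withHosts, AssignmentInfo.decode(withHosts.encode()));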

Example 19 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project apache-kafka-on-k8s by banzaicloud.

Class StreamsPartitionAssignor, method processVersionTwoAssignment().

private void processVersionTwoAssignment(final AssignmentInfo info, final List<TopicPartition> partitions, final Map<TaskId, Set<TopicPartition>> activeTasks, final Map<TopicPartition, PartitionInfo> topicToPartitionInfo) {
    processVersionOneAssignment(info, partitions, activeTasks);
    // process partitions by host
    final Map<HostInfo, Set<TopicPartition>> partitionsByHost = info.partitionsByHost();
    for (final Set<TopicPartition> value : partitionsByHost.values()) {
        for (final TopicPartition topicPartition : value) {
            topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0]));
        }
    }
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) TopicPartition(org.apache.kafka.common.TopicPartition) Node(org.apache.kafka.common.Node) PartitionInfo(org.apache.kafka.common.PartitionInfo) HostInfo(org.apache.kafka.streams.state.HostInfo)
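
The map filled here only holds placeholder PartitionInfo entries (no leader, no replicas). A short sketch of how such a map is typically folded into a Cluster view afterwards, assuming the same info, partitions and activeTasks arguments are in scope; Cluster.empty().withPartitions exists on this Kafka version, but the surrounding wiring is an assumption rather than the assignor's exact code.

final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>();
processVersionTwoAssignment(info, partitions, activeTasks, topicToPartitionInfo);
// the resulting Cluster exposes partition counts per topic without real broker metadata
final Cluster clusterView = Cluster.empty().withPartitions(topicToPartitionInfo);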

Example 20 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project kafka-streams-examples by confluentinc.

Class WordCountInteractiveQueriesExample, method startRestProxy().

static WordCountInteractiveQueriesRestService startRestProxy(final KafkaStreams streams, final int port) throws Exception {
    final HostInfo hostInfo = new HostInfo(DEFAULT_HOST, port);
    final WordCountInteractiveQueriesRestService wordCountInteractiveQueriesRestService = new WordCountInteractiveQueriesRestService(streams, hostInfo);
    wordCountInteractiveQueriesRestService.start(port);
    return wordCountInteractiveQueriesRestService;
}
Also used : HostInfo(org.apache.kafka.streams.state.HostInfo)
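
A hedged sketch of a caller for startRestProxy, assuming a KafkaStreams instance built elsewhere (createStreams() is a hypothetical helper), that the REST service exposes a stop() method as in the Confluent example project, and an illustrative port value.

final int port = 7070;
final KafkaStreams streams = createStreams();  // hypothetical: builds the word-count topology
streams.start();
final WordCountInteractiveQueriesRestService restService = startRestProxy(streams, port);
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    try {
        streams.close();
        restService.stop();
    } catch (final Exception e) {
        // ignore errors during shutdown
    }
}));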

Aggregations

HostInfo (org.apache.kafka.streams.state.HostInfo): 57
TopicPartition (org.apache.kafka.common.TopicPartition): 31
HashSet (java.util.HashSet): 30
Test (org.junit.Test): 27
Set (java.util.Set): 25
HashMap (java.util.HashMap): 22
TaskId (org.apache.kafka.streams.processor.TaskId): 18
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 16
KsqlHostInfo (io.confluent.ksql.util.KsqlHostInfo): 12
Map (java.util.Map): 12
PartitionInfo (org.apache.kafka.common.PartitionInfo): 11
ArrayList (java.util.ArrayList): 10
UUID (java.util.UUID): 9
Cluster (org.apache.kafka.common.Cluster): 9
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 8
StreamsMetadata (org.apache.kafka.streams.StreamsMetadata): 7
SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo): 7
List (java.util.List): 6
Node (org.apache.kafka.common.Node): 6
KsqlNode (io.confluent.ksql.execution.streams.materialization.Locator.KsqlNode): 5