Example 96 with Set

Use of java.util.Set in project kafka by apache.

The class TopologyBuilder, method topicGroups.

/**
 * Returns the map of topic groups keyed by the group id.
 * A topic group is a group of topics in the same task.
 *
 * @return groups of topic names
 */
public synchronized Map<Integer, TopicsInfo> topicGroups() {
    Map<Integer, TopicsInfo> topicGroups = new LinkedHashMap<>();
    if (nodeGroups == null)
        nodeGroups = makeNodeGroups();
    for (Map.Entry<Integer, Set<String>> entry : nodeGroups.entrySet()) {
        Set<String> sinkTopics = new HashSet<>();
        Set<String> sourceTopics = new HashSet<>();
        Map<String, InternalTopicConfig> internalSourceTopics = new HashMap<>();
        Map<String, InternalTopicConfig> stateChangelogTopics = new HashMap<>();
        for (String node : entry.getValue()) {
            // if the node is a source node, add to the source topics
            List<String> topics = nodeToSourceTopics.get(node);
            if (topics != null) {
                // if some of the topics are internal, add them to the internal topics
                for (String topic : topics) {
                    // skip global topics, as they don't need partition assignment
                    if (globalTopics.contains(topic)) {
                        continue;
                    }
                    if (this.internalTopicNames.contains(topic)) {
                        // prefix the internal topic name with the application id
                        String internalTopic = decorateTopic(topic);
                        internalSourceTopics.put(internalTopic, new InternalTopicConfig(internalTopic, Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap()));
                        sourceTopics.add(internalTopic);
                    } else {
                        sourceTopics.add(topic);
                    }
                }
            }
            // if the node is a sink node, add to the sink topics
            String topic = nodeToSinkTopic.get(node);
            if (topic != null) {
                if (internalTopicNames.contains(topic)) {
                    // prefix the internal sink topic name with the application id
                    sinkTopics.add(decorateTopic(topic));
                } else {
                    sinkTopics.add(topic);
                }
            }
            // if the node is connected to a state, add to the state topics
            for (StateStoreFactory stateFactory : stateFactories.values()) {
                final StateStoreSupplier supplier = stateFactory.supplier;
                if (supplier.loggingEnabled() && stateFactory.users.contains(node)) {
                    final String name = ProcessorStateManager.storeChangelogTopic(applicationId, supplier.name());
                    final InternalTopicConfig internalTopicConfig = createInternalTopicConfig(supplier, name);
                    stateChangelogTopics.put(name, internalTopicConfig);
                }
            }
        }
        if (!sourceTopics.isEmpty()) {
            topicGroups.put(entry.getKey(), new TopicsInfo(Collections.unmodifiableSet(sinkTopics), Collections.unmodifiableSet(sourceTopics), Collections.unmodifiableMap(internalSourceTopics), Collections.unmodifiableMap(stateChangelogTopics)));
        }
    }
    return Collections.unmodifiableMap(topicGroups);
}
Also used: HashSet (java.util.HashSet), Set (java.util.Set), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Map (java.util.Map), InternalTopicConfig (org.apache.kafka.streams.processor.internals.InternalTopicConfig)
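
A quick usage sketch, assuming a configured TopologyBuilder named builder (an illustration, not part of the Kafka source above); the returned map is unmodifiable and keyed by group id:

// Hypothetical usage; 'builder' is assumed to be a TopologyBuilder wired up elsewhere
Map<Integer, TopicsInfo> groups = builder.topicGroups();
for (Map.Entry<Integer, TopicsInfo> group : groups.entrySet()) {
    // each TopicsInfo bundles the group's sink, source, internal source, and changelog topics
    System.out.println("group " + group.getKey() + ": " + group.getValue());
}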

Example 97 with Set

Use of java.util.Set in project kafka by apache.

The class StreamPartitionAssignor, method onAssignment.

/**
 * @throws TaskAssignmentException if there is no task id for one of the partitions specified
 */
@Override
public void onAssignment(Assignment assignment) {
    List<TopicPartition> partitions = new ArrayList<>(assignment.partitions());
    Collections.sort(partitions, PARTITION_COMPARATOR);
    AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
    this.standbyTasks = info.standbyTasks;
    this.activeTasks = new HashMap<>();
    // info.activeTasks can contain the same task id more than once if a task has more than one assigned partition
    if (partitions.size() != info.activeTasks.size()) {
        throw new TaskAssignmentException(String.format("stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d" + ", assignmentInfo=%s", streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()));
    }
    for (int i = 0; i < partitions.size(); i++) {
        TopicPartition partition = partitions.get(i);
        TaskId id = info.activeTasks.get(i);
        Set<TopicPartition> assignedPartitions = activeTasks.get(id);
        if (assignedPartitions == null) {
            assignedPartitions = new HashSet<>();
            activeTasks.put(id, assignedPartitions);
        }
        assignedPartitions.add(partition);
    }
    this.partitionsByHostState = info.partitionsByHost;
    final Collection<Set<TopicPartition>> values = partitionsByHostState.values();
    final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>();
    for (Set<TopicPartition> value : values) {
        for (TopicPartition topicPartition : value) {
            topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0]));
        }
    }
    metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo);
}
Also used: TaskAssignmentException (org.apache.kafka.streams.errors.TaskAssignmentException), TaskId (org.apache.kafka.streams.processor.TaskId), HashSet (java.util.HashSet), Set (java.util.Set), HashMap (java.util.HashMap), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), TopicPartition (org.apache.kafka.common.TopicPartition), PartitionInfo (org.apache.kafka.common.PartitionInfo)
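
The get-or-create block above predates Java 8 idioms in this codebase; a minimal standalone sketch of the same accumulation pattern using Map.computeIfAbsent, with plain Integer and String keys standing in for TaskId and TopicPartition:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class GroupPartitionsByTask {
    public static void main(String[] args) {
        Map<Integer, Set<String>> activeTasks = new HashMap<>();
        // the set for a task id is created on first use, then reused
        activeTasks.computeIfAbsent(0, k -> new HashSet<>()).add("topic1-0");
        activeTasks.computeIfAbsent(0, k -> new HashSet<>()).add("topic1-1");
        activeTasks.computeIfAbsent(1, k -> new HashSet<>()).add("topic2-0");
        // prints e.g. {0=[topic1-0, topic1-1], 1=[topic2-0]} (set order not guaranteed)
        System.out.println(activeTasks);
    }
}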

Example 98 with Set

Use of java.util.Set in project kafka by apache.

The class KStreamBuilderTest, method shouldAddGlobalTablesToEachGroup.

@Test
public void shouldAddGlobalTablesToEachGroup() throws Exception {
    final String one = "globalTable";
    final String two = "globalTable2";
    final GlobalKTable<String, String> globalTable = builder.globalTable("table", one);
    final GlobalKTable<String, String> globalTable2 = builder.globalTable("table2", two);
    builder.table("not-global", "not-global");
    final KeyValueMapper<String, String, String> kvMapper = new KeyValueMapper<String, String, String>() {

        @Override
        public String apply(final String key, final String value) {
            return value;
        }
    };
    final KStream<String, String> stream = builder.stream("t1");
    stream.leftJoin(globalTable, kvMapper, MockValueJoiner.TOSTRING_JOINER);
    final KStream<String, String> stream2 = builder.stream("t2");
    stream2.leftJoin(globalTable2, kvMapper, MockValueJoiner.TOSTRING_JOINER);
    final Map<Integer, Set<String>> nodeGroups = builder.nodeGroups();
    for (Integer groupId : nodeGroups.keySet()) {
        final ProcessorTopology topology = builder.build(groupId);
        final List<StateStore> stateStores = topology.globalStateStores();
        final Set<String> names = new HashSet<>();
        for (StateStore stateStore : stateStores) {
            names.add(stateStore.name());
        }
        assertEquals(2, stateStores.size());
        assertTrue(names.contains(one));
        assertTrue(names.contains(two));
    }
}
Also used: ProcessorTopology (org.apache.kafka.streams.processor.internals.ProcessorTopology), Set (java.util.Set), HashSet (java.util.HashSet), StateStore (org.apache.kafka.streams.processor.StateStore), MockKeyValueMapper (org.apache.kafka.test.MockKeyValueMapper), Test (org.junit.Test)
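
The name-collection loop in the test can equivalently be written with the Stream API (Java 8+); a minimal sketch over the same stateStores list:

// Equivalent to the manual loop above; assumes the same 'stateStores' variable
Set<String> names = stateStores.stream()
        .map(StateStore::name)
        .collect(java.util.stream.Collectors.toSet());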

Example 99 with Set

Use of java.util.Set in project kafka by apache.

The class KStreamKStreamJoinTest, method testAsymetricWindowingBefore.

@Test
public void testAsymetricWindowingBefore() throws Exception {
    long time = 1000L;
    KStreamBuilder builder = new KStreamBuilder();
    final int[] expectedKeys = new int[] { 0, 1, 2, 3 };
    KStream<Integer, String> stream1;
    KStream<Integer, String> stream2;
    KStream<Integer, String> joined;
    MockProcessorSupplier<Integer, String> processor;
    processor = new MockProcessorSupplier<>();
    stream1 = builder.stream(intSerde, stringSerde, topic1);
    stream2 = builder.stream(intSerde, stringSerde, topic2);
    joined = stream1.join(stream2, MockValueJoiner.TOSTRING_JOINER, JoinWindows.of(0).before(100), intSerde, stringSerde, stringSerde);
    joined.process(processor);
    Collection<Set<String>> copartitionGroups = builder.copartitionGroups();
    assertEquals(1, copartitionGroups.size());
    assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next());
    driver = new KStreamTestDriver(builder, stateDir);
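    // push four items to the primary stream at timestamps 1000..1003; the other window is empty, so no output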
    for (int i = 0; i < expectedKeys.length; i++) {
        setRecordContext(time + i, topic1);
        driver.process(topic1, expectedKeys[i], "X" + expectedKeys[i]);
    }
    processor.checkAndClearProcessResult();
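    // move the other stream to 1000 - 100 - 1 = 899, one tick before X0's lower join bound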
    time = 1000L - 100L - 1L;
    setRecordContext(time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult();
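    // at 900 (= 1000 - 100), the earliest X record (X0) just comes into range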
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1", "2:X2+YY2");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1", "2:X2+YY2", "3:X3+YY3");
    time = 1000L;
    setRecordContext(time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1", "2:X2+YY2", "3:X3+YY3");
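    // with after = 0, records drop out as soon as the YY timestamp passes each X timestamp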
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("1:X1+YY1", "2:X2+YY2", "3:X3+YY3");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("2:X2+YY2", "3:X3+YY3");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("3:X3+YY3");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult();
}
Also used: KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder), Set (java.util.Set), HashSet (java.util.HashSet), KStreamTestDriver (org.apache.kafka.test.KStreamTestDriver), Test (org.junit.Test)
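
The boundary arithmetic this test exercises: with JoinWindows.of(0).before(100), a topic2 record at time t2 joins a topic1 record at time t1 iff t1 - 100 <= t2 <= t1. A minimal standalone sketch of that predicate (an illustration of the window semantics the test relies on, not Kafka code):

// Sketch of the join predicate implied by JoinWindows.of(0).before(100)
static boolean joins(long t1, long t2) {
    long before = 100L; // t2 may be up to 100 ms earlier than t1
    long after = 0L;    // t2 may not be later than t1
    return t1 - before <= t2 && t2 <= t1 + after;
}
// joins(1000L, 899L)  -> false: one tick too early (the empty result above)
// joins(1000L, 900L)  -> true:  exactly t1 - 100 (the "0:X0+YY0" result)
// joins(1000L, 1001L) -> false: past t1, so X0 drops out first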

Example 100 with Set

Use of java.util.Set in project kafka by apache.

The class KStreamKStreamJoinTest, method testWindowing.

@Test
public void testWindowing() throws Exception {
    long time = 0L;
    KStreamBuilder builder = new KStreamBuilder();
    final int[] expectedKeys = new int[] { 0, 1, 2, 3 };
    KStream<Integer, String> stream1;
    KStream<Integer, String> stream2;
    KStream<Integer, String> joined;
    MockProcessorSupplier<Integer, String> processor;
    processor = new MockProcessorSupplier<>();
    stream1 = builder.stream(intSerde, stringSerde, topic1);
    stream2 = builder.stream(intSerde, stringSerde, topic2);
    joined = stream1.join(stream2, MockValueJoiner.TOSTRING_JOINER, JoinWindows.of(100), intSerde, stringSerde, stringSerde);
    joined.process(processor);
    Collection<Set<String>> copartitionGroups = builder.copartitionGroups();
    assertEquals(1, copartitionGroups.size());
    assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next());
    driver = new KStreamTestDriver(builder, stateDir);
    // push two items to the primary stream. the other window is empty. this should produce no items.
    // w1 = {}
    // w2 = {}
    // --> w1 = { 0:X0, 1:X1 }
    //     w2 = {}
    setRecordContext(time, topic1);
    for (int i = 0; i < 2; i++) {
        driver.process(topic1, expectedKeys[i], "X" + expectedKeys[i]);
    }
    processor.checkAndClearProcessResult();
    // push two items to the other stream. this should produce two items.
    // w1 = { 0:X0, 1:X1 }
    // w2 = {}
    // --> w1 = { 0:X0, 1:X1 }
    //     w2 = { 0:Y0, 1:Y1 }
    setRecordContext(time, topic2);
    for (int i = 0; i < 2; i++) {
        driver.process(topic2, expectedKeys[i], "Y" + expectedKeys[i]);
    }
    processor.checkAndClearProcessResult("0:X0+Y0", "1:X1+Y1");
    // advance time so earlier items fall outside the join window (logically cleared)
    time = 1000L;
    setRecordContext(time, topic1);
    for (int i = 0; i < expectedKeys.length; i++) {
        setRecordContext(time + i, topic1);
        driver.process(topic1, expectedKeys[i], "X" + expectedKeys[i]);
    }
    processor.checkAndClearProcessResult();
    // gradually expire items in w1
    // w1 = { 0:X0, 1:X1, 2:X2, 3:X3 }
    time = 1000 + 100L;
    setRecordContext(time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1", "2:X2+YY2", "3:X3+YY3");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("1:X1+YY1", "2:X2+YY2", "3:X3+YY3");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("2:X2+YY2", "3:X3+YY3");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("3:X3+YY3");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult();
    // go back to the time before expiration
    time = 1000L - 100L - 1L;
    setRecordContext(time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult();
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1", "2:X2+YY2");
    setRecordContext(++time, topic2);
    for (int expectedKey : expectedKeys) {
        driver.process(topic2, expectedKey, "YY" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:X0+YY0", "1:X1+YY1", "2:X2+YY2", "3:X3+YY3");
    // advance time again so earlier items fall outside the join window (logically cleared)
    time = 2000L;
    for (int i = 0; i < expectedKeys.length; i++) {
        setRecordContext(time + i, topic2);
        driver.process(topic2, expectedKeys[i], "Y" + expectedKeys[i]);
    }
    processor.checkAndClearProcessResult();
    // gradually expire items in w2
    // w2 = { 0:Y0, 1:Y1, 2:Y2, 3:Y3 }
    time = 2000L + 100L;
    setRecordContext(time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:XX0+Y0", "1:XX1+Y1", "2:XX2+Y2", "3:XX3+Y3");
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("1:XX1+Y1", "2:XX2+Y2", "3:XX3+Y3");
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("2:XX2+Y2", "3:XX3+Y3");
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("3:XX3+Y3");
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult();
    // go back to the time before expiration
    time = 2000L - 100L - 1L;
    setRecordContext(time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult();
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:XX0+Y0");
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:XX0+Y0", "1:XX1+Y1");
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:XX0+Y0", "1:XX1+Y1", "2:XX2+Y2");
    setRecordContext(++time, topic1);
    for (int expectedKey : expectedKeys) {
        driver.process(topic1, expectedKey, "XX" + expectedKey);
    }
    processor.checkAndClearProcessResult("0:XX0+Y0", "1:XX1+Y1", "2:XX2+Y2", "3:XX3+Y3");
}
Also used: KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder), Set (java.util.Set), HashSet (java.util.HashSet), KStreamTestDriver (org.apache.kafka.test.KStreamTestDriver), Test (org.junit.Test)
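
For contrast with the asymmetric case above, JoinWindows.of(100) is symmetric: two records join whenever their timestamps differ by at most 100 ms in either direction. A minimal standalone sketch of that predicate (again an illustration, not Kafka code):

// Sketch of the symmetric join predicate implied by JoinWindows.of(100)
static boolean joins(long t1, long t2) {
    long window = 100L;
    return Math.abs(t1 - t2) <= window; // |t1 - t2| <= 100
}
// joins(1000L, 1100L) -> true:  exactly at the upper bound ("0:X0+YY0" above)
// joins(1000L, 1101L) -> false: X0 is the first to expire as time advances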

Aggregations

Set (java.util.Set): 6789, HashSet (java.util.HashSet): 4372, HashMap (java.util.HashMap): 2090, Map (java.util.Map): 1865, Iterator (java.util.Iterator): 1774, ArrayList (java.util.ArrayList): 1113, List (java.util.List): 980, Test (org.junit.Test): 920, TreeSet (java.util.TreeSet): 536, IOException (java.io.IOException): 501, SSOException (com.iplanet.sso.SSOException): 467, LinkedHashSet (java.util.LinkedHashSet): 418, SMSException (com.sun.identity.sm.SMSException): 347, IdRepoException (com.sun.identity.idm.IdRepoException): 268, Collection (java.util.Collection): 259, ImmutableSet (com.google.common.collect.ImmutableSet): 256, File (java.io.File): 245, SSOToken (com.iplanet.sso.SSOToken): 226, Collectors (java.util.stream.Collectors): 219, Test (org.testng.annotations.Test): 209