Example 46 with MockProcessorSupplier

Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.

From the class StreamPartitionAssignorTest, method testAssignWithNewTasks.

@Test
public void testAssignWithNewTasks() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addSource("source3", "topic3");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2", "source3");
    List<String> topics = Utils.mkList("topic1", "topic2", "topic3");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2, task3);
    // assuming that previous tasks do not have topic3
    final Set<TaskId> prevTasks10 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks11 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks20 = Utils.mkSet(task2);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks10, Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks11, Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, prevTasks20, Collections.<TaskId>emptySet(), userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partitions: since there is no previous task for topic 3 it will be assigned randomly so we cannot check exact match
    // also note that previously assigned partitions / tasks may not stay on the previous host since we may assign the new task first and
    // then later ones will be re-assigned to other hosts due to load balancing
    Set<TaskId> allActiveTasks = new HashSet<>();
    Set<TopicPartition> allPartitions = new HashSet<>();
    AssignmentInfo info;
    info = AssignmentInfo.decode(assignments.get("consumer10").userData());
    allActiveTasks.addAll(info.activeTasks);
    allPartitions.addAll(assignments.get("consumer10").partitions());
    info = AssignmentInfo.decode(assignments.get("consumer11").userData());
    allActiveTasks.addAll(info.activeTasks);
    allPartitions.addAll(assignments.get("consumer11").partitions());
    info = AssignmentInfo.decode(assignments.get("consumer20").userData());
    allActiveTasks.addAll(info.activeTasks);
    allPartitions.addAll(assignments.get("consumer20").partitions());
    assertEquals(allTasks, allActiveTasks);
    assertEquals(Utils.mkSet(t1p0, t1p1, t1p2, t2p0, t2p1, t2p2, t3p0, t3p1, t3p2, t3p3), allPartitions);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), TopicPartition (org.apache.kafka.common.TopicPartition), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), HashSet (java.util.HashSet), Test (org.junit.Test)
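The tests on this page pass MockProcessorSupplier purely as a placeholder node when building a topology; the class itself is a test ProcessorSupplier that records what its processor sees. A minimal sketch of such a helper, written against the pre-2.0 Processor API and only an approximation of the actual org.apache.kafka.test.MockProcessorSupplier, might look like this:

import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorSupplier;

// Illustrative stand-in for a recording processor supplier used in topology tests.
public class RecordingProcessorSupplier<K, V> implements ProcessorSupplier<K, V> {

    // Collects "key:value" strings so a test can assert on what was processed.
    public final List<String> processed = new ArrayList<>();

    @Override
    public Processor<K, V> get() {
        return new AbstractProcessor<K, V>() {
            @Override
            public void process(final K key, final V value) {
                processed.add(key + ":" + value);
            }
        };
    }
}

In the partition-assignment tests the supplier only needs to exist as a node in the topology, so its recording behavior is never exercised.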

Example 47 with MockProcessorSupplier

Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.

From the class StreamPartitionAssignorTest, method testAssignWithStates.

@Test
public void testAssignWithStates() throws Exception {
    String applicationId = "test";
    builder.setApplicationId(applicationId);
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor-1", new MockProcessorSupplier(), "source1");
    builder.addStateStore(new MockStateStoreSupplier("store1", false), "processor-1");
    builder.addProcessor("processor-2", new MockProcessorSupplier(), "source2");
    builder.addStateStore(new MockStateStoreSupplier("store2", false), "processor-2");
    builder.addStateStore(new MockStateStoreSupplier("store3", false), "processor-2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    TaskId task00 = new TaskId(0, 0);
    TaskId task01 = new TaskId(0, 1);
    TaskId task02 = new TaskId(0, 2);
    TaskId task10 = new TaskId(1, 0);
    TaskId task11 = new TaskId(1, 1);
    TaskId task12 = new TaskId(1, 2);
    List<TaskId> tasks = Utils.mkList(task00, task01, task02, task10, task11, task12);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, applicationId, client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partition size: since there is no previous task and there are two sub-topologies the assignment is random so we cannot check exact match
    assertEquals(2, assignments.get("consumer10").partitions().size());
    assertEquals(2, assignments.get("consumer11").partitions().size());
    assertEquals(2, assignments.get("consumer20").partitions().size());
    AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
    assertEquals(2, info10.activeTasks.size());
    assertEquals(2, info11.activeTasks.size());
    assertEquals(2, info20.activeTasks.size());
    Set<TaskId> allTasks = new HashSet<>();
    allTasks.addAll(info10.activeTasks);
    allTasks.addAll(info11.activeTasks);
    allTasks.addAll(info20.activeTasks);
    assertEquals(new HashSet<>(tasks), allTasks);
    // check tasks for state topics
    Map<Integer, TopologyBuilder.TopicsInfo> topicGroups = thread10.builder.topicGroups();
    assertEquals(Utils.mkSet(task00, task01, task02), tasksForState(applicationId, "store1", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store2", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store3", tasks, topicGroups));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), MockStateStoreSupplier (org.apache.kafka.test.MockStateStoreSupplier), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), HashSet (java.util.HashSet), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), Test (org.junit.Test)
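The addStateStore calls above only connect store suppliers to processor nodes; what that connection buys at runtime is that the processor can look the store up by name in init(). A brief sketch under the pre-2.0 Processor API (the store name "store1" is taken from the example, but the processor body and the KeyValueStore types are illustrative assumptions, not part of the test):

import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

// Illustrative processor that uses a store wired in via addStateStore.
public class Store1Processor extends AbstractProcessor<String, String> {

    private KeyValueStore<String, String> store;

    @Override
    @SuppressWarnings("unchecked")
    public void init(final ProcessorContext context) {
        super.init(context);
        // The name must match the one given to addStateStore / the store supplier.
        store = (KeyValueStore<String, String>) context.getStateStore("store1");
    }

    @Override
    public void process(final String key, final String value) {
        store.put(key, value);
    }
}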

Example 48 with MockProcessorSupplier

Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.

From the class StreamPartitionAssignorTest, method testAssignEmptyMetadata.

@Test
public void testAssignEmptyMetadata() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks10 = Utils.mkSet(task0);
    final Set<TaskId> standbyTasks10 = Utils.mkSet(task1);
    final Cluster emptyMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), Collections.<PartitionInfo>emptySet(), Collections.<String>emptySet(), Collections.<String>emptySet());
    UUID uuid1 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, new MockClientSupplier(), "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks10, standbyTasks10, userEndPoint).encode()));
    // initially metadata is empty
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(emptyMetadata, subscriptions);
    // check assigned partitions
    assertEquals(Collections.<TopicPartition>emptySet(), new HashSet<>(assignments.get("consumer10").partitions()));
    // check assignment info
    Set<TaskId> allActiveTasks = new HashSet<>();
    AssignmentInfo info10 = checkAssignment(Collections.<String>emptySet(), assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(0, allActiveTasks.size());
    assertEquals(Collections.<TaskId>emptySet(), new HashSet<>(allActiveTasks));
    // then metadata gets populated
    assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partitions
    assertEquals(Utils.mkSet(Utils.mkSet(t1p0, t2p0, t1p0, t2p0, t1p1, t2p1, t1p2, t2p2)), Utils.mkSet(new HashSet<>(assignments.get("consumer10").partitions())));
    // the first consumer
    info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), Cluster (org.apache.kafka.common.Cluster), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), HashSet (java.util.HashSet), Test (org.junit.Test)

Example 49 with MockProcessorSupplier

Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.

From the class StreamPartitionAssignorTest, method testAssignBasic.

@Test
public void testAssignBasic() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks10 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks11 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks20 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks10 = Utils.mkSet(task1);
    final Set<TaskId> standbyTasks11 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks20 = Utils.mkSet(task0);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks10, standbyTasks10, userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks11, standbyTasks11, userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, prevTasks20, standbyTasks20, userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partitions
    assertEquals(Utils.mkSet(Utils.mkSet(t1p0, t2p0), Utils.mkSet(t1p1, t2p1)), Utils.mkSet(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));
    assertEquals(Utils.mkSet(t1p2, t2p2), new HashSet<>(assignments.get("consumer20").partitions()));
    // check assignment info
    Set<TaskId> allActiveTasks = new HashSet<>();
    // the first consumer
    AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    // the second consumer
    AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks);
    assertEquals(Utils.mkSet(task0, task1), allActiveTasks);
    // the third consumer
    AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks);
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), HashSet (java.util.HashSet), Test (org.junit.Test)
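All of these tests build the consumer subscriptions by hand with SubscriptionInfo.encode(); on the leader side the assignor decodes the same bytes back. A standalone sketch of that round trip, assuming SubscriptionInfo.decode and public prevTasks/standbyTasks fields behave as the constructor usage in the tests suggests:

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Set;
import java.util.UUID;

import org.apache.kafka.streams.processor.TaskId;
import org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo;

public class SubscriptionInfoRoundTrip {
    public static void main(final String[] args) {
        // Metadata a member attaches to its subscription: previously owned and standby tasks.
        final Set<TaskId> prevTasks = Collections.singleton(new TaskId(0, 0));
        final Set<TaskId> standbyTasks = Collections.singleton(new TaskId(0, 1));
        final SubscriptionInfo sent = new SubscriptionInfo(UUID.randomUUID(), prevTasks, standbyTasks, "localhost:8080");

        // Encode into the userData bytes carried by PartitionAssignor.Subscription...
        final ByteBuffer userData = sent.encode();

        // ...and decode on the group leader, as StreamPartitionAssignor does for each member.
        final SubscriptionInfo received = SubscriptionInfo.decode(userData);
        System.out.println(received.prevTasks + " / " + received.standbyTasks);
    }
}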

Example 50 with MockProcessorSupplier

Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.

From the class StreamPartitionAssignorTest, method shouldMapUserEndPointToTopicPartitions.

@Test
public void shouldMapUserEndPointToTopicPartitions() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost:8080";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    builder.addSource("source", "topic1");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", "processor");
    final List<String> topics = Utils.mkList("topic1");
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    final StreamPartitionAssignor partitionAssignor = new StreamPartitionAssignor();
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer1", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, emptyTasks, emptyTasks, myEndPoint).encode()));
    final Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumerAssignment = assignments.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumerAssignment.userData());
    final Set<TopicPartition> topicPartitions = assignmentInfo.partitionsByHost.get(new HostInfo("localhost", 8080));
    assertEquals(Utils.mkSet(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1), new TopicPartition("topic1", 2)), topicPartitions);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), Properties (java.util.Properties), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), StreamsConfig (org.apache.kafka.streams.StreamsConfig), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), TopicPartition (org.apache.kafka.common.TopicPartition), HostInfo (org.apache.kafka.streams.state.HostInfo), Test (org.junit.Test)
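The endpoint that ends up in partitionsByHost comes from the application.server setting that the test adds to configProps(); in an application it is just another StreamsConfig property. A short sketch of supplying it (broker address and port values are illustrative):

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class ServerConfigExample {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "application-id");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // host:port that other instances can use to reach this instance's state stores;
        // the assignor propagates it to every member, keyed by HostInfo, in the assignment userData.
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");

        final StreamsConfig config = new StreamsConfig(props);
        System.out.println(config.getString(StreamsConfig.APPLICATION_SERVER_CONFIG));
    }
}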

Aggregations

MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier): 52
Test (org.junit.Test): 51
KStreamTestDriver (org.apache.kafka.test.KStreamTestDriver): 26
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 25
HashMap (java.util.HashMap): 15
Metrics (org.apache.kafka.common.metrics.Metrics): 14
TaskId (org.apache.kafka.streams.processor.TaskId): 13
UUID (java.util.UUID): 12
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 12
SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo): 11
HashSet (java.util.HashSet): 9
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 8
MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager): 8
MockStateStoreSupplier (org.apache.kafka.test.MockStateStoreSupplier): 8
Properties (java.util.Properties): 7
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 6
TopicPartition (org.apache.kafka.common.TopicPartition): 4
KeyValueMapper (org.apache.kafka.streams.kstream.KeyValueMapper): 4
Predicate (org.apache.kafka.streams.kstream.Predicate): 4
TopicsInfo (org.apache.kafka.streams.processor.TopologyBuilder.TopicsInfo): 4