
Example 11 with MockStateStoreSupplier

Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache, in the class StreamPartitionAssignorTest, method testAssignWithStates.

@Test
public void testAssignWithStates() throws Exception {
    String applicationId = "test";
    builder.setApplicationId(applicationId);
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor-1", new MockProcessorSupplier(), "source1");
    builder.addStateStore(new MockStateStoreSupplier("store1", false), "processor-1");
    builder.addProcessor("processor-2", new MockProcessorSupplier(), "source2");
    builder.addStateStore(new MockStateStoreSupplier("store2", false), "processor-2");
    builder.addStateStore(new MockStateStoreSupplier("store3", false), "processor-2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    TaskId task00 = new TaskId(0, 0);
    TaskId task01 = new TaskId(0, 1);
    TaskId task02 = new TaskId(0, 2);
    TaskId task10 = new TaskId(1, 0);
    TaskId task11 = new TaskId(1, 1);
    TaskId task12 = new TaskId(1, 2);
    List<TaskId> tasks = Utils.mkList(task00, task01, task02, task10, task11, task12);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, applicationId, client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partition sizes: with no previous tasks and two sub-topologies, the assignment is non-deterministic, so only the sizes can be checked, not an exact match
    assertEquals(2, assignments.get("consumer10").partitions().size());
    assertEquals(2, assignments.get("consumer11").partitions().size());
    assertEquals(2, assignments.get("consumer20").partitions().size());
    AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
    assertEquals(2, info10.activeTasks.size());
    assertEquals(2, info11.activeTasks.size());
    assertEquals(2, info20.activeTasks.size());
    Set<TaskId> allTasks = new HashSet<>();
    allTasks.addAll(info10.activeTasks);
    allTasks.addAll(info11.activeTasks);
    allTasks.addAll(info20.activeTasks);
    assertEquals(new HashSet<>(tasks), allTasks);
    // check tasks for state topics
    Map<Integer, TopologyBuilder.TopicsInfo> topicGroups = thread10.builder.topicGroups();
    assertEquals(Utils.mkSet(task00, task01, task02), tasksForState(applicationId, "store1", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store2", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store3", tasks, topicGroups));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), MockStateStoreSupplier (org.apache.kafka.test.MockStateStoreSupplier), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), HashSet (java.util.HashSet), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), Test (org.junit.Test)
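
The tasksForState helper called in the last three assertions is defined elsewhere in StreamPartitionAssignorTest and is not part of this snippet. Below is a minimal sketch of what such a helper could look like, assuming the changelog topic for a store is named "<applicationId>-<storeName>-changelog" and that TopicsInfo exposes its changelog topics as a stateChangelogTopics map; the body is illustrative, not the project's actual helper, and it reuses the imports listed above.

private Set<TaskId> tasksForState(final String applicationId,
                                  final String storeName,
                                  final List<TaskId> tasks,
                                  final Map<Integer, TopologyBuilder.TopicsInfo> topicGroups) {
    // Assumed changelog naming convention: "<applicationId>-<storeName>-changelog".
    final String changelogTopic = applicationId + "-" + storeName + "-changelog";
    final Set<TaskId> ids = new HashSet<>();
    for (final Map.Entry<Integer, TopologyBuilder.TopicsInfo> entry : topicGroups.entrySet()) {
        // If this topic group owns the store's changelog, every task in the same
        // group is backed by that store.
        if (entry.getValue().stateChangelogTopics.containsKey(changelogTopic)) {
            for (final TaskId id : tasks) {
                if (id.topicGroupId == entry.getKey()) {
                    ids.add(id);
                }
            }
        }
    }
    return ids;
}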

Example 12 with MockStateStoreSupplier

Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache, in the class StreamPartitionAssignorTest, method testAssignWithPartialTopology.

@Test
public void testAssignWithPartialTopology() throws Exception {
    Properties props = configProps();
    props.put(StreamsConfig.PARTITION_GROUPER_CLASS_CONFIG, SingleGroupPartitionGrouperStub.class);
    StreamsConfig config = new StreamsConfig(props);
    builder.addSource("source1", "topic1");
    builder.addProcessor("processor1", new MockProcessorSupplier(), "source1");
    builder.addStateStore(new MockStateStoreSupplier("store1", false), "processor1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor2", new MockProcessorSupplier(), "source2");
    builder.addStateStore(new MockStateStoreSupplier("store2", false), "processor2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    UUID uuid1 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    // assign() will throw an exception, failing the test, if the partial topology cannot be handled
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assignment info
    Set<TaskId> allActiveTasks = new HashSet<>();
    AssignmentInfo info10 = checkAssignment(Utils.mkSet("topic1"), assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), Properties (java.util.Properties), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), MockStateStoreSupplier (org.apache.kafka.test.MockStateStoreSupplier), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), StreamsConfig (org.apache.kafka.streams.StreamsConfig), HashSet (java.util.HashSet), Test (org.junit.Test)
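
The SingleGroupPartitionGrouperStub plugged in through PARTITION_GROUPER_CLASS_CONFIG is likewise defined elsewhere in the test class. A plausible sketch of the idea follows, assuming the stub implements the Streams PartitionGrouper contract (org.apache.kafka.streams.processor.PartitionGrouper) and delegates to DefaultPartitionGrouper for a single, arbitrarily chosen topic group so the assignor only ever sees a partial topology; the class body is an assumption for illustration, not the actual stub.

public static class SingleGroupPartitionGrouperStub implements PartitionGrouper {
    private final PartitionGrouper defaultGrouper = new DefaultPartitionGrouper();

    @Override
    public Map<TaskId, Set<TopicPartition>> partitionGroups(final Map<Integer, Set<String>> topicGroups,
                                                            final Cluster metadata) {
        // Keep a single (arbitrary) topic group so only one sub-topology is assigned tasks.
        final Map<Integer, Set<String>> singleGroup = new HashMap<>();
        for (final Map.Entry<Integer, Set<String>> entry : topicGroups.entrySet()) {
            singleGroup.put(entry.getKey(), entry.getValue());
            break;
        }
        return defaultGrouper.partitionGroups(singleGroup, metadata);
    }
}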

Example 13 with MockStateStoreSupplier

Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache, in the class StreamThreadTest, method shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskFlushDuringShutdown.

@Test
public void shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskFlushDuringShutdown() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    final MockStateStoreSupplier.MockStateStore stateStore = new MockStateStoreSupplier.MockStateStore("foo", false);
    builder.stream("t1").groupByKey().count(new MockStateStoreSupplier(stateStore));
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    final TestStreamTask testStreamTask = new TestStreamTask(new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)), builder.build(0), clientSupplier.consumer, clientSupplier.producer, clientSupplier.restoreConsumer, config, new MockStreamsMetrics(new Metrics()), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time)) {

        @Override
        public void flushState() {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return testStreamTask;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(testStreamTask.id, testStreamTask.partitions);
    thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions);
    // store should have been opened
    assertTrue(stateStore.isOpen());
    thread.start();
    thread.close();
    thread.join();
    assertFalse("task shouldn't have been committed as there was an exception during shutdown", testStreamTask.committed);
    // store should be closed even if we had an exception
    assertFalse(stateStore.isOpen());
}
Also used: KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder), TaskId (org.apache.kafka.streams.processor.TaskId), Set (java.util.Set), HashSet (java.util.HashSet), HashMap (java.util.HashMap), Metrics (org.apache.kafka.common.metrics.Metrics), StreamsMetrics (org.apache.kafka.streams.StreamsMetrics), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), TopicPartition (org.apache.kafka.common.TopicPartition), Collection (java.util.Collection), MockStateStoreSupplier (org.apache.kafka.test.MockStateStoreSupplier), MockTime (org.apache.kafka.common.utils.MockTime), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
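
The assertions on stateStore.isOpen() rely on the mock store tracking its own lifecycle: init() marks it open and close() marks it closed again, even though flushState() throws during shutdown. The class below is a hypothetical stand-in that illustrates this idea, not the actual org.apache.kafka.test.MockStateStoreSupplier.MockStateStore; in particular, isOpen() is written as a plain helper on the mock rather than claimed to be part of the StateStore interface, and the sketch assumes imports of org.apache.kafka.streams.processor.StateStore and org.apache.kafka.streams.processor.ProcessorContext.

public class LifecycleTrackingStore implements StateStore {
    private final String name;
    private final boolean persistent;
    private boolean open = false;

    public LifecycleTrackingStore(final String name, final boolean persistent) {
        this.name = name;
        this.persistent = persistent;
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public void init(final ProcessorContext context, final StateStore root) {
        // Opened when the task initializes its state; the test asserts this after the rebalance.
        open = true;
    }

    @Override
    public void flush() {
        // No-op here; the flush failure in the test is injected by TestStreamTask, not the store.
    }

    @Override
    public void close() {
        // Closed during shutdown even if flushing failed; the final assertion checks this.
        open = false;
    }

    @Override
    public boolean persistent() {
        return persistent;
    }

    public boolean isOpen() {
        return open;
    }
}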

Aggregations

MockStateStoreSupplier (org.apache.kafka.test.MockStateStoreSupplier): 13 usages
Test (org.junit.Test): 13 usages
MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier): 8 usages
HashMap (java.util.HashMap): 4 usages
HashSet (java.util.HashSet): 3 usages
List (java.util.List): 3 usages
Metrics (org.apache.kafka.common.metrics.Metrics): 3 usages
Utils.mkList (org.apache.kafka.common.utils.Utils.mkList): 3 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 3 usages
Properties (java.util.Properties): 2 usages
UUID (java.util.UUID): 2 usages
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 2 usages
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 2 usages
TopicsInfo (org.apache.kafka.streams.processor.TopologyBuilder.TopicsInfo): 2 usages
InternalTopicConfig (org.apache.kafka.streams.processor.internals.InternalTopicConfig): 2 usages
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 2 usages
SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo): 2 usages
MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager): 2 usages
Collection (java.util.Collection): 1 usage
Set (java.util.Set): 1 usage