Example 1 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in project kafka by apache.

From the class StreamsPartitionAssignorTest, method testAssignBasic.

@Test
public void testAssignBasic() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");
    final List<String> topics = asList("topic1", "topic2");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    final Set<TaskId> prevTasks10 = mkSet(TASK_0_0);
    final Set<TaskId> prevTasks11 = mkSet(TASK_0_1);
    final Set<TaskId> prevTasks20 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks10 = EMPTY_TASKS;
    final Set<TaskId> standbyTasks11 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks20 = mkSet(TASK_0_0);
    createMockTaskManager(prevTasks10, standbyTasks10);
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(singletonList(APPLICATION_ID + "-store-changelog"), singletonList(3)));
    configureDefaultPartitionAssignor();
    subscriptions.put("consumer10", new Subscription(topics, getInfo(UUID_1, prevTasks10, standbyTasks10).encode()));
    subscriptions.put("consumer11", new Subscription(topics, getInfo(UUID_1, prevTasks11, standbyTasks11).encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, prevTasks20, standbyTasks20).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    // check the assignment
    assertEquals(mkSet(mkSet(t1p0, t2p0), mkSet(t1p1, t2p1)), mkSet(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));
    assertEquals(mkSet(t1p2, t2p2), new HashSet<>(assignments.get("consumer20").partitions()));
    // check assignment info
    // the first consumer
    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());
    // the second consumer
    final AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks());
    assertEquals(mkSet(TASK_0_0, TASK_0_1), allActiveTasks);
    // the third consumer
    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks());
    // the active tasks across all consumers cover exactly the full task set
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), HashSet (java.util.HashSet), Test (org.junit.Test)
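MockKeyValueStoreBuilder is an internal Kafka test utility rather than a public API. Outside the Kafka test sources, a roughly equivalent store can be declared with the public Stores factory; the following is a minimal sketch under that assumption (the "store" name matches the test, while the String serdes are purely illustrative):

// Also needed: org.apache.kafka.common.serialization.Serdes,
// org.apache.kafka.streams.state.KeyValueStore, StoreBuilder, Stores
final StoreBuilder<KeyValueStore<String, String>> storeBuilder =
    Stores.keyValueStoreBuilder(
        // in-memory supplier named "store", standing in for the non-persistent mock above
        Stores.inMemoryKeyValueStore("store"),
        Serdes.String(),   // key serde (illustrative)
        Serdes.String());  // value serde (illustrative)

Attaching it with builder.addStateStore(storeBuilder, "processor") would mirror the addStateStore call in the test.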

Example 2 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in project kafka by apache.

From the class StreamsPartitionAssignorTest, method testAssignWithStates.

@Test
public void testAssignWithStates() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor-1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor-1");
    builder.addProcessor("processor-2", new MockApiProcessorSupplier<>(), "source2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store2", false), "processor-2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store3", false), "processor-2");
    final List<String> topics = asList("topic1", "topic2");
    final List<TaskId> tasks = asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2);
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(asList(APPLICATION_ID + "-store1-changelog", APPLICATION_ID + "-store2-changelog", APPLICATION_ID + "-store3-changelog"), asList(3, 3, 3)));
    configureDefault();
    subscriptions.put("consumer10", new Subscription(topics, defaultSubscriptionInfo.encode()));
    subscriptions.put("consumer11", new Subscription(topics, defaultSubscriptionInfo.encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, EMPTY_TASKS, EMPTY_TASKS).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    // check assigned partition size: since there are no previous tasks and there are two sub-topologies, the assignment is random, so we cannot check for an exact match
    assertEquals(2, assignments.get("consumer10").partitions().size());
    assertEquals(2, assignments.get("consumer11").partitions().size());
    assertEquals(2, assignments.get("consumer20").partitions().size());
    final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    final AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
    assertEquals(2, info10.activeTasks().size());
    assertEquals(2, info11.activeTasks().size());
    assertEquals(2, info20.activeTasks().size());
    final Set<TaskId> allTasks = new HashSet<>();
    allTasks.addAll(info10.activeTasks());
    allTasks.addAll(info11.activeTasks());
    allTasks.addAll(info20.activeTasks());
    assertEquals(new HashSet<>(tasks), allTasks);
    // check tasks for state topics
    final Map<Subtopology, InternalTopologyBuilder.TopicsInfo> topicGroups = builder.subtopologyToTopicsInfo();
    assertEquals(mkSet(TASK_0_0, TASK_0_1, TASK_0_2), tasksForState("store1", tasks, topicGroups));
    assertEquals(mkSet(TASK_1_0, TASK_1_1, TASK_1_2), tasksForState("store2", tasks, topicGroups));
    assertEquals(mkSet(TASK_1_0, TASK_1_1, TASK_1_2), tasksForState("store3", tasks, topicGroups));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), Subtopology (org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology), MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), HashSet (java.util.HashSet), Test (org.junit.Test)
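The mocked end offsets follow the Kafka Streams changelog naming convention, <application.id>-<store name>-changelog, one changelog per state store. A small sketch of how those inputs line up (assuming, since the helper is not shown on this page, that getTopicPartitionOffsetsMap pairs each topic name with a partition count):

// Also needed: java.util.Arrays, java.util.List
// One changelog topic per store, named "<application.id>-<store name>-changelog".
final List<String> changelogTopics = Arrays.asList(
    APPLICATION_ID + "-store1-changelog",
    APPLICATION_ID + "-store2-changelog",
    APPLICATION_ID + "-store3-changelog");
// Three entries each, matching the three partitions of topic1 and topic2.
final List<Integer> changelogPartitionCounts = Arrays.asList(3, 3, 3);

Because store1 hangs off processor-1 (fed by topic1) while store2 and store3 hang off processor-2 (fed by topic2), the stores split across two sub-topologies, which is what the tasksForState assertions verify.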

Example 3 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in project kafka by apache.

From the class StreamsPartitionAssignorTest, method shouldThrowIllegalStateExceptionIfAnyTopicsMissingFromChangelogEndOffsets.

@Test
public void shouldThrowIllegalStateExceptionIfAnyTopicsMissingFromChangelogEndOffsets() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store2", false), "processor1");
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(singletonList(APPLICATION_ID + "-store1-changelog"), singletonList(3)));
    configureDefault();
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    assertThrows(IllegalStateException.class, () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
Also used: GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), Test (org.junit.Test)
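The assignor looks up end offsets for every changelog topic in the topology before computing lags; here the mocked admin client only knows the store1 changelog, so the missing store2 changelog triggers the IllegalStateException. As a sketch only (reusing the parallel-list form of the helper shown in Example 2, which is an assumption about its contract), seeding both changelogs would let the same assignment complete:

// Sketch: provide end offsets for both changelogs so no topic is missing.
adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
    asList(APPLICATION_ID + "-store1-changelog", APPLICATION_ID + "-store2-changelog"),
    asList(3, 3)));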

Example 4 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in project kafka by apache.

From the class StreamThreadTest, method shouldCreateStandbyTask.

@Test
public void shouldCreateStandbyTask() {
    setupInternalTopologyWithoutState();
    internalTopologyBuilder.addStateStore(new MockKeyValueStoreBuilder("myStore", true), "processor1");
    assertThat(createStandbyTask(), not(empty()));
}
Also used: MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), Test (org.junit.Test)
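In a running application, standby tasks only appear for changelogged stores and only when standby replicas are enabled; the test above instead creates one directly through the thread's task creator. A minimal configuration sketch for the production path (application id and bootstrap servers are illustrative values):

// Also needed: java.util.Properties, org.apache.kafka.streams.StreamsConfig
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "standby-task-example");  // illustrative
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");     // illustrative
// One standby replica per active task; the default of 0 means no standby tasks.
props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);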

Example 5 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in project kafka by apache.

From the class HighAvailabilityStreamsPartitionAssignorTest, method shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled.

@Test
public void shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled() {
    final long rebalanceInterval = 5 * 60 * 1000L;
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    createMockTaskManager(allTasks);
    adminClient = EasyMock.createMock(AdminClient.class);
    expect(adminClient.listOffsets(anyObject())).andThrow(new StreamsException("Should be handled"));
    configurePartitionAssignorWith(singletonMap(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, rebalanceInterval));
    final String firstConsumer = "consumer1";
    final String newConsumer = "consumer2";
    subscriptions.put(firstConsumer, new Subscription(singletonList("source1"), getInfo(UUID_1, allTasks).encode()));
    subscriptions.put(newConsumer, new Subscription(singletonList("source1"), getInfo(UUID_2, EMPTY_TASKS).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final AssignmentInfo firstConsumerUserData = AssignmentInfo.decode(assignments.get(firstConsumer).userData());
    final List<TaskId> firstConsumerActiveTasks = firstConsumerUserData.activeTasks();
    final AssignmentInfo newConsumerUserData = AssignmentInfo.decode(assignments.get(newConsumer).userData());
    final List<TaskId> newConsumerActiveTasks = newConsumerUserData.activeTasks();
    // The tasks were returned to their prior owner
    final ArrayList<TaskId> sortedExpectedTasks = new ArrayList<>(allTasks);
    Collections.sort(sortedExpectedTasks);
    assertThat(firstConsumerActiveTasks, equalTo(sortedExpectedTasks));
    assertThat(newConsumerActiveTasks, empty());
    // There is a rebalance scheduled
    assertThat(time.milliseconds() + rebalanceInterval, anyOf(is(firstConsumerUserData.nextRebalanceMs()), is(newConsumerUserData.nextRebalanceMs())));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), StreamsException (org.apache.kafka.streams.errors.StreamsException), ArrayList (java.util.ArrayList), Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), AdminClient (org.apache.kafka.clients.admin.AdminClient), Test (org.junit.Test)
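The interval used here corresponds to the real StreamsConfig setting probing.rebalance.interval.ms, which tells the high-availability assignor how long to wait before scheduling a follow-up rebalance to probe whether warm-up replicas have caught up. A minimal sketch of setting it to the test's five-minute value:

// Also needed: java.util.Properties, org.apache.kafka.streams.StreamsConfig
final Properties props = new Properties();
// 5 minutes, matching rebalanceInterval in the test above.
props.put(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, 5 * 60 * 1000L);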

Aggregations

MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder): 22
Test (org.junit.Test): 21
GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription): 11
Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription): 11
Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment): 7
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 6
HashSet (java.util.HashSet): 5
TaskId (org.apache.kafka.streams.processor.TaskId): 5
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 5
ArrayList (java.util.ArrayList): 4
HashMap (java.util.HashMap): 4
AdminClient (org.apache.kafka.clients.admin.AdminClient): 4
TopologyException (org.apache.kafka.streams.errors.TopologyException): 4
Map (java.util.Map): 3
TopicPartition (org.apache.kafka.common.TopicPartition): 3
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 3
Collections.emptyMap (java.util.Collections.emptyMap): 2
Collections.singletonMap (java.util.Collections.singletonMap): 2
Set (java.util.Set): 2
ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult): 2