Search in sources:

Example 16 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in the Apache Kafka project.

From class InternalTopologyBuilderTest, method shouldNotAllowToAddStoresWithSameNameWhenSecondStoreIsGlobal.

// Registering a global store whose name collides with an already added state store
// must be rejected with a TopologyException. The class-level field storeBuilder
// presumably also uses the name "testStore" — the expected message confirms the clash.
@Test
public void shouldNotAllowToAddStoresWithSameNameWhenSecondStoreIsGlobal() {
    final StoreBuilder<KeyValueStore<Object, Object>> globalStoreBuilder =
        new MockKeyValueStoreBuilder("testStore", false).withLoggingDisabled();

    // Add the regular store first, then attempt to add the global store with the same name.
    builder.addStateStore(storeBuilder);
    final TopologyException exception = assertThrows(
        TopologyException.class,
        () -> builder.addGlobalStore(globalStoreBuilder, "global-store", null, null, null, "global-topic", "global-processor", new MockApiProcessorSupplier<>()));

    assertThat(
        exception.getMessage(),
        equalTo("Invalid topology: A different StateStore has already been added with the name testStore"));
}
Also used : MockApiProcessorSupplier(org.apache.kafka.test.MockApiProcessorSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) TopologyException(org.apache.kafka.streams.errors.TopologyException) Test(org.junit.Test)

Example 17 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in the Apache Kafka project.

From class StreamsAssignmentScaleTest, method completeLargeAssignment.

// Drives a large-scale assignment benchmark: builds a one-topic topology with a single
// stateful store, simulates numClients clients with numThreadsPerClient consumers each,
// and runs two consecutive rebalances through StreamsPartitionAssignor. Each assignment
// must finish within MAX_ASSIGNMENT_DURATION or an AssertionError is thrown.
private void completeLargeAssignment(final int numPartitions, final int numClients, final int numThreadsPerClient, final int numStandbys, final Class<? extends TaskAssignor> taskAssignor) {
    final List<String> topic = singletonList("topic");
    // Report a non-zero end offset for every changelog partition so tasks appear to have state.
    final Map<TopicPartition, Long> changelogEndOffsets = new HashMap<>();
    for (int p = 0; p < numPartitions; ++p) {
        changelogEndOffsets.put(new TopicPartition(APPLICATION_ID + "-store-changelog", p), 100_000L);
    }
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    for (int p = 0; p < numPartitions; ++p) {
        partitionInfos.add(new PartitionInfo("topic", p, Node.noNode(), new Node[0], new Node[0]));
    }
    final Cluster clusterMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), partitionInfos, emptySet(), emptySet());
    final Map<String, Object> configMap = new HashMap<>();
    configMap.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID);
    configMap.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8080");
    // Minimal topology: source -> processor -> state store.
    final InternalTopologyBuilder builder = new InternalTopologyBuilder();
    builder.addSource(null, "source", null, null, null, "topic");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");
    final TopologyMetadata topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configMap));
    topologyMetadata.buildAndRewriteTopology();
    // Nice mocks for the collaborators the assignor reaches through the ReferenceContainer.
    final Consumer<byte[], byte[]> mainConsumer = EasyMock.createNiceMock(Consumer.class);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.topologyMetadata()).andStubReturn(topologyMetadata);
    expect(mainConsumer.committed(new HashSet<>())).andStubReturn(Collections.emptyMap());
    final AdminClient adminClient = createMockAdminClientForAssignor(changelogEndOffsets);
    final ReferenceContainer referenceContainer = new ReferenceContainer();
    referenceContainer.mainConsumer = mainConsumer;
    referenceContainer.adminClient = adminClient;
    referenceContainer.taskManager = taskManager;
    referenceContainer.streamsMetadataState = EasyMock.createNiceMock(StreamsMetadataState.class);
    referenceContainer.time = new MockTime();
    configMap.put(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR, referenceContainer);
    configMap.put(InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, taskAssignor.getName());
    configMap.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, numStandbys);
    final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(new MockTime(), new StreamsConfig(configMap), new MockClientSupplier().restoreConsumer, false);
    // replay() must come after all expect() stubs above — do not reorder.
    EasyMock.replay(taskManager, adminClient, mainConsumer);
    final StreamsPartitionAssignor partitionAssignor = new StreamsPartitionAssignor();
    partitionAssignor.configure(configMap);
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    // One subscription per consumer thread; uuidForInt presumably derives a stable
    // per-client UUID — the same value is reused for the second rebalance below.
    final Map<String, Subscription> subscriptions = new HashMap<>();
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            subscriptions.put(getConsumerName(i, client), new Subscription(topic, getInfo(uuidForInt(client), EMPTY_TASKS, EMPTY_TASKS).encode()));
        }
    }
    // First rebalance: every consumer joins with empty previous-task metadata.
    final long firstAssignmentStartMs = System.currentTimeMillis();
    final Map<String, Assignment> firstAssignments = partitionAssignor.assign(clusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    final long firstAssignmentEndMs = System.currentTimeMillis();
    final long firstAssignmentDuration = firstAssignmentEndMs - firstAssignmentStartMs;
    if (firstAssignmentDuration > MAX_ASSIGNMENT_DURATION) {
        throw new AssertionError("The first assignment took too long to complete at " + firstAssignmentDuration + "ms.");
    } else {
        log.info("First assignment took {}ms.", firstAssignmentDuration);
    }
    // Use the assignment to generate the subscriptions' prev task data for the next rebalance
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            final String consumer = getConsumerName(i, client);
            final Assignment assignment = firstAssignments.get(consumer);
            final AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
            subscriptions.put(consumer, new Subscription(topic, getInfo(uuidForInt(client), new HashSet<>(info.activeTasks()), info.standbyTasks().keySet()).encode(), assignment.partitions()));
        }
    }
    // Second rebalance: consumers now report the active/standby tasks they were just given.
    final long secondAssignmentStartMs = System.currentTimeMillis();
    final Map<String, Assignment> secondAssignments = partitionAssignor.assign(clusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    final long secondAssignmentEndMs = System.currentTimeMillis();
    final long secondAssignmentDuration = secondAssignmentEndMs - secondAssignmentStartMs;
    if (secondAssignmentDuration > MAX_ASSIGNMENT_DURATION) {
        throw new AssertionError("The second assignment took too long to complete at " + secondAssignmentDuration + "ms.");
    } else {
        log.info("Second assignment took {}ms.", secondAssignmentDuration);
    }
    // Every consumer must receive an entry in the second group assignment.
    assertThat(secondAssignments.size(), is(numClients * numThreadsPerClient));
}
Also used : ReferenceContainer(org.apache.kafka.streams.processor.internals.assignment.ReferenceContainer) HashMap(java.util.HashMap) MockInternalTopicManager(org.apache.kafka.test.MockInternalTopicManager) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) PartitionInfo(org.apache.kafka.common.PartitionInfo) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) HashSet(java.util.HashSet) Cluster(org.apache.kafka.common.Cluster) TopicPartition(org.apache.kafka.common.TopicPartition) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Example 18 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in the Apache Kafka project.

From class StreamsPartitionAssignorTest, method shouldSkipListOffsetsRequestForNewlyCreatedChangelogTopics.

@Test
public void shouldSkipListOffsetsRequestForNewlyCreatedChangelogTopics() {
    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(emptyMap());
    expect(adminClient.listOffsets(emptyMap())).andStubReturn(result);
    expect(result.all()).andReturn(allFuture);
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    EasyMock.replay(result);
    configureDefault();
    overwriteInternalTopicManagerWithMock(true);
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
    EasyMock.verify(adminClient);
}
Also used : ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) Matchers.anEmptyMap(org.hamcrest.Matchers.anEmptyMap) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) AdminClient(org.apache.kafka.clients.admin.AdminClient) Test(org.junit.Test)

Example 19 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in the Apache Kafka project.

From class StreamsPartitionAssignorTest, method testAssignWithStandbyReplicasAndLoggingDisabled.

@Test
public void testAssignWithStandbyReplicasAndLoggingDisabled() {
    builder.addSource(null, "source1", null, null, null, "topic1", "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false).withLoggingDisabled(), "processor");
    final List<String> topics = asList("topic1", "topic2");
    createMockTaskManager(mkSet(TASK_0_0), emptySet());
    configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1));
    subscriptions.put("consumer10", new Subscription(topics, getInfo(UUID_1, mkSet(TASK_0_0), emptySet()).encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, mkSet(TASK_0_2), emptySet()).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    assertTrue(info10.standbyTasks().isEmpty());
    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    assertTrue(info20.standbyTasks().isEmpty());
}
Also used : Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) Test(org.junit.Test)

Example 20 with MockKeyValueStoreBuilder

Use of org.apache.kafka.test.MockKeyValueStoreBuilder in the Apache Kafka project.

From class StreamsPartitionAssignorTest, method shouldRequestEndOffsetsForPreexistingChangelogs.

@Test
public void shouldRequestEndOffsetsForPreexistingChangelogs() {
    final Set<TopicPartition> changelogs = mkSet(new TopicPartition(APPLICATION_ID + "-store-changelog", 0), new TopicPartition(APPLICATION_ID + "-store-changelog", 1), new TopicPartition(APPLICATION_ID + "-store-changelog", 2));
    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(Long.MAX_VALUE);
        EasyMock.replay(info);
        return info;
    })));
    final Capture<Map<TopicPartition, OffsetSpec>> capturedChangelogs = EasyMock.newCapture();
    expect(adminClient.listOffsets(EasyMock.capture(capturedChangelogs))).andReturn(result).once();
    expect(result.all()).andReturn(allFuture);
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor1");
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    EasyMock.replay(result);
    configureDefault();
    overwriteInternalTopicManagerWithMock(false);
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
    EasyMock.verify(adminClient);
    assertThat(capturedChangelogs.getValue().keySet(), equalTo(changelogs));
}
Also used : ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TopicPartition(org.apache.kafka.common.TopicPartition) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) Matchers.anEmptyMap(org.hamcrest.Matchers.anEmptyMap) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) AdminClient(org.apache.kafka.clients.admin.AdminClient) Test(org.junit.Test)

Aggregations

MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder)22 Test (org.junit.Test)21 GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription)11 Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription)11 Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment)7 AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo)6 HashSet (java.util.HashSet)5 TaskId (org.apache.kafka.streams.processor.TaskId)5 KeyValueStore (org.apache.kafka.streams.state.KeyValueStore)5 ArrayList (java.util.ArrayList)4 HashMap (java.util.HashMap)4 AdminClient (org.apache.kafka.clients.admin.AdminClient)4 TopologyException (org.apache.kafka.streams.errors.TopologyException)4 Map (java.util.Map)3 TopicPartition (org.apache.kafka.common.TopicPartition)3 CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString)3 Collections.emptyMap (java.util.Collections.emptyMap)2 Collections.singletonMap (java.util.Collections.singletonMap)2 Set (java.util.Set)2 ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult)2