Example 76 with Context

Use of io.fabric8.mockwebserver.Context in project strimzi by strimzi.

From class ClusterControllerTest, method startStop.

/**
 * Does the cluster controller start, and then stop, one verticle per namespace?
 * @param context the test context
 * @param namespaces a comma-separated list of namespaces to watch
 */
private void startStop(TestContext context, String namespaces) {
    AtomicInteger numWatchers = new AtomicInteger(0);
    KubernetesClient client = mock(KubernetesClient.class);
    MixedOperation mockCms = mock(MixedOperation.class);
    when(client.configMaps()).thenReturn(mockCms);
    List<String> namespaceList = asList(namespaces.split(" *,+ *"));
    for (String namespace : namespaceList) {
        MixedOperation mockNamespacedCms = mock(MixedOperation.class);
        // Count each watcher created; return a mock Watch whose close()
        // invokes the watcher's onClose() callback, as the real client would.
        when(mockNamespacedCms.watch(any())).thenAnswer(invo -> {
            numWatchers.incrementAndGet();
            Watch mockWatch = mock(Watch.class);
            doAnswer(invo2 -> {
                ((Watcher) invo.getArgument(0)).onClose(null);
                return null;
            }).when(mockWatch).close();
            return mockWatch;
        });
        when(mockNamespacedCms.withLabels(any())).thenReturn(mockNamespacedCms);
        when(mockCms.inNamespace(namespace)).thenReturn(mockNamespacedCms);
    }
    Async async = context.async();
    Map<String, String> env = new HashMap<>();
    env.put(ClusterControllerConfig.STRIMZI_NAMESPACE, namespaces);
    env.put(ClusterControllerConfig.STRIMZI_CONFIGMAP_LABELS, STRIMZI_IO_KIND_CLUSTER);
    env.put(ClusterControllerConfig.STRIMZI_FULL_RECONCILIATION_INTERVAL_MS, "120000");
    Main.run(vertx, client, true, env).setHandler(ar -> {
        context.assertNull(ar.cause(), "Expected all verticles to start OK");
        async.complete();
    });
    async.await();
    context.assertEquals(namespaceList.size(), vertx.deploymentIDs().size(), "A verticle per namespace");
    List<Async> asyncs = new ArrayList<>();
    for (String deploymentId : vertx.deploymentIDs()) {
        Async async2 = context.async();
        asyncs.add(async2);
        vertx.undeploy(deploymentId, ar -> {
            context.assertNull(ar.cause(), "Didn't expect error when undeploying verticle " + deploymentId);
            async2.complete();
        });
    }
    for (Async async2 : asyncs) {
        async2.await();
    }
    if (numWatchers.get() > namespaceList.size()) {
        context.fail("Looks like there were more watchers than namespaces");
    }
}
Also used : KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HashMap(java.util.HashMap) Async(io.vertx.ext.unit.Async) Watch(io.fabric8.kubernetes.client.Watch) ArrayList(java.util.ArrayList) Watcher(io.fabric8.kubernetes.client.Watcher) MixedOperation(io.fabric8.kubernetes.client.dsl.MixedOperation)
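
For orientation, a minimal sketch of how this helper might be driven from vertx-unit test methods; the method names and namespace strings are illustrative assumptions, not taken from the original ClusterControllerTest.

// Hypothetical drivers for startStop(...); names and namespace lists are
// assumptions for illustration, not from the original test class.
@Test
public void startStopSingleNamespace(TestContext context) {
    startStop(context, "namespace");
}

@Test
public void startStopMultipleNamespaces(TestContext context) {
    // The helper splits on commas with optional surrounding spaces
    // (" *,+ *"), so this exercises the one-verticle-per-namespace path.
    startStop(context, "namespace1, namespace2, namespace3");
}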

Example 77 with Context

Use of io.fabric8.mockwebserver.Context in project strimzi by strimzi.

From class KafkaAssemblyOperatorMockIT, method assertStorageClass.

private void assertStorageClass(TestContext context, String statefulSetName, String expectedClass) {
    StatefulSet statefulSet = mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(statefulSetName).get();
    context.assertNotNull(statefulSet);
    // Check that the storage class of the first volume claim template matches the expected class
    List<PersistentVolumeClaim> volumeClaimTemplates = statefulSet.getSpec().getVolumeClaimTemplates();
    context.assertFalse(volumeClaimTemplates.isEmpty());
    context.assertEquals(expectedClass, volumeClaimTemplates.get(0).getSpec().getStorageClassName());
}
Also used : PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim) StatefulSet(io.fabric8.kubernetes.api.model.extensions.StatefulSet)
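
A hedged example of calling this helper, assuming the CLUSTER_NAME constant and the KafkaCluster naming helper used by the other examples in this class; the "foo" storage class name is an assumption for illustration.

// Illustrative call site; the StatefulSet name comes from the same helper the
// scale tests use, and "foo" is an assumed storage class, not a fixed value.
assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), "foo");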

Example 78 with Context

Use of io.fabric8.mockwebserver.Context in project strimzi by strimzi.

From class KafkaAssemblyOperatorMockIT, method testUpdateKafkaWithChangedDeleteClaim.

/**
 * Test that we can change the deleteClaim flag, and that it's honoured
 */
@Test
public void testUpdateKafkaWithChangedDeleteClaim(TestContext context) {
    if (!Storage.StorageType.PERSISTENT_CLAIM.equals(storageType(kafkaStorage))) {
        LOGGER.info("Skipping claim-based test because using storage type {}", kafkaStorage);
        return;
    }
    Set<String> allPvcs = new HashSet<>();
    Set<String> kafkaPvcs = createPvcs(kafkaStorage, kafkaReplicas, podId -> KafkaCluster.getPersistentVolumeClaimName(KafkaCluster.kafkaClusterName(CLUSTER_NAME), podId));
    Set<String> zkPvcs = createPvcs(zkStorage, zkReplicas, podId -> ZookeeperCluster.getPersistentVolumeClaimName(ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME), podId));
    allPvcs.addAll(kafkaPvcs);
    allPvcs.addAll(zkPvcs);
    KafkaAssemblyOperator kco = createCluster(context);
    boolean originalKafkaDeleteClaim = deleteClaim(kafkaStorage);
    // assertDeleteClaim(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalKafkaDeleteClaim);
    // Flip the deleteClaim flag in the cluster ConfigMap
    boolean changedKafkaDeleteClaim = !originalKafkaDeleteClaim;
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_STORAGE, new JsonObject(kafkaStorage.toString()).put(Storage.DELETE_CLAIM_FIELD, changedKafkaDeleteClaim).toString());
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
    LOGGER.info("Updating with changed delete claim");
    Async updateAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        updateAsync.complete();
    });
    updateAsync.await();
    LOGGER.info("Reconciling again -> delete");
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).delete();
    Async deleteAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        // Claims whose deleteClaim flag is false should survive the cluster deletion
        assertPvcs(context, changedKafkaDeleteClaim
                ? (deleteClaim(zkStorage) ? emptySet() : zkPvcs)
                : (deleteClaim(zkStorage) ? kafkaPvcs : allPvcs));
        deleteAsync.complete();
    });
    deleteAsync.await();
}
Also used : ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) Async(io.vertx.ext.unit.Async) Reconciliation(io.strimzi.controller.cluster.Reconciliation) JsonObject(io.vertx.core.json.JsonObject) HashSet(java.util.HashSet) Test(org.junit.Test)
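
The nested ternary passed to assertPvcs above is dense; here is equivalent, unrolled logic as a sketch. The helper name expectedSurvivingPvcs is a hypothetical addition for readability, not part of the original test.

// Hypothetical helper spelling out the expected-PVC computation; emptySet()
// is java.util.Collections.emptySet(), as in the test above.
private Set<String> expectedSurvivingPvcs(boolean kafkaDeleteClaim, boolean zkDeleteClaim,
        Set<String> kafkaPvcs, Set<String> zkPvcs, Set<String> allPvcs) {
    if (kafkaDeleteClaim && zkDeleteClaim) {
        // Both the Kafka and Zookeeper claims are deleted with the cluster
        return emptySet();
    } else if (kafkaDeleteClaim) {
        // Only the Zookeeper claims survive
        return zkPvcs;
    } else if (zkDeleteClaim) {
        // Only the Kafka claims survive
        return kafkaPvcs;
    } else {
        // Nothing is deleted
        return allPvcs;
    }
}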

Example 79 with Context

Use of io.fabric8.mockwebserver.Context in project strimzi by strimzi.

From class KafkaAssemblyOperatorMockIT, method testKafkaScaleUp.

/**
 * Test that raising the replica count in the Kafka cluster ConfigMap scales the cluster up by one broker
 */
@Test
public void testKafkaScaleUp(TestContext context) {
    KafkaAssemblyOperator kco = createCluster(context);
    Async updateAsync = context.async();
    int newScale = kafkaReplicas + 1;
    String newPod = KafkaCluster.kafkaPodName(CLUSTER_NAME, kafkaReplicas);
    context.assertNull(mockClient.pods().inNamespace(NAMESPACE).withName(newPod).get());
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
    LOGGER.info("Scaling up to {} Kafka pods", newScale);
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        context.assertEquals(newScale, mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get().getSpec().getReplicas());
        context.assertNotNull(mockClient.pods().inNamespace(NAMESPACE).withName(newPod).get(), "Expected pod " + newPod + " to have been created");
        // TODO assert no rolling update
        updateAsync.complete();
    });
    updateAsync.await();
}
Also used : ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) Async(io.vertx.ext.unit.Async) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) Reconciliation(io.strimzi.controller.cluster.Reconciliation) Test(org.junit.Test)
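
This test and the scale-down test below patch the same KEY_REPLICAS entry, so the shared step could be factored out. A sketch, where the helper name patchKafkaReplicas is hypothetical and the cluster, mockClient, NAMESPACE, and CLUSTER_NAME members are assumed from the surrounding class:

// Hypothetical helper factoring out the ConfigMap patch that both scale
// tests perform before triggering reconciliation.
private void patchKafkaReplicas(int newScale) {
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
}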

Example 80 with Context

Use of io.fabric8.mockwebserver.Context in project strimzi by strimzi.

From class KafkaAssemblyOperatorMockIT, method testKafkaScaleDown.

/**
 * Test that lowering the replica count in the Kafka cluster ConfigMap scales the cluster down by one broker
 */
@Test
public void testKafkaScaleDown(TestContext context) {
    if (kafkaReplicas <= 1) {
        LOGGER.info("Skipping scale down test because there's only 1 broker");
        return;
    }
    KafkaAssemblyOperator kco = createCluster(context);
    Async updateAsync = context.async();
    int newScale = kafkaReplicas - 1;
    String deletedPod = KafkaCluster.kafkaPodName(CLUSTER_NAME, newScale);
    context.assertNotNull(mockClient.pods().inNamespace(NAMESPACE).withName(deletedPod).get());
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
    LOGGER.info("Scaling down to {} Kafka pods", newScale);
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        context.assertEquals(newScale, mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get().getSpec().getReplicas());
        context.assertNull(mockClient.pods().inNamespace(NAMESPACE).withName(deletedPod).get(), "Expected pod " + deletedPod + " to have been deleted");
        // TODO assert no rolling update
        updateAsync.complete();
    });
    updateAsync.await();
}
Also used : ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) Async(io.vertx.ext.unit.Async) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) Reconciliation(io.strimzi.controller.cluster.Reconciliation) Test(org.junit.Test)
