Search in sources:

Example 1 with StatefulSet

Use of io.fabric8.kubernetes.api.model.apps.StatefulSet in project jointware by isdream.

From the class KubernetesKeyValueStyleGeneratorTest, method testKubernetesWithAllKind:

protected static void testKubernetesWithAllKind() throws Exception {
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ServiceAccount());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ThirdPartyResource());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ResourceQuota());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Node());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ConfigMap());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new NetworkPolicy());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new CustomResourceDefinition());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Ingress());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Service());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Namespace());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Secret());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new LimitRange());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Event());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new PersistentVolume());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new StatefulSet());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new PersistentVolumeClaim());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new DaemonSet());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new HorizontalPodAutoscaler());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Pod());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ReplicaSet());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Job());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ReplicationController());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Deployment());
}
Also used: ServiceAccount(io.fabric8.kubernetes.api.model.ServiceAccount), ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap), Pod(io.fabric8.kubernetes.api.model.Pod), ThirdPartyResource(io.fabric8.kubernetes.api.model.extensions.ThirdPartyResource), NetworkPolicy(io.fabric8.kubernetes.api.model.extensions.NetworkPolicy), CustomResourceDefinition(io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinition), Node(io.fabric8.kubernetes.api.model.Node), Ingress(io.fabric8.kubernetes.api.model.extensions.Ingress), Service(io.fabric8.kubernetes.api.model.Service), Deployment(io.fabric8.kubernetes.api.model.extensions.Deployment), Namespace(io.fabric8.kubernetes.api.model.Namespace), Secret(io.fabric8.kubernetes.api.model.Secret), LimitRange(io.fabric8.kubernetes.api.model.LimitRange), ResourceQuota(io.fabric8.kubernetes.api.model.ResourceQuota), ReplicationController(io.fabric8.kubernetes.api.model.ReplicationController), HorizontalPodAutoscaler(io.fabric8.kubernetes.api.model.HorizontalPodAutoscaler), Event(io.fabric8.kubernetes.api.model.Event), PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim), DaemonSet(io.fabric8.kubernetes.api.model.extensions.DaemonSet), PersistentVolume(io.fabric8.kubernetes.api.model.PersistentVolume), StatefulSet(io.fabric8.kubernetes.api.model.extensions.StatefulSet), Job(io.fabric8.kubernetes.api.model.Job), KubernetesDocumentKeyValueStyleGenerator(com.github.isdream.chameleon.docs.KubernetesDocumentKeyValueStyleGenerator), ReplicaSet(io.fabric8.kubernetes.api.model.extensions.ReplicaSet)
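
For readers less familiar with the fabric8 model classes exercised above, here is a minimal sketch of building a StatefulSet with the fluent builder. It assumes the apps/v1 model from a recent fabric8 kubernetes-model artifact (the project above still imports the class from io.fabric8.kubernetes.api.model.extensions); the name, labels, and image are illustrative only, and nothing is sent to a cluster.

import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder;

public class StatefulSetModelSketch {

    // Builds an in-memory StatefulSet object only; no API server interaction.
    public static StatefulSet exampleStatefulSet() {
        return new StatefulSetBuilder()
            .withNewMetadata()
                // illustrative name and labels
                .withName("example")
                .addToLabels("app", "example")
            .endMetadata()
            .withNewSpec()
                .withReplicas(3)
                // headless Service assumed to exist separately
                .withServiceName("example-headless")
                .withNewSelector()
                    .addToMatchLabels("app", "example")
                .endSelector()
                .withNewTemplate()
                    .withNewMetadata()
                        .addToLabels("app", "example")
                    .endMetadata()
                    .withNewSpec()
                        .addNewContainer()
                            .withName("example")
                            // illustrative image
                            .withImage("nginx:1.25")
                        .endContainer()
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();
    }
}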

Example 2 with StatefulSet

Use of io.fabric8.kubernetes.api.model.apps.StatefulSet in project fabric8 by jboss-fuse.

From the class Controller, method applyEntity:

/**
 * Applies the given DTOs onto the Kubernetes master
 */
public void applyEntity(Object dto, String sourceName) throws Exception {
    if (dto instanceof Pod) {
        applyPod((Pod) dto, sourceName);
    } else if (dto instanceof ReplicationController) {
        applyReplicationController((ReplicationController) dto, sourceName);
    } else if (dto instanceof Service) {
        applyService((Service) dto, sourceName);
    } else if (dto instanceof Namespace) {
        applyNamespace((Namespace) dto);
    } else if (dto instanceof Route) {
        applyRoute((Route) dto, sourceName);
    } else if (dto instanceof BuildConfig) {
        applyBuildConfig((BuildConfig) dto, sourceName);
    } else if (dto instanceof DeploymentConfig) {
        DeploymentConfig resource = (DeploymentConfig) dto;
        OpenShiftClient openShiftClient = getOpenShiftClientOrNull();
        if (openShiftClient != null && openShiftClient.supportsOpenShiftAPIGroup(OpenShiftAPIGroups.APPS)) {
            applyResource(resource, sourceName, openShiftClient.deploymentConfigs());
        } else {
            LOG.warn("Not connected to OpenShift cluster so cannot apply entity " + dto);
        }
    } else if (dto instanceof PolicyBinding) {
        applyPolicyBinding((PolicyBinding) dto, sourceName);
    } else if (dto instanceof RoleBinding) {
        applyRoleBinding((RoleBinding) dto, sourceName);
    } else if (dto instanceof Role) {
        Role resource = (Role) dto;
        OpenShiftClient openShiftClient = getOpenShiftClientOrNull();
        if (openShiftClient != null && openShiftClient.supportsOpenShiftAPIGroup(OpenShiftAPIGroups.AUTHORIZATION)) {
            applyResource(resource, sourceName, openShiftClient.roles());
        } else {
            LOG.warn("Not connected to OpenShift cluster so cannot apply entity " + dto);
        }
    } else if (dto instanceof ImageStream) {
        applyImageStream((ImageStream) dto, sourceName);
    } else if (dto instanceof OAuthClient) {
        applyOAuthClient((OAuthClient) dto, sourceName);
    } else if (dto instanceof Template) {
        applyTemplate((Template) dto, sourceName);
    } else if (dto instanceof ServiceAccount) {
        applyServiceAccount((ServiceAccount) dto, sourceName);
    } else if (dto instanceof Secret) {
        applySecret((Secret) dto, sourceName);
    } else if (dto instanceof ConfigMap) {
        applyResource((ConfigMap) dto, sourceName, kubernetesClient.configMaps());
    } else if (dto instanceof DaemonSet) {
        applyResource((DaemonSet) dto, sourceName, kubernetesClient.extensions().daemonSets());
    } else if (dto instanceof Deployment) {
        applyResource((Deployment) dto, sourceName, kubernetesClient.extensions().deployments());
    } else if (dto instanceof ReplicaSet) {
        applyResource((ReplicaSet) dto, sourceName, kubernetesClient.extensions().replicaSets());
    } else if (dto instanceof StatefulSet) {
        applyResource((StatefulSet) dto, sourceName, kubernetesClient.apps().statefulSets());
    } else if (dto instanceof Ingress) {
        applyResource((Ingress) dto, sourceName, kubernetesClient.extensions().ingresses());
    } else if (dto instanceof PersistentVolumeClaim) {
        applyPersistentVolumeClaim((PersistentVolumeClaim) dto, sourceName);
    } else if (dto instanceof HasMetadata) {
        HasMetadata entity = (HasMetadata) dto;
        try {
            String namespace = getNamespace();
            String resourceNamespace = getNamespace(entity);
            if (Strings.isNotBlank(namespace) && Strings.isNullOrBlank(resourceNamespace)) {
                getOrCreateMetadata(entity).setNamespace(namespace);
            }
            LOG.info("Applying " + getKind(entity) + " " + getName(entity) + " from " + sourceName);
            kubernetesClient.resource(entity).inNamespace(namespace).createOrReplace();
        } catch (Exception e) {
            onApplyError("Failed to create " + getKind(entity) + " from " + sourceName + ". " + e, e);
        }
    } else {
        throw new IllegalArgumentException("Unknown entity type " + dto);
    }
}
Also used: ServiceAccount(io.fabric8.kubernetes.api.model.ServiceAccount), OAuthClient(io.fabric8.openshift.api.model.OAuthClient), DoneableImageStream(io.fabric8.openshift.api.model.DoneableImageStream), ImageStream(io.fabric8.openshift.api.model.ImageStream), Deployment(io.fabric8.kubernetes.api.model.extensions.Deployment), Template(io.fabric8.openshift.api.model.Template), ReplicationController(io.fabric8.kubernetes.api.model.ReplicationController), BuildConfig(io.fabric8.openshift.api.model.BuildConfig), RoleBinding(io.fabric8.openshift.api.model.RoleBinding), ReplicaSet(io.fabric8.kubernetes.api.model.extensions.ReplicaSet), Route(io.fabric8.openshift.api.model.Route), HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata), Pod(io.fabric8.kubernetes.api.model.Pod), ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap), Service(io.fabric8.kubernetes.api.model.Service), Ingress(io.fabric8.kubernetes.api.model.extensions.Ingress), Namespace(io.fabric8.kubernetes.api.model.Namespace), PolicyBinding(io.fabric8.openshift.api.model.PolicyBinding), KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException), FileNotFoundException(java.io.FileNotFoundException), OpenShiftNotAvailableException(io.fabric8.openshift.client.OpenShiftNotAvailableException), JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException), IOException(java.io.IOException), Role(io.fabric8.openshift.api.model.Role), Secret(io.fabric8.kubernetes.api.model.Secret), OpenShiftClient(io.fabric8.openshift.client.OpenShiftClient), DaemonSet(io.fabric8.kubernetes.api.model.extensions.DaemonSet), PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim), DeploymentConfig(io.fabric8.openshift.api.model.DeploymentConfig), StatefulSet(io.fabric8.kubernetes.api.model.extensions.StatefulSet)
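
The StatefulSet branch above boils down to a createOrReplace through the typed client. A minimal standalone sketch of that call, assuming a client built from the local kubeconfig and a statefulset.yml resource on the classpath (both assumptions for illustration, not part of the original Controller):

import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClient;

public class ApplyStatefulSetSketch {

    public static void main(String[] args) {
        // Picks up the usual kubeconfig / in-cluster configuration.
        try (KubernetesClient client = new DefaultKubernetesClient()) {
            // Load the StatefulSet definition from a classpath resource (assumed to exist).
            StatefulSet ss = client.apps().statefulSets()
                    .load(ApplyStatefulSetSketch.class.getResourceAsStream("/statefulset.yml"))
                    .get();
            // Equivalent of applyResource(dto, sourceName, kubernetesClient.apps().statefulSets()).
            client.apps().statefulSets()
                    .inNamespace("default")
                    .createOrReplace(ss);
        }
    }
}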

Example 3 with StatefulSet

Use of io.fabric8.kubernetes.api.model.apps.StatefulSet in project fabric8-maven-plugin by fabric8io.

From the class DefaultControllerEnricher, method addMissingResources:

@Override
public void addMissingResources(KubernetesListBuilder builder) {
    final String name = getConfig(Config.name, MavenUtil.createDefaultResourceName(getProject()));
    final ResourceConfig config = new ResourceConfig.Builder().controllerName(name).imagePullPolicy(getConfig(Config.pullPolicy)).withReplicas(Configs.asInt(getConfig(Config.replicaCount))).build();
    final List<ImageConfiguration> images = getImages();
    // Check if at least a replica set is added. If not add a default one
    if (!KubernetesResourceUtil.checkForKind(builder, POD_CONTROLLER_KINDS)) {
        // At least one image must be present, otherwise the resulting config will be invalid
        if (!Lists.isNullOrEmpty(images)) {
            String type = getConfig(Config.type);
            if ("deployment".equalsIgnoreCase(type)) {
                log.info("Adding a default Deployment");
                builder.addToDeploymentItems(deployHandler.getDeployment(config, images));
            } else if ("statefulSet".equalsIgnoreCase(type)) {
                log.info("Adding a default StatefulSet");
                builder.addToStatefulSetItems(statefulSetHandler.getStatefulSet(config, images));
            } else if ("daemonSet".equalsIgnoreCase(type)) {
                log.info("Adding a default DaemonSet");
                builder.addToDaemonSetItems(daemonSetHandler.getDaemonSet(config, images));
            } else if ("replicaSet".equalsIgnoreCase(type)) {
                log.info("Adding a default ReplicaSet");
                builder.addToReplicaSetItems(rsHandler.getReplicaSet(config, images));
            } else if ("replicationController".equalsIgnoreCase(type)) {
                log.info("Adding a default ReplicationController");
                builder.addToReplicationControllerItems(rcHandler.getReplicationController(config, images));
            } else if ("job".equalsIgnoreCase(type)) {
                log.info("Adding a default Job");
                builder.addToJobItems(jobHandler.getJob(config, images));
            }
        }
    } else if (KubernetesResourceUtil.checkForKind(builder, "StatefulSet")) {
        final StatefulSetSpec spec = statefulSetHandler.getStatefulSet(config, images).getSpec();
        if (spec != null) {
            builder.accept(new TypedVisitor<StatefulSetBuilder>() {

                @Override
                public void visit(StatefulSetBuilder statefulSetBuilder) {
                    statefulSetBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec().endSpec().endTemplate().endSpec();
                    mergeStatefulSetSpec(statefulSetBuilder, spec);
                }
            });
            if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
                final PodSpec podSpec = spec.getTemplate().getSpec();
                builder.accept(new TypedVisitor<PodSpecBuilder>() {

                    @Override
                    public void visit(PodSpecBuilder builder) {
                        KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
                    }
                });
            }
        }
    } else {
        final DeploymentSpec spec = deployHandler.getDeployment(config, images).getSpec();
        if (spec != null) {
            builder.accept(new TypedVisitor<DeploymentBuilder>() {

                @Override
                public void visit(DeploymentBuilder deploymentBuilder) {
                    deploymentBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec().endSpec().endTemplate().endSpec();
                    mergeDeploymentSpec(deploymentBuilder, spec);
                }
            });
            if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
                final PodSpec podSpec = spec.getTemplate().getSpec();
                builder.accept(new TypedVisitor<PodSpecBuilder>() {

                    @Override
                    public void visit(PodSpecBuilder builder) {
                        KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
                    }
                });
            }
        }
    }
}
Also used: TypedVisitor(io.fabric8.kubernetes.api.builder.TypedVisitor), PodSpecBuilder(io.fabric8.kubernetes.api.model.PodSpecBuilder), StatefulSetSpec(io.fabric8.kubernetes.api.model.extensions.StatefulSetSpec), DeploymentSpec(io.fabric8.kubernetes.api.model.extensions.DeploymentSpec), PodSpec(io.fabric8.kubernetes.api.model.PodSpec), ImageConfiguration(io.fabric8.maven.docker.config.ImageConfiguration), StatefulSetBuilder(io.fabric8.kubernetes.api.model.extensions.StatefulSetBuilder), KubernetesListBuilder(io.fabric8.kubernetes.api.model.KubernetesListBuilder), DeploymentBuilder(io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder), ResourceConfig(io.fabric8.maven.core.config.ResourceConfig)
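
The TypedVisitor calls above are the standard way to post-process items that are already in a KubernetesListBuilder. A stripped-down sketch of the same pattern, using only builder methods that appear in the enricher and assuming the same kubernetes-model version (where StatefulSet and its builder still live under io.fabric8.kubernetes.api.model.extensions); the resource name and replica count are illustrative:

import io.fabric8.kubernetes.api.builder.TypedVisitor;
import io.fabric8.kubernetes.api.model.KubernetesList;
import io.fabric8.kubernetes.api.model.KubernetesListBuilder;
import io.fabric8.kubernetes.api.model.extensions.StatefulSetBuilder;

public class StatefulSetVisitorSketch {

    public static KubernetesList ensureReplicas() {
        KubernetesListBuilder builder = new KubernetesListBuilder()
                .addToStatefulSetItems(new StatefulSetBuilder()
                        .withNewMetadata().withName("example").endMetadata()
                        .build());
        // Visit every StatefulSet in the list and make sure a spec with 3 replicas exists.
        builder.accept(new TypedVisitor<StatefulSetBuilder>() {
            @Override
            public void visit(StatefulSetBuilder statefulSetBuilder) {
                // editOrNewSpec() creates the spec if it is absent, as in the enricher above.
                statefulSetBuilder.editOrNewSpec().withReplicas(3).endSpec();
            }
        });
        return builder.build();
    }
}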

Example 4 with StatefulSet

Use of io.fabric8.kubernetes.api.model.apps.StatefulSet in project strimzi by strimzi.

From the class KafkaAssemblyOperatorTest, method updateCluster:

private void updateCluster(TestContext context, ConfigMap originalCm, ConfigMap clusterCm, boolean kafkaRolling, boolean zkRolling) {
    KafkaCluster originalKafkaCluster = KafkaCluster.fromConfigMap(originalCm);
    KafkaCluster updatedKafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
    ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromConfigMap(originalCm);
    ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
    TopicController originalTopicController = TopicController.fromConfigMap(originalCm);
    // create CM, Service, headless service, statefulset and so on
    ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
    ServiceOperator mockServiceOps = mock(ServiceOperator.class);
    ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
    KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
    PvcOperator mockPvcOps = mock(PvcOperator.class);
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    String clusterCmName = clusterCm.getMetadata().getName();
    String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
    // Mock CM get
    when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    ConfigMap metricsCm = new ConfigMapBuilder().withNewMetadata().withName(KafkaCluster.metricConfigsName(clusterCmName)).withNamespace(clusterCmNamespace).endMetadata().withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG)).build();
    when(mockCmOps.get(clusterCmNamespace, KafkaCluster.metricConfigsName(clusterCmName))).thenReturn(metricsCm);
    ConfigMap zkMetricsCm = new ConfigMapBuilder().withNewMetadata().withName(ZookeeperCluster.zookeeperMetricsName(clusterCmName)).withNamespace(clusterCmNamespace).endMetadata().withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG)).build();
    when(mockCmOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperMetricsName(clusterCmName))).thenReturn(zkMetricsCm);
    // Mock Service gets
    when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateService());
    when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.headlessName(clusterCmName))).thenReturn(originalKafkaCluster.generateHeadlessService());
    when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateService());
    when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperHeadlessName(clusterCmName))).thenReturn(originalZookeeperCluster.generateHeadlessService());
    when(mockServiceOps.endpointReadiness(eq(clusterCmNamespace), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    // Mock StatefulSet get
    when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateStatefulSet(openShift));
    when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalZookeeperCluster.generateStatefulSet(openShift));
    // Mock Deployment get
    if (originalTopicController != null) {
        when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(originalTopicController.generateDeployment());
    }
    // Mock CM patch
    Set<String> metricsCms = set();
    doAnswer(invocation -> {
        metricsCms.add(invocation.getArgument(1));
        return Future.succeededFuture();
    }).when(mockCmOps).reconcile(eq(clusterCmNamespace), anyString(), any());
    // Mock Service patch (both service and headless service)
    ArgumentCaptor<String> patchedServicesCaptor = ArgumentCaptor.forClass(String.class);
    when(mockServiceOps.reconcile(eq(clusterCmNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    // Mock StatefulSet patch
    when(mockZsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(zkRolling)));
    when(mockKsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(kafkaRolling)));
    // Mock StatefulSet rollingUpdate
    Set<String> rollingRestarts = set();
    // Mock StatefulSet scaleUp
    ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
    when(mockZsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    // Mock StatefulSet scaleDown
    ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
    when(mockZsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    when(mockZsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
        if (!zkRolling) {
            context.fail("Unexpected rolling update");
        }
        return Future.succeededFuture();
    });
    // ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
    when(mockKsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    // Mock StatefulSet scaleDown
    // ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
    when(mockKsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    when(mockKsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
        if (!kafkaRolling) {
            context.fail("Unexpected rolling update");
        }
        return Future.succeededFuture();
    });
    // Mock Deployment patch
    ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
    when(mockDepOps.reconcile(anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
    // Now try to update a KafkaCluster based on this CM
    Async async = context.async();
    ops.createOrUpdate(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), clusterCm, createResult -> {
        if (createResult.failed())
            createResult.cause().printStackTrace();
        context.assertTrue(createResult.succeeded());
        // rolling restart
        Set<String> expectedRollingRestarts = set();
        if (KafkaSetOperator.needsRollingUpdate(new StatefulSetDiff(originalKafkaCluster.generateStatefulSet(openShift), updatedKafkaCluster.generateStatefulSet(openShift)))) {
            expectedRollingRestarts.add(originalKafkaCluster.getName());
        }
        if (ZookeeperSetOperator.needsRollingUpdate(new StatefulSetDiff(originalZookeeperCluster.generateStatefulSet(openShift), updatedZookeeperCluster.generateStatefulSet(openShift)))) {
            expectedRollingRestarts.add(originalZookeeperCluster.getName());
        }
        // No metrics config => no CMs created
        verify(mockCmOps, never()).createOrUpdate(any());
        verifyNoMoreInteractions(mockPvcOps);
        async.complete();
    });
}
Also used: KafkaCluster(io.strimzi.controller.cluster.model.KafkaCluster), ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap), ZookeeperSetOperator(io.strimzi.controller.cluster.operator.resource.ZookeeperSetOperator), ZookeeperCluster(io.strimzi.controller.cluster.model.ZookeeperCluster), ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString), PvcOperator(io.strimzi.controller.cluster.operator.resource.PvcOperator), ServiceOperator(io.strimzi.controller.cluster.operator.resource.ServiceOperator), TopicController(io.strimzi.controller.cluster.model.TopicController), ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder), Async(io.vertx.ext.unit.Async), KafkaSetOperator(io.strimzi.controller.cluster.operator.resource.KafkaSetOperator), Reconciliation(io.strimzi.controller.cluster.Reconciliation), StatefulSetDiff(io.strimzi.controller.cluster.operator.resource.StatefulSetDiff), ConfigMapOperator(io.strimzi.controller.cluster.operator.resource.ConfigMapOperator), DeploymentOperator(io.strimzi.controller.cluster.operator.resource.DeploymentOperator)
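
Both Strimzi tests rely on the same Mockito idiom: stub the reconcile-style methods to return succeeded futures while capturing the resource names they are called with, then assert on the captured values. A minimal, self-contained illustration of that capture-then-assert idiom; the NameReconciler interface and all names here are invented for the sketch and are not part of Strimzi:

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import org.mockito.ArgumentCaptor;

public class CaptorIdiomSketch {

    // Hypothetical collaborator, standing in for the *Operator mocks above.
    interface NameReconciler {
        boolean reconcile(String namespace, String name);
    }

    public static void main(String[] args) {
        NameReconciler reconciler = mock(NameReconciler.class);
        ArgumentCaptor<String> nameCaptor = ArgumentCaptor.forClass(String.class);
        // capture() acts as a matcher, so it can be mixed with anyString() during stubbing.
        when(reconciler.reconcile(anyString(), nameCaptor.capture())).thenReturn(true);

        reconciler.reconcile("ns", "my-cluster-kafka");

        verify(reconciler).reconcile(anyString(), anyString());
        // The captured name is what the tests above collect into sets and compare.
        System.out.println("captured name: " + nameCaptor.getValue());
    }
}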

Example 5 with StatefulSet

Use of io.fabric8.kubernetes.api.model.apps.StatefulSet in project strimzi by strimzi.

From the class KafkaAssemblyOperatorTest, method deleteCluster:

private void deleteCluster(TestContext context, ConfigMap clusterCm) {
    ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
    KafkaCluster kafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
    TopicController topicController = TopicController.fromConfigMap(clusterCm);
    // create CM, Service, headless service, statefulset
    ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
    ServiceOperator mockServiceOps = mock(ServiceOperator.class);
    ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
    KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
    PvcOperator mockPvcOps = mock(PvcOperator.class);
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    String clusterCmName = clusterCm.getMetadata().getName();
    String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
    StatefulSet kafkaSs = kafkaCluster.generateStatefulSet(true);
    StatefulSet zkSs = zookeeperCluster.generateStatefulSet(true);
    when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(kafkaSs);
    when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(zkSs);
    when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    ArgumentCaptor<String> serviceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> ssCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> metricsCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.reconcile(eq(clusterCmNamespace), metricsCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockServiceOps.reconcile(eq(clusterCmNamespace), serviceCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockKsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockZsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> pvcCaptor = ArgumentCaptor.forClass(String.class);
    when(mockPvcOps.reconcile(eq(clusterCmNamespace), pvcCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
    when(mockDepOps.reconcile(eq(clusterCmNamespace), depCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    if (topicController != null) {
        Deployment tcDep = topicController.generateDeployment();
        when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(tcDep);
    }
    KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
    // Now try to delete a KafkaCluster based on this CM
    Async async = context.async();
    ops.delete(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), createResult -> {
        context.assertTrue(createResult.succeeded());
        /*Set<String> metricsNames = new HashSet<>();
            if (kafkaCluster.isMetricsEnabled()) {
                metricsNames.add(KafkaCluster.metricConfigsName(clusterCmName));
            }
            if (zookeeperCluster.isMetricsEnabled()) {
                metricsNames.add(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
            }
            context.assertEquals(metricsNames, captured(metricsCaptor));*/
        verify(mockZsOps).reconcile(eq(clusterCmNamespace), eq(ZookeeperCluster.zookeeperClusterName(clusterCmName)), isNull());
        context.assertEquals(set(ZookeeperCluster.zookeeperHeadlessName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName), KafkaCluster.kafkaClusterName(clusterCmName), KafkaCluster.headlessName(clusterCmName)), captured(serviceCaptor));
        // verify deleted Statefulsets
        context.assertEquals(set(zookeeperCluster.getName(), kafkaCluster.getName()), captured(ssCaptor));
        // PvcOperations only used for deletion
        Set<String> expectedPvcDeletions = new HashSet<>();
        for (int i = 0; deleteClaim && i < kafkaCluster.getReplicas(); i++) {
            expectedPvcDeletions.add("data-" + clusterCmName + "-kafka-" + i);
        }
        for (int i = 0; deleteClaim && i < zookeeperCluster.getReplicas(); i++) {
            expectedPvcDeletions.add("data-" + clusterCmName + "-zookeeper-" + i);
        }
        context.assertEquals(expectedPvcDeletions, captured(pvcCaptor));
        // if topic controller configuration was defined in the CM
        if (topicController != null) {
            Set<String> expectedDepNames = new HashSet<>();
            expectedDepNames.add(TopicController.topicControllerName(clusterCmName));
            context.assertEquals(expectedDepNames, captured(depCaptor));
        }
        async.complete();
    });
}
Also used: KafkaCluster(io.strimzi.controller.cluster.model.KafkaCluster), ZookeeperSetOperator(io.strimzi.controller.cluster.operator.resource.ZookeeperSetOperator), Deployment(io.fabric8.kubernetes.api.model.extensions.Deployment), ZookeeperCluster(io.strimzi.controller.cluster.model.ZookeeperCluster), ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString), PvcOperator(io.strimzi.controller.cluster.operator.resource.PvcOperator), ServiceOperator(io.strimzi.controller.cluster.operator.resource.ServiceOperator), TopicController(io.strimzi.controller.cluster.model.TopicController), Async(io.vertx.ext.unit.Async), KafkaSetOperator(io.strimzi.controller.cluster.operator.resource.KafkaSetOperator), Reconciliation(io.strimzi.controller.cluster.Reconciliation), ConfigMapOperator(io.strimzi.controller.cluster.operator.resource.ConfigMapOperator), DeploymentOperator(io.strimzi.controller.cluster.operator.resource.DeploymentOperator), StatefulSet(io.fabric8.kubernetes.api.model.extensions.StatefulSet), HashSet(java.util.HashSet)

Aggregations

StatefulSet (io.fabric8.kubernetes.api.model.apps.StatefulSet): 226 usages
Kafka (io.strimzi.api.kafka.model.Kafka): 132 usages
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder): 124 usages
ParallelTest (io.strimzi.test.annotations.ParallelTest): 100 usages
Test (org.junit.jupiter.api.Test): 84 usages
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 68 usages
Reconciliation (io.strimzi.operator.common.Reconciliation): 66 usages
ArrayList (java.util.ArrayList): 61 usages
Checkpoint (io.vertx.junit5.Checkpoint): 60 usages
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 58 usages
Pod (io.fabric8.kubernetes.api.model.Pod): 56 usages
KafkaVersionTestUtils (io.strimzi.operator.cluster.KafkaVersionTestUtils): 56 usages
ResourceUtils (io.strimzi.operator.cluster.ResourceUtils): 52 usages
PasswordGenerator (io.strimzi.operator.common.PasswordGenerator): 52 usages
PodOperator (io.strimzi.operator.common.operator.resource.PodOperator): 52 usages
Map (java.util.Map): 51 usages
CoreMatchers.is (org.hamcrest.CoreMatchers.is): 50 usages
PersistentVolumeClaim (io.fabric8.kubernetes.api.model.PersistentVolumeClaim): 49 usages
KafkaListenerType (io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType): 48 usages
Collections (java.util.Collections): 48 usages