
Example 71 with Feature

use of io.fabric8.agent.model.Feature in project fabric8 by jboss-fuse.

the class ContainerUpgradeAndRollbackTest method testContainerUpgradeAndRollback.

/**
 * This tests the simple scenario of
 * 1. create a child container
 * 2. create a new version
 * 3. modify the profile of the new version
 * 4. upgrade all containers
 * 5. verify that the child is provisioned according to the new version
 * 6. roll back all containers
 * 7. verify that the child is provisioned according to the old version
 */
@Test
@SuppressWarnings("unchecked")
public void testContainerUpgradeAndRollback() throws Exception {
    CommandSupport.executeCommand("fabric:create --force --clean -n  --wait-for-provisioning");
    BundleContext moduleContext = ServiceLocator.getSystemContext();
    ServiceProxy<FabricService> fabricProxy = ServiceProxy.createServiceProxy(moduleContext, FabricService.class);
    try {
        FabricService fabricService = fabricProxy.getService();
        Set<Container> containers = null;
        try {
            CommandSupport.executeCommand("fabric:version-create --parent 1.0 1.1");
            // Make sure that the profile change has been applied before changing the version
            CountDownLatch latch = WaitForConfigurationChange.on(fabricService);
            CommandSupport.executeCommand("fabric:profile-edit --feature camel-script --feature camel-hazelcast feature-camel 1.1");
            Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
            CommandSupport.executeCommand("fabric:profile-display --version 1.1 feature-camel");
            containers = ContainerBuilder.create().withName("smoke_camel").withProfiles("feature-camel").assertProvisioningResult().build(fabricService);
            CommandSupport.executeCommand("fabric:container-upgrade --all 1.1");
            ProvisionSupport.provisioningSuccess(containers, ProvisionSupport.PROVISION_TIMEOUT);
            CommandSupport.executeCommand("fabric:container-list");
            for (Container container : containers) {
                Assert.assertEquals("Container should have version 1.1", "1.1", container.getVersion().getId());
                String bundles = CommandSupport.executeCommand("fabric:container-connect -u admin -p admin " + container.getId() + " osgi:list -t 0 -s | grep camel-hazelcast");
                Assert.assertNotNull(bundles);
                System.out.println(bundles);
                Assert.assertFalse("Expected camel-hazelcast installed on container: " + container.getId(), bundles.isEmpty());
            }
            CommandSupport.executeCommand("fabric:container-rollback --all 1.0");
            ProvisionSupport.provisioningSuccess(containers, ProvisionSupport.PROVISION_TIMEOUT);
            CommandSupport.executeCommand("fabric:container-list");
            for (Container container : containers) {
                Assert.assertEquals("Container should have version 1.0", "1.0", container.getVersion().getId());
                String bundles = CommandSupport.executeCommand("fabric:container-connect -u admin -p admin " + container.getId() + " osgi:list -t 0 -s | grep camel-hazelcast");
                Assert.assertNotNull(bundles);
                System.out.println(bundles);
                Assert.assertTrue("Expected no camel-hazelcast installed on container: " + container.getId(), bundles.isEmpty());
            }
        } finally {
            ContainerBuilder.stop(fabricService, containers);
        }
    } finally {
        fabricProxy.close();
    }
}
Also used : Container(io.fabric8.api.Container) FabricService(io.fabric8.api.FabricService) CountDownLatch(java.util.concurrent.CountDownLatch) BundleContext(org.osgi.framework.BundleContext) Test(org.junit.Test)
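
The step worth calling out is the CountDownLatch returned by WaitForConfigurationChange.on(fabricService): the test only moves on to the upgrade once the profile edit has actually been observed. A minimal sketch of that listener-plus-latch idiom, with ConfigSource and ConfigListener as hypothetical stand-ins for the fabric8 notification API (only the latch handling is the point):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Sketch of the "wait for a configuration change" idiom used in the test above.
// ConfigSource/ConfigListener are made-up stand-ins for the real fabric8 callback.
public class WaitForChangeSketch {

    interface ConfigListener {
        void onChange();
    }

    interface ConfigSource {
        void addListener(ConfigListener listener);
    }

    /** Returns a latch that is released on the next configuration change. */
    static CountDownLatch onNextChange(ConfigSource source) {
        CountDownLatch latch = new CountDownLatch(1);
        source.addListener(latch::countDown);
        return latch;
    }

    static void example(ConfigSource source) throws InterruptedException {
        CountDownLatch latch = onNextChange(source);
        // ... trigger the change here (e.g. edit a profile) ...
        if (!latch.await(5, TimeUnit.SECONDS)) {
            throw new IllegalStateException("configuration change was not observed in time");
        }
    }
}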

Example 72 with Feature

use of io.fabric8.agent.model.Feature in project fabric8-maven-plugin by fabric8io.

the class KarafHealthCheckEnricher method discoverKarafProbe.

// 
// Karaf has a readiness/health URL exposed if the fabric8-karaf-checks feature is installed.
// 
private Probe discoverKarafProbe(String path, int initialDelay) {
    for (Plugin plugin : this.getProject().getBuildPlugins()) {
        if ("karaf-maven-plugin".equals(plugin.getArtifactId())) {
            Xpp3Dom configuration = (Xpp3Dom) plugin.getConfiguration();
            if (configuration == null)
                return null;
            Xpp3Dom startupFeatures = configuration.getChild("startupFeatures");
            if (startupFeatures == null)
                return null;
            for (Xpp3Dom feature : startupFeatures.getChildren("feature")) {
                if ("fabric8-karaf-checks".equals(feature.getValue())) {
                    // TODO: handle the case where the user changes the default port
                    return new ProbeBuilder().withNewHttpGet().withNewPort(DEFAULT_HEALTH_CHECK_PORT).withPath(path).endHttpGet().withInitialDelaySeconds(initialDelay).build();
                }
            }
        }
    }
    return null;
}
Also used : Xpp3Dom(org.codehaus.plexus.util.xml.Xpp3Dom) ProbeBuilder(io.fabric8.kubernetes.api.model.ProbeBuilder) Plugin(org.apache.maven.model.Plugin)
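
For context, discoverKarafProbe only returns a probe when the karaf-maven-plugin configuration carries a <startupFeatures> element listing the fabric8-karaf-checks feature. A hedged sketch of building that exact Xpp3Dom shape as a test fixture (buildKarafPlugin is a made-up helper name, not part of the enricher):

import org.apache.maven.model.Plugin;
import org.codehaus.plexus.util.xml.Xpp3Dom;

// Builds the plugin configuration shape that discoverKarafProbe() looks for:
//   <configuration>
//     <startupFeatures>
//       <feature>fabric8-karaf-checks</feature>
//     </startupFeatures>
//   </configuration>
final class KarafProbeFixture {

    static Plugin buildKarafPlugin() {
        Xpp3Dom feature = new Xpp3Dom("feature");
        feature.setValue("fabric8-karaf-checks");

        Xpp3Dom startupFeatures = new Xpp3Dom("startupFeatures");
        startupFeatures.addChild(feature);

        Xpp3Dom configuration = new Xpp3Dom("configuration");
        configuration.addChild(startupFeatures);

        Plugin plugin = new Plugin();
        plugin.setArtifactId("karaf-maven-plugin");
        plugin.setConfiguration(configuration);
        return plugin;
    }
}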

Example 73 with Feature

use of io.fabric8.agent.model.Feature in project strimzi by strimzi.

the class VersionChangeCreator method getVersionFromController.

/**
 * Collects information about whether the controller resources (StatefulSet or PodSet) exist and which Kafka
 * version they carry in their annotations.
 *
 * @return  Future which completes when the version is collected from the controller resource
 */
private Future<Void> getVersionFromController() {
    Future<StatefulSet> stsFuture = stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()));
    Future<StrimziPodSet> podSetFuture = strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()));
    return CompositeFuture.join(stsFuture, podSetFuture).compose(res -> {
        StatefulSet sts = res.resultAt(0);
        StrimziPodSet podSet = res.resultAt(1);
        if (sts != null && podSet != null) {
            // Both StatefulSet and PodSet exist => we create the description based on the feature gate
            if (featureGates.useStrimziPodSetsEnabled()) {
                versionFromControllerResource = Annotations.annotations(podSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            } else {
                versionFromControllerResource = Annotations.annotations(sts).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            }
            freshDeployment = false;
        } else if (sts != null) {
            // StatefulSet exists, PodSet does not exist => we create the description from the StatefulSet
            versionFromControllerResource = Annotations.annotations(sts).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            freshDeployment = false;
        } else if (podSet != null) {
            // PodSet exists, StatefulSet does not => we create the description from the PodSet
            versionFromControllerResource = Annotations.annotations(podSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            freshDeployment = false;
        }
        return Future.succeededFuture();
    });
}
Also used : StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) StatefulSet(io.fabric8.kubernetes.api.model.apps.StatefulSet)
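
The pattern to note is CompositeFuture.join(...).compose(...): both lookups run concurrently, and res.resultAt(i) yields each individual result, which is null when the resource does not exist. A standalone sketch of the same Vert.x idiom, with lookupSts/lookupPodSet as hypothetical async getters and String standing in for the real resource types:

import io.vertx.core.CompositeFuture;
import io.vertx.core.Future;

// Minimal sketch of the CompositeFuture.join(...).compose(...) idiom used above.
final class JoinSketch {

    static Future<String> lookupSts() {
        return Future.succeededFuture("sts-annotation-value");
    }

    static Future<String> lookupPodSet() {
        return Future.succeededFuture(null); // resource not found
    }

    static Future<String> pickVersion() {
        return CompositeFuture.join(lookupSts(), lookupPodSet()).compose(res -> {
            String fromSts = res.resultAt(0);
            String fromPodSet = res.resultAt(1);
            // A simple "prefer the PodSet value when present" rule stands in here
            // for the feature-gate decision made in the method above.
            String chosen = fromPodSet != null ? fromPodSet : fromSts;
            return Future.succeededFuture(chosen);
        });
    }
}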

Example 74 with Feature

use of io.fabric8.agent.model.Feature in project strimzi by strimzi.

the class FeatureGatesIsolatedST method testKRaftMode.

/**
 * UseKRaft feature gate
 */
@IsolatedTest("Feature Gates test for enabled UseKRaft gate")
@Tag(INTERNAL_CLIENTS_USED)
public void testKRaftMode(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 180;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int kafkaReplicas = 3;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "+UseStrimziPodSets,+UseKRaft", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(INFRA_NAMESPACE).withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES).withExtraEnvVars(testEnvVars).createInstallation().runInstallation();
    Kafka kafka = KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build();
    // The builder cannot disable the EO. It has to be done this way.
    kafka.getSpec().setEntityOperator(null);
    resourceManager.createResource(extensionContext, kafka);
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder().withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(topicName).withMessageCount(messageCount).withDelayMs(500).withNamespaceName(INFRA_NAMESPACE).build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // Check that there is no ZooKeeper
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    assertThat("No ZooKeeper pods should exist", zkPods.size(), is(0));
    // Roll Kafka
    LOGGER.info("Force Rolling Update of Kafka via read-only configuration change.");
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getKafka().getConfig().put("log.retention.hours", 72), INFRA_NAMESPACE);
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientsSuccess(producerName, consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) ArrayList(java.util.ArrayList) Kafka(io.strimzi.api.kafka.model.Kafka) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
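
The feature gates themselves are switched on purely through an extra environment variable handed to the Cluster Operator installation. A sketch of assembling that list with the fabric8 EnvVarBuilder, equivalent to the positional new EnvVar(name, value, null) used above; the literal variable name below is an assumption standing in for Environment.STRIMZI_FEATURE_GATES_ENV:

import java.util.ArrayList;
import java.util.List;

import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.EnvVarBuilder;

// Sketch of building the extra operator environment variables used in the test above.
final class FeatureGateEnvSketch {

    static final String FEATURE_GATES_ENV = "STRIMZI_FEATURE_GATES"; // assumed name

    static List<EnvVar> kraftEnabled() {
        List<EnvVar> envVars = new ArrayList<>();
        envVars.add(new EnvVarBuilder()
                .withName(FEATURE_GATES_ENV)
                .withValue("+UseStrimziPodSets,+UseKRaft")
                .build());
        return envVars;
    }
}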

Example 75 with Feature

use of io.fabric8.agent.model.Feature in project strimzi by strimzi.

the class FeatureGatesIsolatedST method testControlPlaneListenerFeatureGate.

/**
 * Control Plane Listener
 * https://github.com/strimzi/proposals/blob/main/025-control-plain-listener.md
 */
@IsolatedTest("Feature Gates test for disabled ControlPlainListener")
@Tag(INTERNAL_CLIENTS_USED)
public void testControlPlaneListenerFeatureGate(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    int messageCount = 300;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int kafkaReplicas = 1;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-ControlPlaneListener", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(INFRA_NAMESPACE).withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES).withExtraEnvVars(testEnvVars).createInstallation().runInstallation();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
    LOGGER.info("Check for presence of ContainerPort 9090/tcp (tcp-ctrlplane) in first Kafka pod.");
    final Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), clusterName + "-kafka-").get(0);
    ContainerPort expectedControlPlaneContainerPort = new ContainerPort(9090, null, null, "tcp-ctrlplane", "TCP");
    List<ContainerPort> kafkaPodPorts = kafkaPod.getSpec().getContainers().get(0).getPorts();
    assertTrue(kafkaPodPorts.contains(expectedControlPlaneContainerPort));
    Map<String, String> kafkaPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), kafkaSelector);
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName).editSpec().withReplicas(kafkaReplicas).withPartitions(kafkaReplicas).endSpec().build();
    resourceManager.createResource(extensionContext, kafkaTopic);
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder().withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(topicName).withMessageCount(messageCount).withDelayMs(500).withNamespaceName(clusterOperator.getDeploymentNamespace()).build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    JobUtils.waitForJobRunning(consumerName, clusterOperator.getDeploymentNamespace());
    LOGGER.info("Delete first found Kafka broker pod.");
    kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), kafkaPod);
    RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
    LOGGER.info("Force Rolling Update of Kafka via annotation.");
    kafkaPods.keySet().forEach(podName -> {
        kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod).editMetadata().addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true").endMetadata().build());
    });
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
    ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) Pod(io.fabric8.kubernetes.api.model.Pod) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) ArrayList(java.util.ArrayList) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) KafkaTopic(io.strimzi.api.kafka.model.KafkaTopic) ContainerPort(io.fabric8.kubernetes.api.model.ContainerPort) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
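
The positional constructor new ContainerPort(9090, null, null, "tcp-ctrlplane", "TCP") maps to (containerPort, hostIP, hostPort, name, protocol). A hedged builder equivalent for the expected control-plane port:

import io.fabric8.kubernetes.api.model.ContainerPort;
import io.fabric8.kubernetes.api.model.ContainerPortBuilder;

// Builder form of the expected port asserted in the test above.
final class ControlPlanePortSketch {

    static ContainerPort expectedControlPlanePort() {
        return new ContainerPortBuilder()
                .withContainerPort(9090)
                .withName("tcp-ctrlplane")
                .withProtocol("TCP")
                .build();
    }
}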

Aggregations

Test (org.junit.Test): 20
HashMap (java.util.HashMap): 19
ArrayList (java.util.ArrayList): 17
FabricService (io.fabric8.api.FabricService): 15
IOException (java.io.IOException): 15
Feature (io.fabric8.agent.model.Feature): 12
Container (io.fabric8.api.Container): 11
Profile (io.fabric8.api.Profile): 11
File (java.io.File): 11
List (java.util.List): 11
Map (java.util.Map): 10
Kafka (io.strimzi.api.kafka.model.Kafka): 8
Repository (io.fabric8.agent.model.Repository): 6
Version (io.fabric8.api.Version): 6
Role (io.fabric8.kubernetes.api.model.rbac.Role): 6
RoleBinding (io.fabric8.kubernetes.api.model.rbac.RoleBinding): 6
RoleRef (io.fabric8.kubernetes.api.model.rbac.RoleRef): 6
RoleRefBuilder (io.fabric8.kubernetes.api.model.rbac.RoleRefBuilder): 6
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder): 6
HashSet (java.util.HashSet): 6