Use of io.fabric8.agent.model.Feature in project fabric8 by jboss-fuse:
the class ContainerUpgradeAndRollbackTest, method testContainerUpgradeAndRollback.
/**
 * Verifies the container upgrade/rollback lifecycle end-to-end:
 * 1. create a child container
 * 2. create a new version (1.1, parented on 1.0)
 * 3. modify the feature-camel profile of the new version
 * 4. upgrade all containers to 1.1
 * 5. verify that the child is provisioned according to the new version
 * 6. rollback all containers to 1.0
 * 7. verify that the child is provisioned according to the old version
 */
@Test
@SuppressWarnings("unchecked")
public void testContainerUpgradeAndRollback() throws Exception {
// Bootstrap a fresh fabric and block until initial provisioning completes.
CommandSupport.executeCommand("fabric:create --force --clean -n --wait-for-provisioning");
BundleContext moduleContext = ServiceLocator.getSystemContext();
// Proxy must be closed in the outer finally to release the service tracker.
ServiceProxy<FabricService> fabricProxy = ServiceProxy.createServiceProxy(moduleContext, FabricService.class);
try {
FabricService fabricService = fabricProxy.getService();
// Declared outside the inner try so the finally can stop whatever was built.
Set<Container> containers = null;
try {
// Step 2: derive version 1.1 from 1.0.
CommandSupport.executeCommand("fabric:version-create --parent 1.0 1.1");
// Make sure that the profile change has been applied before changing the version
CountDownLatch latch = WaitForConfigurationChange.on(fabricService);
// Step 3: add the camel-script and camel-hazelcast features to feature-camel on 1.1.
CommandSupport.executeCommand("fabric:profile-edit --feature camel-script --feature camel-hazelcast feature-camel 1.1");
Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
CommandSupport.executeCommand("fabric:profile-display --version 1.1 feature-camel");
// Step 1: create the child container (still on version 1.0 at this point).
containers = ContainerBuilder.create().withName("smoke_camel").withProfiles("feature-camel").assertProvisioningResult().build(fabricService);
// Step 4: upgrade every container to 1.1 and wait for re-provisioning.
CommandSupport.executeCommand("fabric:container-upgrade --all 1.1");
ProvisionSupport.provisioningSuccess(containers, ProvisionSupport.PROVISION_TIMEOUT);
CommandSupport.executeCommand("fabric:container-list");
// Step 5: each container must report 1.1 and have camel-hazelcast installed.
for (Container container : containers) {
Assert.assertEquals("Container should have version 1.1", "1.1", container.getVersion().getId());
String bundles = CommandSupport.executeCommand("fabric:container-connect -u admin -p admin " + container.getId() + " osgi:list -t 0 -s | grep camel-hazelcast");
Assert.assertNotNull(bundles);
System.out.println(bundles);
Assert.assertFalse("Expected camel-hazelcast installed on container: " + container.getId(), bundles.isEmpty());
}
// Step 6: roll everything back to 1.0 and wait for re-provisioning.
CommandSupport.executeCommand("fabric:container-rollback --all 1.0");
ProvisionSupport.provisioningSuccess(containers, ProvisionSupport.PROVISION_TIMEOUT);
CommandSupport.executeCommand("fabric:container-list");
// Step 7: each container must report 1.0 and camel-hazelcast must be gone again.
for (Container container : containers) {
Assert.assertEquals("Container should have version 1.0", "1.0", container.getVersion().getId());
String bundles = CommandSupport.executeCommand("fabric:container-connect -u admin -p admin " + container.getId() + " osgi:list -t 0 -s | grep camel-hazelcast");
Assert.assertNotNull(bundles);
System.out.println(bundles);
Assert.assertTrue("Expected no camel-hazelcast installed on container: " + container.getId(), bundles.isEmpty());
}
} finally {
// Stop the child container even if an assertion above failed.
ContainerBuilder.stop(fabricService, containers);
}
} finally {
fabricProxy.close();
}
}
Use of io.fabric8.agent.model.Feature in project fabric8-maven-plugin by fabric8io:
the class KarafHealthCheckEnricher, method discoverKarafProbe.
/**
 * Builds an HTTP readiness/health probe for Karaf images.
 * Karaf exposes a health-check URL when the "fabric8-karaf-checks" feature is among the
 * karaf-maven-plugin's configured startup features; otherwise no probe is discoverable.
 *
 * @param path         HTTP path the probe should request
 * @param initialDelay seconds to wait before the first probe attempt
 * @return the probe, or {@code null} when the feature is not configured
 */
private Probe discoverKarafProbe(String path, int initialDelay) {
for (Plugin buildPlugin : this.getProject().getBuildPlugins()) {
if (!"karaf-maven-plugin".equals(buildPlugin.getArtifactId())) {
continue;
}
Xpp3Dom pluginConfig = (Xpp3Dom) buildPlugin.getConfiguration();
if (pluginConfig == null) {
return null;
}
Xpp3Dom startupFeatures = pluginConfig.getChild("startupFeatures");
if (startupFeatures == null) {
return null;
}
for (Xpp3Dom featureNode : startupFeatures.getChildren("feature")) {
if ("fabric8-karaf-checks".equals(featureNode.getValue())) {
// TODO: handle the case where the user changes the default port
return new ProbeBuilder().withNewHttpGet().withNewPort(DEFAULT_HEALTH_CHECK_PORT).withPath(path).endHttpGet().withInitialDelaySeconds(initialDelay).build();
}
}
}
return null;
}
Use of io.fabric8.agent.model.Feature in project strimzi by strimzi:
the class VersionChangeCreator, method getVersionFromController.
/**
 * Collects the information whether the controller resource (StatefulSet or PodSet) exists and what Kafka versions
 * they carry in their annotations.
 *
 * @return Future which completes when the version is collected from the controller resource
 */
private Future<Void> getVersionFromController() {
// Both controller resources share the same name; look them up in parallel.
Future<StatefulSet> stsFuture = stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()));
Future<StrimziPodSet> podSetFuture = strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()));
return CompositeFuture.join(stsFuture, podSetFuture).compose(res -> {
StatefulSet statefulSet = res.resultAt(0);
StrimziPodSet strimziPodSet = res.resultAt(1);
if (strimziPodSet != null && statefulSet != null) {
// Both PodSet and StatefulSet exist => the feature gate decides which one is authoritative
if (featureGates.useStrimziPodSetsEnabled()) {
versionFromControllerResource = Annotations.annotations(strimziPodSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
} else {
versionFromControllerResource = Annotations.annotations(statefulSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
}
freshDeployment = false;
} else if (strimziPodSet != null) {
// Only the PodSet exists => read the version annotation from it
versionFromControllerResource = Annotations.annotations(strimziPodSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
freshDeployment = false;
} else if (statefulSet != null) {
// Only the StatefulSet exists => read the version annotation from it
versionFromControllerResource = Annotations.annotations(statefulSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
freshDeployment = false;
}
// Neither resource exists => leave the fields untouched (fresh deployment).
return Future.succeededFuture();
});
}
Use of io.fabric8.agent.model.Feature in project strimzi by strimzi:
the class FeatureGatesIsolatedST, method testKRaftMode.
/**
 * UseKRaft feature gate: reinstalls the cluster operator with the UseStrimziPodSets and
 * UseKRaft gates enabled, deploys a persistent Kafka cluster without the Entity Operator,
 * verifies no ZooKeeper pods exist, forces a rolling update via a read-only config change,
 * and checks that the producer/consumer jobs complete successfully throughout.
 */
@IsolatedTest("Feature Gates test for enabled UseKRaft gate")
@Tag(INTERNAL_CLIENTS_USED)
public void testKRaftMode(ExtensionContext extensionContext) {
// Operator reinstall with custom env vars is not supported under OLM/Helm installs.
assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
int messageCount = 180;
List<EnvVar> testEnvVars = new ArrayList<>();
int kafkaReplicas = 3;
// UseKRaft requires UseStrimziPodSets to be enabled as well.
testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "+UseStrimziPodSets,+UseKRaft", null));
// Reinstall the cluster operator with the feature gates under test.
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(INFRA_NAMESPACE).withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES).withExtraEnvVars(testEnvVars).createInstallation().runInstallation();
Kafka kafka = KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build();
// The builder cannot disable the EO. It has to be done this way.
kafka.getSpec().setEntityOperator(null);
resourceManager.createResource(extensionContext, kafka);
LOGGER.info("Try to send some messages to Kafka over next few minutes.");
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder().withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(topicName).withMessageCount(messageCount).withDelayMs(500).withNamespaceName(INFRA_NAMESPACE).build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
// Check that there is no ZooKeeper
Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
assertThat("No ZooKeeper pods should exist", zkPods.size(), is(0));
// Roll Kafka
LOGGER.info("Force Rolling Update of Kafka via read-only configuration change.");
Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getKafka().getConfig().put("log.retention.hours", 72), INFRA_NAMESPACE);
LOGGER.info("Wait for next reconciliation to happen.");
RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
LOGGER.info("Waiting for clients to finish sending/receiving messages.");
// FIX: the client jobs were configured with 'messageCount' (180) messages, but the wait
// previously used the unrelated MESSAGE_COUNT constant — wait for what was actually sent.
ClientUtils.waitForClientsSuccess(producerName, consumerName, INFRA_NAMESPACE, messageCount);
}
Use of io.fabric8.agent.model.Feature in project strimzi by strimzi:
the class FeatureGatesIsolatedST, method testControlPlaneListenerFeatureGate.
/**
 * Control Plane Listener
 * https://github.com/strimzi/proposals/blob/main/025-control-plain-listener.md
 *
 * Reinstalls the cluster operator with the ControlPlaneListener gate disabled, checks the
 * control-plane container port (9090/tcp) is still exposed, then exercises broker-pod
 * deletion and an annotation-triggered rolling update while clients keep messaging.
 */
@IsolatedTest("Feature Gates test for disabled ControlPlainListener")
@Tag(INTERNAL_CLIENTS_USED)
public void testControlPlaneListenerFeatureGate(ExtensionContext extensionContext) {
// Operator reinstall with custom env vars is not supported under OLM/Helm installs.
assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
// FIX: this selector targets Kafka broker pods (snapshot, delete, roll below), so it must
// be built from the Kafka StatefulSet name — it previously used zookeeperStatefulSetName,
// selecting ZooKeeper pods instead (compare testKRaftMode's kafkaSelector).
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
int messageCount = 300;
List<EnvVar> testEnvVars = new ArrayList<>();
int kafkaReplicas = 1;
testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-ControlPlaneListener", null));
// Reinstall the cluster operator with the gate disabled.
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(INFRA_NAMESPACE).withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES).withExtraEnvVars(testEnvVars).createInstallation().runInstallation();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
LOGGER.info("Check for presence of ContainerPort 9090/tcp (tcp-ctrlplane) in first Kafka pod.");
final Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), clusterName + "-kafka-").get(0);
ContainerPort expectedControlPlaneContainerPort = new ContainerPort(9090, null, null, "tcp-ctrlplane", "TCP");
List<ContainerPort> kafkaPodPorts = kafkaPod.getSpec().getContainers().get(0).getPorts();
assertTrue(kafkaPodPorts.contains(expectedControlPlaneContainerPort));
Map<String, String> kafkaPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), kafkaSelector);
LOGGER.info("Try to send some messages to Kafka over next few minutes.");
KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName).editSpec().withReplicas(kafkaReplicas).withPartitions(kafkaReplicas).endSpec().build();
resourceManager.createResource(extensionContext, kafkaTopic);
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder().withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(topicName).withMessageCount(messageCount).withDelayMs(500).withNamespaceName(clusterOperator.getDeploymentNamespace()).build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
JobUtils.waitForJobRunning(consumerName, clusterOperator.getDeploymentNamespace());
LOGGER.info("Delete first found Kafka broker pod.");
kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), kafkaPod);
RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
LOGGER.info("Force Rolling Update of Kafka via annotation.");
kafkaPods.keySet().forEach(podName -> {
kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod).editMetadata().addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true").endMetadata().build());
});
LOGGER.info("Wait for next reconciliation to happen.");
RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), kafkaSelector, kafkaReplicas, kafkaPods);
LOGGER.info("Waiting for clients to finish sending/receiving messages.");
// FIX: the client jobs were configured with 'messageCount' (300) messages, but the waits
// previously used the unrelated MESSAGE_COUNT constant — wait for what was actually sent.
ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), messageCount);
ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), messageCount);
}
Aggregations