Use of io.fabric8.karaf.checks.Check in project fabric8 by jboss-fuse.
The class ProfileEditTest, method testManipulatePid.
@Test
public void testManipulatePid() throws Exception {
    System.err.println(CommandSupport.executeCommand("fabric:create --force --clean -n --wait-for-provisioning"));
    BundleContext syscontext = ServiceLocator.getSystemContext();
    ServiceProxy<FabricService> proxy = ServiceProxy.createServiceProxy(syscontext, FabricService.class);
    try {
        FabricService fabric = proxy.getService();

        // Plain assignment creates the pid/key in the default profile.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --pid my.pid/key=value default"));
        assertPidProperty(fabric, "my.pid", "key", "value");

        // --append joins the new value onto the existing one with a comma.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --append --pid my.pid/key=othervalue default"));
        assertPidProperty(fabric, "my.pid", "key", "value,othervalue");

        // --remove strips a single value from the comma-separated list.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --remove --pid my.pid/key=value default"));
        assertPidProperty(fabric, "my.pid", "key", "othervalue");

        // Removing the last value leaves the key present with an empty value.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --remove --pid my.pid/key=othervalue default"));
        assertPidProperty(fabric, "my.pid", "key", "");

        // A '=' inside the assigned value is kept verbatim.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --pid my.pid/key=prop1=value1 default"));
        assertPidProperty(fabric, "my.pid", "key", "prop1=value1");

        // Several --pid options in one invocation set several keys.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --pid my.pid/key1=value1 --pid my.pid/key2=value2 default"));
        assertPidProperty(fabric, "my.pid", "key1", "value1");
        assertPidProperty(fabric, "my.pid", "key2", "value2");

        // --import-pid copies a live config-admin pid into the profile.
        System.err.println(CommandSupport.executeCommands("config:edit my.pid2", "config:propset key1 value1", "config:propset key2 value2", "config:update"));
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --pid my.pid2 --import-pid default"));
        Profile profile = fabric.getRequiredDefaultVersion().getRequiredProfile("default");
        System.out.println("my.pid2 => " + profile.getConfigurations().get("my.pid2"));
        assertPidProperty(fabric, "my.pid2", "key1", "value1");
        assertPidProperty(fabric, "my.pid2", "key2", "value2");

        // --delete with pid/key removes just that property.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --pid my.pid2/key1 --delete default"));
        profile = fabric.getRequiredDefaultVersion().getRequiredProfile("default");
        Assert.assertFalse(profile.getConfiguration("my.pid2").containsKey("key1"));

        // --delete with only the pid removes the whole configuration.
        System.err.println(CommandSupport.executeCommand("fabric:profile-edit --pid my.pid2 --delete default"));
        profile = fabric.getRequiredDefaultVersion().getRequiredProfile("default");
        Assert.assertFalse(profile.getConfigurations().containsKey("my.pid2"));
    } finally {
        proxy.close();
    }
}

/**
 * Re-reads the "default" profile from the required default version and asserts
 * that configuration {@code pid} contains {@code key} mapped to {@code expected}.
 */
private static void assertPidProperty(FabricService fabric, String pid, String key, String expected) {
    Profile profile = fabric.getRequiredDefaultVersion().getRequiredProfile("default");
    Assert.assertNotNull(profile);
    Map<String, Map<String, String>> configurations = profile.getConfigurations();
    Assert.assertNotNull(configurations);
    Assert.assertTrue(configurations.containsKey(pid));
    Map<String, String> properties = configurations.get(pid);
    Assert.assertNotNull(properties);
    Assert.assertTrue(properties.containsKey(key));
    Assert.assertEquals(expected, properties.get(key));
}
Use of io.fabric8.karaf.checks.Check in project strimzi by strimzi.
The class KafkaClusterTest, method testClusterFromStatefulSet.
/**
 * Verifies that a KafkaCluster recovered from a generated StatefulSet is
 * equivalent to the one originally built from the ConfigMap.
 */
@Test
public void testClusterFromStatefulSet() {
    StatefulSet statefulSet = kc.generateStatefulSet(true);
    KafkaCluster recovered = KafkaCluster.fromAssembly(statefulSet, namespace, cluster);
    // The metrics ConfigMap is deliberately not compared: it cannot be
    // restored from the StatefulSet alone.
    checkService(recovered.generateService());
    checkHeadlessService(recovered.generateHeadlessService());
    checkStatefulSet(recovered.generateStatefulSet(true));
}
Use of io.fabric8.karaf.checks.Check in project strimzi by strimzi.
The class KafkaAssemblyOperatorMockIT, method testUpdateKafkaWithChangedPersistentVolume.
@Test
public void testUpdateKafkaWithChangedPersistentVolume(TestContext context) {
    // Only meaningful for persistent-claim storage; skip for anything else.
    if (!Storage.StorageType.PERSISTENT_CLAIM.equals(storageType(kafkaStorage))) {
        LOGGER.info("Skipping claim-based test because using storage type {}", kafkaStorage);
        return;
    }
    KafkaAssemblyOperator kco = createCluster(context);
    String originalStorageClass = storageClass(kafkaStorage);
    assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalStorageClass);

    Async updateAsync = context.async();
    // Patch the cluster ConfigMap with a different storage class.
    String changedClass = originalStorageClass + "2";
    JsonObject changedStorage = new JsonObject(kafkaStorage.toString()).put(Storage.STORAGE_CLASS_FIELD, changedClass);
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_STORAGE, changedStorage.toString());
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);

    LOGGER.info("Updating with changed storage class");
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        // The storage class of an existing claim must not be mutated by reconciliation.
        assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalStorageClass);
        updateAsync.complete();
    });
}
Use of io.fabric8.karaf.checks.Check in project strimzi by strimzi.
The class OpenShiftTemplatesTest, method testStrimziEphemeralWithCustomParameters.
@Test
public void testStrimziEphemeralWithCustomParameters() {
    String clusterName = "test-ephemeral-with-custom-parameters";
    // Instantiate the ephemeral template with non-default health-check and
    // replication-factor parameters.
    oc.newApp("strimzi-ephemeral", map(
            "CLUSTER_NAME", clusterName,
            "ZOOKEEPER_HEALTHCHECK_DELAY", "30",
            "ZOOKEEPER_HEALTHCHECK_TIMEOUT", "10",
            "KAFKA_HEALTHCHECK_DELAY", "30",
            "KAFKA_HEALTHCHECK_TIMEOUT", "10",
            "KAFKA_DEFAULT_REPLICATION_FACTOR", "2",
            "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", "5",
            "KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", "5"));
    // TODO Add assertions to check that Kafka brokers have a custom configuration
    ConfigMap clusterCm = client.configMaps().inNamespace(NAMESPACE).withName(clusterName).get();
    assertNotNull(clusterCm);
    Map<String, String> clusterData = clusterCm.getData();
    assertEquals("30", clusterData.get("zookeeper-healthcheck-delay"));
    assertEquals("10", clusterData.get("zookeeper-healthcheck-timeout"));
    assertEquals("30", clusterData.get("kafka-healthcheck-delay"));
    assertEquals("10", clusterData.get("kafka-healthcheck-timeout"));
    assertEquals("2", clusterData.get("KAFKA_DEFAULT_REPLICATION_FACTOR"));
    assertEquals("5", clusterData.get("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR"));
    assertEquals("5", clusterData.get("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR"));
}
Use of io.fabric8.karaf.checks.Check in project strimzi by strimzi.
The class KafkaClusterTest, method testKafkaScaleUpScaleDown.
@Test
@KafkaCluster(name = "my-cluster", kafkaNodes = 3)
public void testKafkaScaleUpScaleDown() {
    // The kafka cluster is already deployed via the method-level annotation above.
    String clusterName = "my-cluster";
    LOGGER.info("Running kafkaScaleUpScaleDown {}", clusterName);
    // kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), 3);
    KubernetesClient client = new DefaultKubernetesClient();
    final int initialReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).get().getStatus().getReplicas();
    assertEquals(3, initialReplicas);

    // --- scale up by one broker ---
    final int scaleTo = initialReplicas + 1;
    final int newPodId = initialReplicas;
    final int newBrokerId = newPodId;
    final String newPodName = kafkaPodName(clusterName, newPodId);
    final String firstPodName = kafkaPodName(clusterName, 0);
    LOGGER.info("Scaling Kafka up to {}", scaleTo);
    replaceCm(clusterName, "kafka-nodes", String.valueOf(scaleTo));
    kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), scaleTo);

    // The new broker should have joined the cluster: its API-versions output
    // must mention every broker id. (Executed via bash so the env vars in the
    // pod are expanded.)
    String versions = getBrokerApiVersions(newPodName);
    for (int brokerId = 0; brokerId < scaleTo; brokerId++) {
        assertTrue(versions, versions.contains("(id: " + brokerId + " rack: "));
    }
    // TODO Check for k8s events, logs for errors

    // --- scale back down to the initial size ---
    LOGGER.info("Scaling down");
    // client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).scale(initialReplicas, true);
    replaceCm(clusterName, "kafka-nodes", String.valueOf(initialReplicas));
    kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), initialReplicas);
    final int finalReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).get().getStatus().getReplicas();
    assertEquals(initialReplicas, finalReplicas);

    // The removed broker must no longer appear in the API-versions output.
    versions = getBrokerApiVersions(firstPodName);
    assertTrue("Expect the added broker, " + newBrokerId + ", to no longer be present in output of kafka-broker-api-versions.sh", !versions.contains("(id: " + newBrokerId + " rack: "));
    // TODO Check for k8s events, logs for errors
}
Aggregations