Use of io.strimzi.controller.cluster.Reconciliation in the strimzi project.
From class KafkaAssemblyOperatorMockIT, method updateClusterWithoutStatefulSet.
/**
 * Deletes the named StatefulSet from the mock cluster and verifies that a
 * subsequent reconciliation recreates it.
 *
 * @param context     the vertx-unit test context
 * @param statefulSet the name of the StatefulSet to delete and expect to be recreated
 */
private void updateClusterWithoutStatefulSet(TestContext context, String statefulSet) {
    KafkaAssemblyOperator kco = createCluster(context);
    mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(statefulSet).delete();
    // Use context.assert* (not JUnit asserts) so failures are reported to vertx-unit,
    // consistent with the other test methods in this class.
    context.assertNull(mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(statefulSet).get(),
            "Expected ss " + statefulSet + " to be not exist");
    LOGGER.info("Reconciling again -> update");
    Async updateAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        // context.assertNotNull, so a failure on the handler thread fails the test
        context.assertNotNull(mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(statefulSet).get(),
                "Expected ss " + statefulSet + " to have been recreated");
        updateAsync.complete();
    });
    // Block until reconciliation completes so callers observe the recreated StatefulSet
    updateAsync.await();
}
Use of io.strimzi.controller.cluster.Reconciliation in the strimzi project.
From class KafkaAssemblyOperatorMockIT, method testUpdateKafkaWithChangedDeleteClaim.
/**
 * Test that we can change the deleteClaim flag, and that it's honoured:
 * after flipping the Kafka deleteClaim flag and deleting the cluster,
 * exactly the PVCs whose effective deleteClaim is false should survive.
 */
@Test
public void testUpdateKafkaWithChangedDeleteClaim(TestContext context) {
    if (!Storage.StorageType.PERSISTENT_CLAIM.equals(storageType(kafkaStorage))) {
        LOGGER.info("Skipping claim-based test because using storage type {}", kafkaStorage);
        return;
    }
    Set<String> kafkaPvcs = createPvcs(kafkaStorage, kafkaReplicas, podId -> KafkaCluster.getPersistentVolumeClaimName(KafkaCluster.kafkaClusterName(CLUSTER_NAME), podId));
    Set<String> zkPvcs = createPvcs(zkStorage, zkReplicas, podId -> ZookeeperCluster.getPersistentVolumeClaimName(ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME), podId));
    KafkaAssemblyOperator kco = createCluster(context);
    boolean originalKafkaDeleteClaim = deleteClaim(kafkaStorage);
    // Flip the Kafka deleteClaim flag in the cluster ConfigMap
    boolean changedKafkaDeleteClaim = !originalKafkaDeleteClaim;
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_STORAGE, new JsonObject(kafkaStorage.toString()).put(Storage.DELETE_CLAIM_FIELD, changedKafkaDeleteClaim).toString());
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
    LOGGER.info("Updating with changed delete claim");
    Async updateAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        updateAsync.complete();
    });
    updateAsync.await();

    // A cluster's PVCs survive deletion iff its (possibly updated) deleteClaim
    // flag is false. This replaces the previous hard-to-read nested ternary;
    // the four flag combinations produce the same expected sets as before.
    Set<String> expectedPvcs = new HashSet<>();
    if (!changedKafkaDeleteClaim) {
        expectedPvcs.addAll(kafkaPvcs);
    }
    if (!deleteClaim(zkStorage)) {
        expectedPvcs.addAll(zkPvcs);
    }

    LOGGER.info("Reconciling again -> delete");
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).delete();
    Async deleteAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        assertPvcs(context, expectedPvcs);
        deleteAsync.complete();
    });
}
Use of io.strimzi.controller.cluster.Reconciliation in the strimzi project.
From class KafkaAssemblyOperatorMockIT, method testKafkaScaleUp.
/**
 * Create a cluster from a Kafka Cluster CM, then bump the replica count in
 * the cluster ConfigMap and verify the scale-up is reconciled.
 */
@Test
public void testKafkaScaleUp(TestContext context) {
    KafkaAssemblyOperator kco = createCluster(context);
    Async updateAsync = context.async();

    int newScale = kafkaReplicas + 1;
    String newPod = KafkaCluster.kafkaPodName(CLUSTER_NAME, kafkaReplicas);
    // The pod the scale-up should create must not exist beforehand
    context.assertNull(mockClient.pods().inNamespace(NAMESPACE).withName(newPod).get());

    // Publish the increased replica count via the cluster ConfigMap
    HashMap<String, String> updatedData = new HashMap<>(cluster.getData());
    updatedData.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap updatedCm = new ConfigMapBuilder(cluster).withData(updatedData).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(updatedCm);

    LOGGER.info("Scaling up to {} Kafka pods", newScale);
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        // Both the StatefulSet spec and the new pod should reflect the new scale
        context.assertEquals(newScale,
                mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get().getSpec().getReplicas());
        context.assertNotNull(mockClient.pods().inNamespace(NAMESPACE).withName(newPod).get(),
                "Expected pod " + newPod + " to have been created");
        // TODO assert no rolling update
        updateAsync.complete();
    });
    updateAsync.await();
}
Use of io.strimzi.controller.cluster.Reconciliation in the strimzi project.
From class KafkaAssemblyOperatorMockIT, method testCreateUpdateDelete.
/**
 * Create a cluster from a Kafka Cluster CM, run a no-op update
 * reconciliation, then delete the CM and verify that only the resilient
 * PVCs remain after the delete reconciliation.
 */
@Test
public void testCreateUpdateDelete(TestContext context) {
    Set<String> expectedClaims = resilientPvcs();
    KafkaAssemblyOperator kco = createCluster(context);

    // Second reconciliation with nothing changed: effectively a no-op update
    LOGGER.info("Reconciling again -> update");
    Async updateAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        updateAsync.complete();
    });
    updateAsync.await();

    // Remove the cluster CM and reconcile once more; this triggers deletion,
    // after which only the resilient PVCs should still exist
    LOGGER.info("Reconciling again -> delete");
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).delete();
    Async deleteAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        assertPvcs(context, expectedClaims);
        deleteAsync.complete();
    });
}
Use of io.strimzi.controller.cluster.Reconciliation in the strimzi project.
From class KafkaAssemblyOperatorMockIT, method testKafkaScaleDown.
/**
 * Create a cluster from a Kafka Cluster CM, then lower the replica count in
 * the cluster ConfigMap and verify the scale-down is reconciled.
 */
@Test
public void testKafkaScaleDown(TestContext context) {
    if (kafkaReplicas <= 1) {
        LOGGER.info("Skipping scale down test because there's only 1 broker");
        return;
    }
    KafkaAssemblyOperator kco = createCluster(context);
    Async updateAsync = context.async();

    int newScale = kafkaReplicas - 1;
    String deletedPod = KafkaCluster.kafkaPodName(CLUSTER_NAME, newScale);
    // The highest-numbered pod must exist before the scale-down removes it
    context.assertNotNull(mockClient.pods().inNamespace(NAMESPACE).withName(deletedPod).get());

    // Publish the decreased replica count via the cluster ConfigMap
    HashMap<String, String> updatedData = new HashMap<>(cluster.getData());
    updatedData.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap updatedCm = new ConfigMapBuilder(cluster).withData(updatedData).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(updatedCm);

    LOGGER.info("Scaling down to {} Kafka pods", newScale);
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        // Both the StatefulSet spec and the removed pod should reflect the new scale
        context.assertEquals(newScale,
                mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get().getSpec().getReplicas());
        context.assertNull(mockClient.pods().inNamespace(NAMESPACE).withName(deletedPod).get(),
                "Expected pod " + deletedPod + " to have been deleted");
        // TODO assert no rolling update
        updateAsync.complete();
    });
    updateAsync.await();
}
Aggregations