Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project strimzi by strimzi.
The class KafkaCluster, method fromAssembly.
/**
* Create a Kafka cluster from the deployed StatefulSet resource
*
* @param ss The StatefulSet from which the cluster state should be recovered.
* @param namespace Kubernetes/OpenShift namespace to which the cluster resources belong
* @param cluster overall cluster name
* @return Kafka cluster instance
*/
public static KafkaCluster fromAssembly(StatefulSet ss, String namespace, String cluster) {
    KafkaCluster kafka = new KafkaCluster(namespace, cluster, Labels.fromResource(ss));
    kafka.setReplicas(ss.getSpec().getReplicas());
    Container container = ss.getSpec().getTemplate().getSpec().getContainers().get(0);
    kafka.setImage(container.getImage());
    kafka.setHealthCheckInitialDelay(container.getReadinessProbe().getInitialDelaySeconds());
    kafka.setHealthCheckTimeout(container.getReadinessProbe().getTimeoutSeconds());
    Map<String, String> vars = containerEnvVars(container);
    kafka.setZookeeperConnect(vars.getOrDefault(KEY_KAFKA_ZOOKEEPER_CONNECT, ss.getMetadata().getName() + "-zookeeper:2181"));
    kafka.setDefaultReplicationFactor(Integer.parseInt(vars.getOrDefault(KEY_KAFKA_DEFAULT_REPLICATION_FACTOR, String.valueOf(DEFAULT_KAFKA_DEFAULT_REPLICATION_FACTOR))));
    kafka.setOffsetsTopicReplicationFactor(Integer.parseInt(vars.getOrDefault(KEY_KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR, String.valueOf(DEFAULT_KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR))));
    kafka.setTransactionStateLogReplicationFactor(Integer.parseInt(vars.getOrDefault(KEY_KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR, String.valueOf(DEFAULT_KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR))));
    kafka.setMetricsEnabled(Boolean.parseBoolean(vars.getOrDefault(KEY_KAFKA_METRICS_ENABLED, String.valueOf(DEFAULT_KAFKA_METRICS_ENABLED))));
    if (kafka.isMetricsEnabled()) {
        kafka.setMetricsConfigName(metricConfigsName(cluster));
    }
    if (!ss.getSpec().getVolumeClaimTemplates().isEmpty()) {
        Storage storage = Storage.fromPersistentVolumeClaim(ss.getSpec().getVolumeClaimTemplates().get(0));
        if (ss.getMetadata().getAnnotations() != null) {
            String deleteClaimAnnotation = String.format("%s/%s", ClusterController.STRIMZI_CLUSTER_CONTROLLER_DOMAIN, Storage.DELETE_CLAIM_FIELD);
            storage.withDeleteClaim(Boolean.valueOf(ss.getMetadata().getAnnotations().computeIfAbsent(deleteClaimAnnotation, s -> "false")));
        }
        kafka.setStorage(storage);
    } else {
        Storage storage = new Storage(Storage.StorageType.EPHEMERAL);
        kafka.setStorage(storage);
    }
    return kafka;
}
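For orientation, here is a minimal sketch of how fromAssembly might be reached: an already deployed StatefulSet is fetched and the cluster model is rebuilt from it. The KafkaCluster.kafkaClusterName helper is an assumption made by analogy with ZookeeperCluster.zookeeperClusterName used in a later snippet, not a verified strimzi API.

// Minimal usage sketch (not from the strimzi sources): rebuild the cluster model
// from a StatefulSet that is already deployed, then inspect the recovered state.
// KafkaCluster.kafkaClusterName(...) is assumed here by analogy with
// ZookeeperCluster.zookeeperClusterName(...) shown further below.
StatefulSet ss = kafkaSetOperations.get(namespace, KafkaCluster.kafkaClusterName(name));
KafkaCluster kafka = ss == null ? null : KafkaCluster.fromAssembly(ss, namespace, name);
if (kafka != null && kafka.isMetricsEnabled()) {
    // the recovered model carries the metrics ConfigMap name derived from the cluster name
    String metricsConfigMapName = kafka.getMetricsConfigName();
}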
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project strimzi by strimzi.
The class KafkaAssemblyOperator, method createOrUpdateKafka.
private final Future<Void> createOrUpdateKafka(Reconciliation reconciliation, ConfigMap assemblyCm) {
    String namespace = assemblyCm.getMetadata().getNamespace();
    String name = assemblyCm.getMetadata().getName();
    log.info("{}: create/update kafka {}", reconciliation, name);
    KafkaCluster kafka = KafkaCluster.fromConfigMap(assemblyCm);
    Service service = kafka.generateService();
    Service headlessService = kafka.generateHeadlessService();
    ConfigMap metricsConfigMap = kafka.generateMetricsConfigMap();
    StatefulSet statefulSet = kafka.generateStatefulSet(isOpenShift);
    Future<Void> chainFuture = Future.future();
    kafkaSetOperations.scaleDown(namespace, kafka.getName(), kafka.getReplicas())
        .compose(scale -> serviceOperations.reconcile(namespace, kafka.getName(), service))
        .compose(i -> serviceOperations.reconcile(namespace, kafka.getHeadlessName(), headlessService))
        .compose(i -> configMapOperations.reconcile(namespace, kafka.getMetricsConfigName(), metricsConfigMap))
        .compose(i -> kafkaSetOperations.reconcile(namespace, kafka.getName(), statefulSet))
        .compose(diffs -> {
            if (diffs instanceof ReconcileResult.Patched && ((ReconcileResult.Patched<Boolean>) diffs).differences()) {
                return kafkaSetOperations.rollingUpdate(namespace, kafka.getName());
            } else {
                return Future.succeededFuture();
            }
        })
        .compose(i -> kafkaSetOperations.scaleUp(namespace, kafka.getName(), kafka.getReplicas()))
        .compose(scale -> serviceOperations.endpointReadiness(namespace, service, 1_000, operationTimeoutMs))
        .compose(i -> serviceOperations.endpointReadiness(namespace, headlessService, 1_000, operationTimeoutMs))
        .compose(chainFuture::complete, chainFuture);
    return chainFuture;
}
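The long compose chain above follows the Vert.x 3 pattern in which each step runs only after the previous future succeeds, and the final two-argument compose routes either the result or the first failure into chainFuture. A stripped-down sketch of the same idiom, with hypothetical step1()/step2() helpers standing in for the real operations:

// Reduced sketch of the same Vert.x 3 chaining idiom; step1() and step2() are
// hypothetical helpers, each returning a Future (step2() returning Future<Void>).
Future<Void> chainFuture = Future.future();
step1()
    // runs only if step1() succeeded
    .compose(ignored -> step2())
    // completes chainFuture on success, fails it if any earlier step failed
    .compose(chainFuture::complete, chainFuture);
return chainFuture;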
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project strimzi by strimzi.
The class KafkaAssemblyOperator, method deleteZk.
private final Future<CompositeFuture> deleteZk(Reconciliation reconciliation) {
    String namespace = reconciliation.namespace();
    String name = reconciliation.assemblyName();
    log.info("{}: delete zookeeper {}", reconciliation, name);
    StatefulSet ss = zkSetOperations.get(namespace, ZookeeperCluster.zookeeperClusterName(name));
    ZookeeperCluster zk = ss == null ? null : ZookeeperCluster.fromAssembly(ss, namespace, name);
    boolean deleteClaims = zk != null && zk.getStorage().type() == Storage.StorageType.PERSISTENT_CLAIM && zk.getStorage().isDeleteClaim();
    List<Future> result = new ArrayList<>(4 + (deleteClaims ? zk.getReplicas() : 0));
    result.add(configMapOperations.reconcile(namespace, ZookeeperCluster.zookeeperMetricsName(name), null));
    result.add(serviceOperations.reconcile(namespace, ZookeeperCluster.zookeeperClusterName(name), null));
    result.add(serviceOperations.reconcile(namespace, ZookeeperCluster.zookeeperHeadlessName(name), null));
    result.add(zkSetOperations.reconcile(namespace, ZookeeperCluster.zookeeperClusterName(name), null));
    if (deleteClaims) {
        for (int i = 0; i < zk.getReplicas(); i++) {
            result.add(pvcOperations.reconcile(namespace, zk.getPersistentVolumeClaimName(i), null));
        }
    }
    return CompositeFuture.join(result);
}
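In this operator model, reconciling a resource against null is a deletion, and CompositeFuture.join waits for every deletion to finish, failing only after all of them have completed if any failed. A minimal sketch of the same pattern with hypothetical names (the kafkaName variable is illustrative; this is not the actual strimzi delete method for the Kafka side):

// Hypothetical sketch of the same "reconcile to null" delete pattern.
List<Future> result = new ArrayList<>(2);
// passing null as the desired state deletes the resource if it exists
result.add(serviceOperations.reconcile(namespace, kafkaName, null));
result.add(kafkaSetOperations.reconcile(namespace, kafkaName, null));
// join waits for all futures; it fails only after all of them have completed
return CompositeFuture.join(result);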
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project strimzi by strimzi.
The class KafkaClusterTest, method testGenerateStatefulSet.
@Test
public void testGenerateStatefulSet() {
    // We expect a single statefulSet ...
    StatefulSet ss = kc.generateStatefulSet(true);
    checkStatefulSet(ss);
}
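checkStatefulSet is a helper in the same test class whose body is not shown in this snippet. As a hedged illustration only, a hypothetical helper of this shape might assert that the generated StatefulSet reflects the cluster model (kc) it was built from; this is not the actual strimzi implementation:

// Hypothetical sketch of the kind of assertions such a helper could make
// (JUnit 4 assertEquals assumed); not the real checkStatefulSet.
private void checkStatefulSet(StatefulSet ss) {
    assertEquals(kc.getName(), ss.getMetadata().getName());
    assertEquals(kc.getReplicas(), ss.getSpec().getReplicas().intValue());
    Container container = ss.getSpec().getTemplate().getSpec().getContainers().get(0);
    assertEquals(kc.getImage(), container.getImage());
}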
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project strimzi by strimzi.
The class KafkaAssemblyOperatorMockIT, method assertStorageClass.
private void assertStorageClass(TestContext context, String statefulSetName, String expectedClass) {
    StatefulSet statefulSet = mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(statefulSetName).get();
    context.assertNotNull(statefulSet);
    // Check that the storage class of the first volume claim template matches the expected one
    List<PersistentVolumeClaim> volumeClaimTemplates = statefulSet.getSpec().getVolumeClaimTemplates();
    context.assertFalse(volumeClaimTemplates.isEmpty());
    context.assertEquals(expectedClass, volumeClaimTemplates.get(0).getSpec().getStorageClassName());
}
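A hedged sketch of how such a helper might be invoked from a vertx-unit test; the "my-cluster-kafka" StatefulSet name and the "foo" storage class are illustrative placeholders rather than values from the actual test:

// Hypothetical usage of the helper above; names and values are placeholders.
@Test
public void testStorageClassUnchangedAfterReconcile(TestContext context) {
    assertStorageClass(context, "my-cluster-kafka", "foo");
    // ... trigger a reconciliation that must not change the storage class ...
    assertStorageClass(context, "my-cluster-kafka", "foo");
}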