Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project jointware by isdream.
From the class KubernetesKeyValueStyleGeneratorTest, method testKubernetesWithAllKind:
protected static void testKubernetesWithAllKind() throws Exception {
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ServiceAccount());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ThirdPartyResource());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ResourceQuota());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Node());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ConfigMap());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new NetworkPolicy());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new CustomResourceDefinition());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Ingress());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Service());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Namespace());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Secret());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new LimitRange());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Event());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new PersistentVolume());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new StatefulSet());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new PersistentVolumeClaim());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new DaemonSet());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new HorizontalPodAutoscaler());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Pod());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ReplicaSet());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Job());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new ReplicationController());
    info(KUBERNETES_KIND, KubernetesDocumentKeyValueStyleGenerator.class.getName(), new Deployment());
}
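The test above only needs default-constructed model instances, presumably because the generator works from the model types rather than from populated fields. For reference, the same StatefulSet class from the fabric8 extensions package follows the usual fluent-builder convention; a minimal sketch of populating one (illustrative name and values, not taken from the test) might look like:

    StatefulSet statefulSet = new StatefulSetBuilder()
        .withNewMetadata()
            .withName("demo")
        .endMetadata()
        .withNewSpec()
            .withServiceName("demo")
            .withReplicas(3)
        .endSpec()
        .build();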
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project fabric8-maven-plugin by fabric8io.
From the class DefaultControllerEnricher, method addMissingResources:
@Override
public void addMissingResources(KubernetesListBuilder builder) {
    final String name = getConfig(Config.name, MavenUtil.createDefaultResourceName(getProject()));
    final ResourceConfig config = new ResourceConfig.Builder()
        .controllerName(name)
        .imagePullPolicy(getConfig(Config.pullPolicy))
        .withReplicas(Configs.asInt(getConfig(Config.replicaCount)))
        .build();
    final List<ImageConfiguration> images = getImages();
    // Check whether at least one pod controller kind is already present; if not, add a default one
    if (!KubernetesResourceUtil.checkForKind(builder, POD_CONTROLLER_KINDS)) {
        // At least one image must be present, otherwise the resulting config will be invalid
        if (!Lists.isNullOrEmpty(images)) {
            String type = getConfig(Config.type);
            if ("deployment".equalsIgnoreCase(type)) {
                log.info("Adding a default Deployment");
                builder.addToDeploymentItems(deployHandler.getDeployment(config, images));
            } else if ("statefulSet".equalsIgnoreCase(type)) {
                log.info("Adding a default StatefulSet");
                builder.addToStatefulSetItems(statefulSetHandler.getStatefulSet(config, images));
            } else if ("daemonSet".equalsIgnoreCase(type)) {
                log.info("Adding a default DaemonSet");
                builder.addToDaemonSetItems(daemonSetHandler.getDaemonSet(config, images));
            } else if ("replicaSet".equalsIgnoreCase(type)) {
                log.info("Adding a default ReplicaSet");
                builder.addToReplicaSetItems(rsHandler.getReplicaSet(config, images));
            } else if ("replicationController".equalsIgnoreCase(type)) {
                log.info("Adding a default ReplicationController");
                builder.addToReplicationControllerItems(rcHandler.getReplicationController(config, images));
            } else if ("job".equalsIgnoreCase(type)) {
                log.info("Adding a default Job");
                builder.addToJobItems(jobHandler.getJob(config, images));
            }
        }
    } else if (KubernetesResourceUtil.checkForKind(builder, "StatefulSet")) {
        final StatefulSetSpec spec = statefulSetHandler.getStatefulSet(config, images).getSpec();
        if (spec != null) {
            builder.accept(new TypedVisitor<StatefulSetBuilder>() {
                @Override
                public void visit(StatefulSetBuilder statefulSetBuilder) {
                    statefulSetBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec().endSpec().endTemplate().endSpec();
                    mergeStatefulSetSpec(statefulSetBuilder, spec);
                }
            });
            if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
                final PodSpec podSpec = spec.getTemplate().getSpec();
                builder.accept(new TypedVisitor<PodSpecBuilder>() {
                    @Override
                    public void visit(PodSpecBuilder builder) {
                        KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
                    }
                });
            }
        }
    } else {
        final DeploymentSpec spec = deployHandler.getDeployment(config, images).getSpec();
        if (spec != null) {
            builder.accept(new TypedVisitor<DeploymentBuilder>() {
                @Override
                public void visit(DeploymentBuilder deploymentBuilder) {
                    deploymentBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec().endSpec().endTemplate().endSpec();
                    mergeDeploymentSpec(deploymentBuilder, spec);
                }
            });
            if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
                final PodSpec podSpec = spec.getTemplate().getSpec();
                builder.accept(new TypedVisitor<PodSpecBuilder>() {
                    @Override
                    public void visit(PodSpecBuilder builder) {
                        KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
                    }
                });
            }
        }
    }
}
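A distilled look at the visitor idiom the enricher relies on: TypedVisitor applies a callback only to builders of a matching type inside the KubernetesListBuilder, and the editOrNewSpec()/editOrNewTemplate() calls create the nested spec/template/spec structure on demand so the subsequent merge never hits a null. A minimal, self-contained sketch (the resource name is illustrative, not from the plugin):

    KubernetesListBuilder listBuilder = new KubernetesListBuilder();
    listBuilder.addToStatefulSetItems(new StatefulSetBuilder()
        .withNewMetadata().withName("example").endMetadata()
        .build());
    listBuilder.accept(new TypedVisitor<StatefulSetBuilder>() {
        @Override
        public void visit(StatefulSetBuilder b) {
            // Creates spec.template.spec if it does not exist yet, then closes the chain
            b.editOrNewSpec().editOrNewTemplate().editOrNewSpec().endSpec().endTemplate().endSpec();
        }
    });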
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project fabric8-maven-plugin by fabric8io.
From the class StatefulSetHandlerTest, method statefulSetHandlerTest:
@Test
public void statefulSetHandlerTest() {
    ContainerHandler containerHandler = new ContainerHandler(project, envVarHandler, probeHandler);
    PodTemplateHandler podTemplateHandler = new PodTemplateHandler(containerHandler);
    StatefulSetHandler statefulSetHandler = new StatefulSetHandler(podTemplateHandler);
    ResourceConfig config = new ResourceConfig.Builder()
        .imagePullPolicy("IfNotPresent")
        .controllerName("testing")
        .withServiceAccount("test-account")
        .withReplicas(5)
        .volumes(volumes1)
        .build();
    StatefulSet statefulSet = statefulSetHandler.getStatefulSet(config, images);
    // Assertions
    assertNotNull(statefulSet.getSpec());
    assertNotNull(statefulSet.getMetadata());
    assertEquals(5, statefulSet.getSpec().getReplicas().intValue());
    assertNotNull(statefulSet.getSpec().getTemplate());
    assertEquals("testing", statefulSet.getMetadata().getName());
    assertEquals("testing", statefulSet.getSpec().getServiceName());
    assertEquals("test-account", statefulSet.getSpec().getTemplate().getSpec().getServiceAccountName());
    assertFalse(statefulSet.getSpec().getTemplate().getSpec().getVolumes().isEmpty());
    assertEquals("test", statefulSet.getSpec().getTemplate().getSpec().getVolumes().get(0).getName());
    assertEquals("/test/path", statefulSet.getSpec().getTemplate().getSpec().getVolumes().get(0).getHostPath().getPath());
    assertNotNull(statefulSet.getSpec().getTemplate().getSpec().getContainers());
}
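The assertions show that the handler reuses the controller name ("testing") as both metadata.name and spec.serviceName, and that the volumes1 fixture carries a single host-path volume. Expressed with the plain fabric8 model builders, an equivalent volume (the fixture content is assumed from the assertions, it is not shown in the test) would be:

    Volume volume = new VolumeBuilder()
        .withName("test")
        .withNewHostPath()
            .withPath("/test/path")
        .endHostPath()
        .build();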
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project strimzi by strimzi.
From the class KafkaAssemblyOperatorTest, method updateCluster:
private void updateCluster(TestContext context, ConfigMap originalCm, ConfigMap clusterCm, boolean kafkaRolling, boolean zkRolling) {
    KafkaCluster originalKafkaCluster = KafkaCluster.fromConfigMap(originalCm);
    KafkaCluster updatedKafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
    ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromConfigMap(originalCm);
    ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
    TopicController originalTopicController = TopicController.fromConfigMap(originalCm);
    // Create the CM, Service, headless service, StatefulSet and so on
    ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
    ServiceOperator mockServiceOps = mock(ServiceOperator.class);
    ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
    KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
    PvcOperator mockPvcOps = mock(PvcOperator.class);
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    String clusterCmName = clusterCm.getMetadata().getName();
    String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
    // Mock CM get
    when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    ConfigMap metricsCm = new ConfigMapBuilder()
        .withNewMetadata()
            .withName(KafkaCluster.metricConfigsName(clusterCmName))
            .withNamespace(clusterCmNamespace)
        .endMetadata()
        .withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG))
        .build();
    when(mockCmOps.get(clusterCmNamespace, KafkaCluster.metricConfigsName(clusterCmName))).thenReturn(metricsCm);
    ConfigMap zkMetricsCm = new ConfigMapBuilder()
        .withNewMetadata()
            .withName(ZookeeperCluster.zookeeperMetricsName(clusterCmName))
            .withNamespace(clusterCmNamespace)
        .endMetadata()
        .withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG))
        .build();
    when(mockCmOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperMetricsName(clusterCmName))).thenReturn(zkMetricsCm);
    // Mock Service gets
    when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateService());
    when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.headlessName(clusterCmName))).thenReturn(originalKafkaCluster.generateHeadlessService());
    when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateService());
    when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperHeadlessName(clusterCmName))).thenReturn(originalZookeeperCluster.generateHeadlessService());
    when(mockServiceOps.endpointReadiness(eq(clusterCmNamespace), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    // Mock StatefulSet get
    when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateStatefulSet(openShift));
    when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalZookeeperCluster.generateStatefulSet(openShift));
    // Mock Deployment get
    if (originalTopicController != null) {
        when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(originalTopicController.generateDeployment());
    }
    // Mock CM patch
    Set<String> metricsCms = set();
    doAnswer(invocation -> {
        metricsCms.add(invocation.getArgument(1));
        return Future.succeededFuture();
    }).when(mockCmOps).reconcile(eq(clusterCmNamespace), anyString(), any());
    // Mock Service patch (both service and headless service)
    ArgumentCaptor<String> patchedServicesCaptor = ArgumentCaptor.forClass(String.class);
    when(mockServiceOps.reconcile(eq(clusterCmNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    // Mock StatefulSet patch
    when(mockZsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(zkRolling)));
    when(mockKsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(kafkaRolling)));
    // Mock StatefulSet rollingUpdate
    Set<String> rollingRestarts = set();
    // Mock StatefulSet scaleUp
    ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
    when(mockZsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    // Mock StatefulSet scaleDown
    ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
    when(mockZsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    when(mockZsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
        if (!zkRolling) {
            context.fail("Unexpected rolling update");
        }
        return Future.succeededFuture();
    });
    // The scaledUpCaptor and scaledDownCaptor declared above are reused for the Kafka mocks
    when(mockKsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    when(mockKsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    when(mockKsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
        if (!kafkaRolling) {
            context.fail("Unexpected rolling update");
        }
        return Future.succeededFuture();
    });
    // Mock Deployment patch
    ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
    when(mockDepOps.reconcile(anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
    // Now try to update a KafkaCluster based on this CM
    Async async = context.async();
    ops.createOrUpdate(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), clusterCm, createResult -> {
        if (createResult.failed()) {
            createResult.cause().printStackTrace();
        }
        context.assertTrue(createResult.succeeded());
        // Rolling restart
        Set<String> expectedRollingRestarts = set();
        if (KafkaSetOperator.needsRollingUpdate(new StatefulSetDiff(originalKafkaCluster.generateStatefulSet(openShift), updatedKafkaCluster.generateStatefulSet(openShift)))) {
            expectedRollingRestarts.add(originalKafkaCluster.getName());
        }
        if (ZookeeperSetOperator.needsRollingUpdate(new StatefulSetDiff(originalZookeeperCluster.generateStatefulSet(openShift), updatedZookeeperCluster.generateStatefulSet(openShift)))) {
            expectedRollingRestarts.add(originalZookeeperCluster.getName());
        }
        // No metrics config => no CMs created
        verify(mockCmOps, never()).createOrUpdate(any());
        verifyNoMoreInteractions(mockPvcOps);
        async.complete();
    });
}
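One detail worth pulling out of the stubs above: the rollingUpdate mocks double as assertions, marking the test failed the moment a rolling update happens that the kafkaRolling/zkRolling flags did not predict. Reduced to its essentials (mock and flag names here are illustrative, not from the test):

    when(mockOps.rollingUpdate(anyString(), anyString())).thenAnswer(invocation -> {
        if (!rollingUpdateExpected) {
            context.fail("Unexpected rolling update");  // marks the test as failed
        }
        return Future.succeededFuture();                // otherwise pretend the roll succeeded
    });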
Use of io.fabric8.kubernetes.api.model.extensions.StatefulSet in project strimzi by strimzi.
From the class KafkaAssemblyOperatorTest, method deleteCluster:
private void deleteCluster(TestContext context, ConfigMap clusterCm) {
    ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
    KafkaCluster kafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
    TopicController topicController = TopicController.fromConfigMap(clusterCm);
    // Create the CM, Service, headless service and StatefulSet
    ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
    ServiceOperator mockServiceOps = mock(ServiceOperator.class);
    ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
    KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
    PvcOperator mockPvcOps = mock(PvcOperator.class);
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    String clusterCmName = clusterCm.getMetadata().getName();
    String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
    StatefulSet kafkaSs = kafkaCluster.generateStatefulSet(true);
    StatefulSet zkSs = zookeeperCluster.generateStatefulSet(true);
    when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(kafkaSs);
    when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(zkSs);
    when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    ArgumentCaptor<String> serviceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> ssCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> metricsCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.reconcile(eq(clusterCmNamespace), metricsCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockServiceOps.reconcile(eq(clusterCmNamespace), serviceCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockKsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockZsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> pvcCaptor = ArgumentCaptor.forClass(String.class);
    when(mockPvcOps.reconcile(eq(clusterCmNamespace), pvcCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
    when(mockDepOps.reconcile(eq(clusterCmNamespace), depCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    if (topicController != null) {
        Deployment tcDep = topicController.generateDeployment();
        when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(tcDep);
    }
    KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
    // Now try to delete a KafkaCluster based on this CM
    Async async = context.async();
    ops.delete(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), createResult -> {
        context.assertTrue(createResult.succeeded());
        /*Set<String> metricsNames = new HashSet<>();
        if (kafkaCluster.isMetricsEnabled()) {
            metricsNames.add(KafkaCluster.metricConfigsName(clusterCmName));
        }
        if (zookeeperCluster.isMetricsEnabled()) {
            metricsNames.add(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
        }
        context.assertEquals(metricsNames, captured(metricsCaptor));*/
        verify(mockZsOps).reconcile(eq(clusterCmNamespace), eq(ZookeeperCluster.zookeeperClusterName(clusterCmName)), isNull());
        context.assertEquals(set(ZookeeperCluster.zookeeperHeadlessName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName), KafkaCluster.kafkaClusterName(clusterCmName), KafkaCluster.headlessName(clusterCmName)), captured(serviceCaptor));
        // Verify the deleted StatefulSets
        context.assertEquals(set(zookeeperCluster.getName(), kafkaCluster.getName()), captured(ssCaptor));
        // PvcOperations are only used for deletion
        Set<String> expectedPvcDeletions = new HashSet<>();
        for (int i = 0; deleteClaim && i < kafkaCluster.getReplicas(); i++) {
            expectedPvcDeletions.add("data-" + clusterCmName + "-kafka-" + i);
        }
        for (int i = 0; deleteClaim && i < zookeeperCluster.getReplicas(); i++) {
            expectedPvcDeletions.add("data-" + clusterCmName + "-zookeeper-" + i);
        }
        context.assertEquals(expectedPvcDeletions, captured(pvcCaptor));
        // Only expected if topic controller configuration was defined in the CM
        if (topicController != null) {
            Set<String> expectedDepNames = new HashSet<>();
            expectedDepNames.add(TopicController.topicControllerName(clusterCmName));
            context.assertEquals(expectedDepNames, captured(depCaptor));
        }
        async.complete();
    });
}
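The captured(...) helper used in the assertions presumably collapses an ArgumentCaptor's recorded values into a Set; with plain Mockito the same check on the deleted StatefulSets could be written without it (a sketch reusing the ssCaptor stubbing from above):

    ArgumentCaptor<String> ssCaptor = ArgumentCaptor.forClass(String.class);
    when(mockKsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    when(mockZsOps.reconcile(anyString(), ssCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());
    // ... run ops.delete(...) and wait for the callback ...
    Set<String> reconciledStatefulSets = new HashSet<>(ssCaptor.getAllValues());
    context.assertEquals(set(zookeeperCluster.getName(), kafkaCluster.getName()), reconciledStatefulSets);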