Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi-kafka-operator by strimzi.
The class StatefulSetDiffTest, method testNewPvcNotIgnored:
@Test
public void testNewPvcNotIgnored() {
StatefulSet ss1 = new StatefulSetBuilder()
        .withNewMetadata()
            .withNamespace("test")
            .withName("foo")
        .endMetadata()
        .withNewSpec()
            .withNewTemplate()
                .withNewSpec()
                    .addToVolumes(0, new VolumeBuilder()
                            .withConfigMap(new ConfigMapVolumeSourceBuilder().withDefaultMode(1).build())
                            .build())
                .endSpec()
            .endTemplate()
            .withVolumeClaimTemplates(new PersistentVolumeClaimBuilder()
                    .withNewSpec()
                        .withNewResources()
                            .withRequests(singletonMap("storage", new Quantity("100Gi")))
                        .endResources()
                    .endSpec()
                    .build())
        .endSpec()
        .build();
StatefulSet ss2 = new StatefulSetBuilder()
        .withNewMetadata()
            .withNamespace("test")
            .withName("foo")
        .endMetadata()
        .withNewSpec()
            .withNewTemplate()
                .withNewSpec()
                    .addToVolumes(0, new VolumeBuilder()
                            .withConfigMap(new ConfigMapVolumeSourceBuilder().withDefaultMode(2).build())
                            .build())
                .endSpec()
            .endTemplate()
            .withVolumeClaimTemplates(
                    new PersistentVolumeClaimBuilder()
                            .withNewSpec()
                                .withNewResources()
                                    .withRequests(singletonMap("storage", new Quantity("100Gi")))
                                .endResources()
                            .endSpec()
                            .build(),
                    new PersistentVolumeClaimBuilder()
                            .withNewSpec()
                                .withNewResources()
                                    .withRequests(singletonMap("storage", new Quantity("110Gi")))
                                .endResources()
                            .endSpec()
                            .build())
        .endSpec()
        .build();
assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeClaimTemplates(), is(true));
assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeSize(), is(false));
}
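As a companion illustration (not part of the Strimzi test above), a minimal sketch of the inverse case: when only the requested storage size of an existing claim template changes, changesVolumeSize() would be expected to return true. The variable ss1 is reused from the test above; ss3 is a hypothetical name introduced here.
StatefulSet ss3 = new StatefulSetBuilder(ss1)
        .editSpec()
            // Same single claim template as ss1, but with a larger storage request
            .withVolumeClaimTemplates(new PersistentVolumeClaimBuilder()
                    .withNewSpec()
                        .withNewResources()
                            .withRequests(singletonMap("storage", new Quantity("200Gi")))
                        .endResources()
                    .endSpec()
                    .build())
        .endSpec()
        .build();
assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss3).changesVolumeSize(), is(true));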
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi-kafka-operator by strimzi.
The class MockKubeRegressionTest, method testStatefulSetCreationAndDeletion:
@Test
public void testStatefulSetCreationAndDeletion() {
StatefulSet sts = new StatefulSetBuilder()
        .withNewMetadata()
            .withName("foo")
            .withNamespace("ns")
        .endMetadata()
        .withNewSpec()
            .withReplicas(3)
            .withNewTemplate()
                .withNewMetadata()
                .endMetadata()
                .withNewSpec()
                .endSpec()
            .endTemplate()
        .endSpec()
        .build();
client.apps().statefulSets().inNamespace("ns").withName("foo").create(sts);
List<Pod> ns = client.pods().inNamespace("ns").list().getItems();
assertThat(ns, hasSize(3));
AtomicBoolean deleted = new AtomicBoolean(false);
AtomicBoolean recreated = new AtomicBoolean(false);
Watch watch = client.pods().inNamespace("ns").withName(ns.get(0).getMetadata().getName()).watch(new Watcher<Pod>() {
    @Override
    public void eventReceived(Action action, Pod resource) {
        if (action == Action.DELETED) {
            if (deleted.getAndSet(true)) {
                fail("Deleted twice");
            }
        } else if (action == Action.ADDED) {
            if (!deleted.get()) {
                fail("Created before deleted");
            }
            if (recreated.getAndSet(true)) {
                fail("Recreated twice");
            }
        }
    }

    @Override
    public void onClose(WatcherException cause) {
    }
});
client.pods().inNamespace("ns").withName(ns.get(0).getMetadata().getName()).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
assertThat(deleted.get(), is(true));
assertThat(recreated.get(), is(true));
watch.close();
ns = client.pods().inNamespace("ns").list().getItems();
assertThat(ns, hasSize(3));
client.apps().statefulSets().inNamespace("ns").withName("foo").withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
}
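A minimal alternative sketch, not taken from the test above: if the mock delivered watch events asynchronously rather than synchronously with the delete call, the same delete-and-recreate check could be expressed with a CountDownLatch instead of AtomicBoolean flags. Here podName stands in for ns.get(0).getMetadata().getName(), and the java.util.concurrent imports are assumed.
// Hypothetical variant: wait for one DELETED and one ADDED event instead of
// asserting the flags immediately after the delete call.
CountDownLatch deletedAndRecreated = new CountDownLatch(2);
Watch w = client.pods().inNamespace("ns").withName(podName).watch(new Watcher<Pod>() {
    @Override
    public void eventReceived(Action action, Pod resource) {
        if (action == Action.DELETED || action == Action.ADDED) {
            deletedAndRecreated.countDown();
        }
    }

    @Override
    public void onClose(WatcherException cause) {
    }
});
client.pods().inNamespace("ns").withName(podName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
// The enclosing test method would need to declare throws InterruptedException.
assertThat(deletedAndRecreated.await(30, TimeUnit.SECONDS), is(true));
w.close();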
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi-kafka-operator by strimzi.
The class TolerationsIT, method testEmptyStringValueIntoleration:
@Test
public void testEmptyStringValueIntoleration(VertxTestContext context) {
Toleration t1 = new TolerationBuilder().withEffect("NoSchedule").withValue("").build();
List<Toleration> tolerationList = new ArrayList<>();
tolerationList.add(t1);
// The Cluster Operator (CO) applies this to the generated StatefulSet (STS)
tolerationList = ModelUtils.removeEmptyValuesFromTolerations(tolerationList);
StatefulSet ss = new StatefulSetBuilder()
        .withNewMetadata()
            .withNamespace(namespace)
            .withName("foo")
        .endMetadata()
        .withNewSpec()
            .withSelector(new LabelSelectorBuilder().withMatchLabels(Collections.singletonMap("app", "test")).build())
            .withNewTemplate()
                .withNewMetadata()
                    .withLabels(Collections.singletonMap("app", "test"))
                .endMetadata()
                .withNewSpec()
                    .withTolerations(tolerationList)
                    .withDnsPolicy("ClusterFirst")
                    .withRestartPolicy("Always")
                    .withSchedulerName("default-scheduler")
                    .withSecurityContext(null)
                    .withTerminationGracePeriodSeconds(30L)
                .endSpec()
            .endTemplate()
        .endSpec()
        .build();
KubernetesClient client = new DefaultKubernetesClient();
client.apps().statefulSets().inNamespace(namespace).create(ss);
StatefulSet stsk8s = client.apps().statefulSets().inNamespace(namespace).withName("foo").get();
StatefulSetDiff diff = new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss, stsk8s);
Checkpoint checkpoint = context.checkpoint();
context.verify(() -> {
assertThat(diff.changesSpecTemplate(), is(false));
assertThat(stsk8s.getSpec().getTemplate().getSpec().getTolerations().get(0).getValue(), is(nullValue()));
assertThat(ss.getSpec().getTemplate().getSpec().getTolerations().get(0).getValue(), is(nullValue()));
checkpoint.flag();
});
}
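The test relies on ModelUtils.removeEmptyValuesFromTolerations to normalise the tolerations before the diff. A minimal sketch, under the assumption that the helper simply drops empty-string values: the Kubernetes API server returns an empty toleration value as null, so the operator nulls out empty strings to avoid a spurious difference.
static List<Toleration> removeEmptyValuesFromTolerations(List<Toleration> tolerations) {
    if (tolerations == null) {
        return null;
    }
    // Replace empty-string values with null so the locally generated StatefulSet
    // matches what the Kubernetes API server returns.
    tolerations.stream()
            .filter(toleration -> toleration.getValue() != null && toleration.getValue().isEmpty())
            .forEach(toleration -> toleration.setValue(null));
    return tolerations;
}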
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi-kafka-operator by strimzi.
The class KafkaUpgradeDowngradeMockTest, method testDowngradeRecoveryWithMessageAndProtocolVersions:
// Tests a partial downgrade: emulates a previous downgrade that failed part-way through and verifies that it is completed.
@Test
public void testDowngradeRecoveryWithMessageAndProtocolVersions(VertxTestContext context) {
Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION);
Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION);
Checkpoint reconciliation = context.checkpoint();
initialize(context, initialKafka).onComplete(context.succeeding(v -> {
context.verify(() -> {
assertVersionsInStatefulSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_IMAGE);
});
})).compose(v -> {
StatefulSet sts = client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").get();
StatefulSet modifiedSts = new StatefulSetBuilder(sts)
        .editMetadata()
            .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)
        .endMetadata()
        .editSpec()
            .editTemplate()
                .editMetadata()
                    .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)
                    .addToAnnotations(StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, "1")
                .endMetadata()
                .editSpec()
                    .editContainer(0)
                        .withImage(KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE)
                    .endContainer()
                .endSpec()
            .endTemplate()
        .endSpec()
        .build();
client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").createOrReplace(modifiedSts);
Pod pod = client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + 1).get();
Pod modifiedPod = new PodBuilder(pod)
        .editMetadata()
            .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)
            .addToAnnotations(StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, "1")
        .endMetadata()
        .editSpec()
            .editContainer(0)
                .withImage(KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE)
            .endContainer()
        .endSpec()
        .build();
client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + 1).createOrReplace(modifiedPod);
return Future.succeededFuture();
}).compose(v -> operator.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), updatedKafka)).onComplete(context.succeeding(v -> context.verify(() -> {
assertVersionsInStatefulSet(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE);
reconciliation.flag();
})));
}
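The kafkaWithVersions helper is not shown on this page. A minimal sketch of what it is assumed to build: a Kafka custom resource with the given broker version and with log.message.format.version and inter.broker.protocol.version set in spec.kafka.config. Everything beyond those version fields is an assumption, and the sketch omits parts a real Kafka CR needs.
static Kafka kafkaWithVersions(String kafkaVersion, String messageFormatVersion, String protocolVersion) {
    Map<String, Object> config = new HashMap<>();
    config.put("log.message.format.version", messageFormatVersion);
    config.put("inter.broker.protocol.version", protocolVersion);
    return new KafkaBuilder()
            .withNewMetadata()
                .withName(CLUSTER_NAME)
                .withNamespace(NAMESPACE)
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withVersion(kafkaVersion)
                    .withConfig(config)
                    // Listeners, storage and the zookeeper section are omitted from this sketch.
                .endKafka()
            .endSpec()
            .build();
}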
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi-kafka-operator by strimzi.
The class KafkaUpgradeDowngradeMockTest, method testUpgradeFromUnsupportedKafkaVersionWithMessageAndProtocol:
// Tests an upgrade from a Kafka version that is not supported by the current version of the operator,
// with the message format and protocol versions specified explicitly.
@Test
public void testUpgradeFromUnsupportedKafkaVersionWithMessageAndProtocol(VertxTestContext context) {
KafkaVersion unsupported = VERSIONS.version("2.1.0");
Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion());
Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion());
Checkpoint reconciliation = context.checkpoint();
initialize(context, initialKafka).onComplete(context.succeeding(v -> {
context.verify(() -> {
assertVersionsInStatefulSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion(), KafkaVersionTestUtils.LATEST_KAFKA_IMAGE);
});
})).compose(v -> {
StatefulSet sts = client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").get();
StatefulSet modifiedSts = new StatefulSetBuilder(sts)
        .editMetadata()
            .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, unsupported.version())
        .endMetadata()
        .editSpec()
            .editTemplate()
                .editMetadata()
                    .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION)
                    .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION)
                    .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION)
                .endMetadata()
                .editSpec()
                    .editContainer(0)
                        .withImage("strimzi/kafka:old-kafka-2.1.0")
                    .endContainer()
                .endSpec()
            .endTemplate()
        .endSpec()
        .build();
client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").createOrReplace(modifiedSts);
for (int i = 0; i < 3; i++) {
Pod pod = client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + i).get();
Pod modifiedPod = new PodBuilder(pod)
        .editMetadata()
            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION)
            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION)
            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION)
        .endMetadata()
        .editSpec()
            .editContainer(0)
                .withImage("strimzi/kafka:old-kafka-2.1.0")
            .endContainer()
        .endSpec()
        .build();
client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + i).createOrReplace(modifiedPod);
}
return Future.succeededFuture();
}).compose(v -> operator.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), updatedKafka)).onComplete(context.succeeding(v -> context.verify(() -> {
assertVersionsInStatefulSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion(), KafkaVersionTestUtils.LATEST_KAFKA_IMAGE);
reconciliation.flag();
})));
}
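assertVersionsInStatefulSet is another helper that does not appear on this page. A rough sketch of what it is assumed to verify: the Kafka version annotation and the container image on the cluster's StatefulSet; the exact set of checks is an assumption.
void assertVersionsInStatefulSet(String kafkaVersion, String messageVersion, String protocolVersion, String image) {
    StatefulSet sts = client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").get();
    // Kafka version recorded as an annotation on the StatefulSet, image on the broker container
    assertThat(sts.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(kafkaVersion));
    assertThat(sts.getSpec().getTemplate().getSpec().getContainers().get(0).getImage(), is(image));
    // The message format and protocol versions would typically be verified through the
    // generated broker configuration; that part is omitted from this sketch.
}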