Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi by strimzi.
The class MockKubeRegressionTest, method testStatefulSetCreationAndDeletion:
@Test
public void testStatefulSetCreationAndDeletion() {
    StatefulSet sts = new StatefulSetBuilder()
            .withNewMetadata().withName("foo").withNamespace("ns").endMetadata()
            .withNewSpec()
                .withReplicas(3)
                .withNewTemplate()
                    .withNewMetadata().endMetadata()
                    .withNewSpec().endSpec()
                .endTemplate()
            .endSpec()
            .build();
    client.apps().statefulSets().inNamespace("ns").withName("foo").create(sts);

    // The mocked StatefulSet controller should create one pod per replica
    List<Pod> ns = client.pods().inNamespace("ns").list().getItems();
    assertThat(ns, hasSize(3));

    // Watch one pod and expect exactly one DELETED event followed by exactly one ADDED event
    AtomicBoolean deleted = new AtomicBoolean(false);
    AtomicBoolean recreated = new AtomicBoolean(false);
    Watch watch = client.pods().inNamespace("ns").withName(ns.get(0).getMetadata().getName()).watch(new Watcher<Pod>() {
        @Override
        public void eventReceived(Action action, Pod resource) {
            if (action == Action.DELETED) {
                if (deleted.getAndSet(true)) {
                    fail("Deleted twice");
                }
            } else if (action == Action.ADDED) {
                if (!deleted.get()) {
                    fail("Created before deleted");
                }
                if (recreated.getAndSet(true)) {
                    fail("Recreated twice");
                }
            }
        }

        @Override
        public void onClose(WatcherException cause) {
        }
    });

    client.pods().inNamespace("ns").withName(ns.get(0).getMetadata().getName())
            .withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
    assertThat(deleted.get(), is(true));
    assertThat(recreated.get(), is(true));
    watch.close();

    // The replica count should be restored after the pod is recreated
    ns = client.pods().inNamespace("ns").list().getItems();
    assertThat(ns, hasSize(3));
    client.apps().statefulSets().inNamespace("ns").withName("foo")
            .withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
}
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi by strimzi.
The class TolerationsIT, method testEmptyStringValueIntoleration:
@Test
public void testEmptyStringValueIntoleration(VertxTestContext context) {
    Toleration t1 = new TolerationBuilder().withEffect("NoSchedule").withValue("").build();
    List<Toleration> tolerationList = new ArrayList<>();
    tolerationList.add(t1);
    // The Cluster Operator (CO) does this over the generated STS; a sketch of the helper follows this test
    tolerationList = ModelUtils.removeEmptyValuesFromTolerations(tolerationList);

    StatefulSet ss = new StatefulSetBuilder()
            .withNewMetadata().withNamespace(namespace).withName("foo").endMetadata()
            .withNewSpec()
                .withSelector(new LabelSelectorBuilder().withMatchLabels(Collections.singletonMap("app", "test")).build())
                .withNewTemplate()
                    .withNewMetadata().withLabels(Collections.singletonMap("app", "test")).endMetadata()
                    .withNewSpec()
                        .withTolerations(tolerationList)
                        .withDnsPolicy("ClusterFirst")
                        .withRestartPolicy("Always")
                        .withSchedulerName("default-scheduler")
                        .withSecurityContext(null)
                        .withTerminationGracePeriodSeconds(30L)
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();

    KubernetesClient client = new DefaultKubernetesClient();
    client.apps().statefulSets().inNamespace(namespace).create(ss);
    StatefulSet stsk8s = client.apps().statefulSets().inNamespace(namespace).withName("foo").get();

    // The toleration submitted with an empty value should come back with a null value and produce no template diff
    StatefulSetDiff diff = new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss, stsk8s);
    Checkpoint checkpoint = context.checkpoint();
    context.verify(() -> {
        assertThat(diff.changesSpecTemplate(), is(false));
        assertThat(stsk8s.getSpec().getTemplate().getSpec().getTolerations().get(0).getValue(), is(nullValue()));
        assertThat(ss.getSpec().getTemplate().getSpec().getTolerations().get(0).getValue(), is(nullValue()));
        checkpoint.flag();
    });
}
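The test relies on ModelUtils.removeEmptyValuesFromTolerations to strip empty toleration values before the StatefulSet is submitted. The assertions above only pin down its observable behaviour (an empty-string value must end up as null), so the following is a hypothetical sketch of such a helper rather than the actual Strimzi implementation; the method name comes from the test, while the class name TolerationCleanup and the method body are assumptions.

import io.fabric8.kubernetes.api.model.Toleration;
import io.fabric8.kubernetes.api.model.TolerationBuilder;
import java.util.List;
import java.util.stream.Collectors;

class TolerationCleanup {
    // Hypothetical sketch: only the "" -> null conversion is verified by the test above
    static List<Toleration> removeEmptyValuesFromTolerations(List<Toleration> tolerations) {
        if (tolerations == null) {
            return null;
        }
        return tolerations.stream()
                .map(t -> new TolerationBuilder(t)
                        .withValue(t.getValue() == null || t.getValue().isEmpty() ? null : t.getValue())
                        .build())
                .collect(Collectors.toList());
    }
}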
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi by strimzi.
The class StatefulSetOperatorTest, method testInternalReplace:
@Test
public void testInternalReplace(VertxTestContext context) {
    StatefulSet sts1 = new StatefulSetBuilder()
            .withNewMetadata().withNamespace(AbstractResourceOperatorTest.NAMESPACE).withName(AbstractResourceOperatorTest.RESOURCE_NAME).endMetadata()
            .withNewSpec()
                .withReplicas(3)
                .withNewTemplate().withNewMetadata().endMetadata().endTemplate()
            .endSpec()
            .build();

    Map<String, Quantity> requests = new HashMap<>();
    requests.put("storage", new Quantity("100Gi"));
    PersistentVolumeClaim pvc = new PersistentVolumeClaimBuilder()
            .withNewMetadata().withName("data").endMetadata()
            .withNewSpec()
                .withAccessModes("ReadWriteOnce")
                .withNewResources().withRequests(requests).endResources()
                .withStorageClassName("gp2")
            .endSpec()
            .build();

    // sts2 differs from sts1 only by the added volume claim template, a change that cannot be patched in place
    StatefulSet sts2 = new StatefulSetBuilder()
            .withNewMetadata().withNamespace(AbstractResourceOperatorTest.NAMESPACE).withName(AbstractResourceOperatorTest.RESOURCE_NAME).endMetadata()
            .withNewSpec()
                .withReplicas(3)
                .withNewTemplate().withNewMetadata().endMetadata().endTemplate()
                .withVolumeClaimTemplates(pvc)
            .endSpec()
            .build();

    Deletable mockDeletable = mock(Deletable.class);
    when(mockDeletable.delete()).thenReturn(Boolean.TRUE);
    Resource mockERPD = mock(resourceType());
    when(mockERPD.withPropagationPolicy(any(DeletionPropagation.class))).thenReturn(mockDeletable);
    when(mockERPD.withGracePeriod(anyLong())).thenReturn(mockDeletable);
    Resource mockResource = mock(resourceType());
    when(mockResource.get()).thenReturn(sts1);
    when(mockResource.withPropagationPolicy(eq(DeletionPropagation.ORPHAN))).thenReturn(mockERPD);
    when(mockResource.create(any(StatefulSet.class))).thenReturn(sts1);

    PodOperator podOperator = mock(PodOperator.class);
    when(podOperator.waitFor(any(), anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture());
    when(podOperator.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(podOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
    when(podOperator.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(new PodBuilder().withNewMetadata().withName("my-pod-0").endMetadata().build()));

    PvcOperator pvcOperator = mock(PvcOperator.class);
    when(pvcOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());

    NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class);
    when(mockNameable.withName(matches(RESOURCE_NAME))).thenReturn(mockResource);
    MixedOperation mockCms = mock(MixedOperation.class);
    when(mockCms.inNamespace(matches(NAMESPACE))).thenReturn(mockNameable);
    KubernetesClient mockClient = mock(KubernetesClient.class);
    mocker(mockClient, mockCms);

    StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) {
        @Override
        protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) {
            return true;
        }

        @Override
        public Future<Void> waitFor(Reconciliation reconciliation, String namespace, String name, String logState, long pollIntervalMs, final long timeoutMs, BiPredicate<String, String> predicate) {
            return Future.succeededFuture();
        }
    };

    Checkpoint async = context.checkpoint();
    op.reconcile(new Reconciliation("test", "kind", "namespace", "name"), sts1.getMetadata().getNamespace(), sts1.getMetadata().getName(), sts2)
            .onComplete(context.succeeding(rrState -> {
                // The non-patchable change must have triggered a delete of the existing StatefulSet (see the sketch after this test)
                verify(mockDeletable).delete();
                async.flag();
            }));
}
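The test stubs withPropagationPolicy(DeletionPropagation.ORPHAN) and then verifies that delete() was invoked: adding a volume claim template is a StatefulSet spec change the Kubernetes API generally refuses to patch, so the operator is expected to replace the resource instead. Stripped of the operator's generation tracking, readiness waiting, and rolling-update logic, the underlying fabric8 pattern looks roughly like the sketch below; the method name replaceStatefulSet is made up for illustration and this is not the actual StatefulSetOperator code.

import io.fabric8.kubernetes.api.model.DeletionPropagation;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.client.KubernetesClient;

class StatefulSetReplaceSketch {
    // Sketch of orphan-delete-and-recreate; the real operator also waits for the deletion
    // to complete and rolls the pods afterwards.
    void replaceStatefulSet(KubernetesClient client, String namespace, String name, StatefulSet desired) {
        // ORPHAN propagation keeps the existing pods running while the StatefulSet object is removed
        client.apps().statefulSets().inNamespace(namespace).withName(name)
                .withPropagationPolicy(DeletionPropagation.ORPHAN)
                .delete();
        // Re-create the StatefulSet with the otherwise non-patchable change (here: new volumeClaimTemplates)
        client.apps().statefulSets().inNamespace(namespace).withName(name)
                .create(desired);
    }
}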
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi by strimzi.
The class KafkaUpgradeDowngradeMockTest, method testDowngradeRecoveryWithMessageAndProtocolVersions:
// Test partial downgrade => emulate a previous downgrade failing in the middle and verify that it is finished.
@Test
public void testDowngradeRecoveryWithMessageAndProtocolVersions(VertxTestContext context) {
    Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION);
    Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION);
    Checkpoint reconciliation = context.checkpoint();

    initialize(context, initialKafka)
        .onComplete(context.succeeding(v -> {
            context.verify(() -> {
                assertVersionsInStatefulSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_IMAGE);
            });
        }))
        .compose(v -> {
            // Emulate an interrupted downgrade: the StatefulSet and one pod already carry the previous Kafka version
            StatefulSet sts = client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").get();
            StatefulSet modifiedSts = new StatefulSetBuilder(sts)
                    .editMetadata()
                        .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)
                    .endMetadata()
                    .editSpec().editTemplate()
                        .editMetadata()
                            .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)
                            .addToAnnotations(StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, "1")
                        .endMetadata()
                        .editSpec()
                            .editContainer(0).withImage(KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE).endContainer()
                        .endSpec()
                    .endTemplate().endSpec()
                    .build();
            client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").createOrReplace(modifiedSts);

            Pod pod = client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + 1).get();
            Pod modifiedPod = new PodBuilder(pod)
                    .editMetadata()
                        .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)
                        .addToAnnotations(StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, "1")
                    .endMetadata()
                    .editSpec()
                        .editContainer(0).withImage(KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE).endContainer()
                    .endSpec()
                    .build();
            client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + 1).createOrReplace(modifiedPod);

            return Future.succeededFuture();
        })
        .compose(v -> operator.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), updatedKafka))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            assertVersionsInStatefulSet(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE);
            reconciliation.flag();
        })));
}
Use of io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder in project strimzi by strimzi.
The class KafkaUpgradeDowngradeMockTest, method testUpgradeFromUnsupportedKafkaVersionWithMessageAndProtocol:
// Tests upgrade from a Kafka version not supported by the current version of the operator, with message format and
// protocol versions specified.
@Test
public void testUpgradeFromUnsupportedKafkaVersionWithMessageAndProtocol(VertxTestContext context) {
    KafkaVersion unsupported = VERSIONS.version("2.1.0");
    Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion());
    Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion());
    Checkpoint reconciliation = context.checkpoint();

    initialize(context, initialKafka)
        .onComplete(context.succeeding(v -> {
            context.verify(() -> {
                assertVersionsInStatefulSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion(), KafkaVersionTestUtils.LATEST_KAFKA_IMAGE);
            });
        }))
        .compose(v -> {
            // Emulate a cluster that was deployed by an older operator with the now unsupported Kafka version
            StatefulSet sts = client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").get();
            StatefulSet modifiedSts = new StatefulSetBuilder(sts)
                    .editMetadata()
                        .addToAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, unsupported.version())
                    .endMetadata()
                    .editSpec().editTemplate()
                        .editMetadata()
                            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION)
                            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION)
                            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION)
                        .endMetadata()
                        .editSpec()
                            .editContainer(0).withImage("strimzi/kafka:old-kafka-2.1.0").endContainer()
                        .endSpec()
                    .endTemplate().endSpec()
                    .build();
            client.apps().statefulSets().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka").createOrReplace(modifiedSts);

            for (int i = 0; i < 3; i++) {
                Pod pod = client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + i).get();
                Pod modifiedPod = new PodBuilder(pod)
                        .editMetadata()
                            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION)
                            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION)
                            .removeFromAnnotations(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION)
                        .endMetadata()
                        .editSpec()
                            .editContainer(0).withImage("strimzi/kafka:old-kafka-2.1.0").endContainer()
                        .endSpec()
                        .build();
                client.pods().inNamespace(NAMESPACE).withName(CLUSTER_NAME + "-kafka-" + i).createOrReplace(modifiedPod);
            }

            return Future.succeededFuture();
        })
        .compose(v -> operator.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), updatedKafka))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            assertVersionsInStatefulSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, unsupported.messageVersion(), unsupported.protocolVersion(), KafkaVersionTestUtils.LATEST_KAFKA_IMAGE);
            reconciliation.flag();
        })));
}