Use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi by strimzi.
The class EntityUserOperatorTest, method testFromCrdCaValidityAndRenewal.
@ParallelTest
public void testFromCrdCaValidityAndRenewal() {
    EntityUserOperatorSpec entityUserOperatorSpec = new EntityUserOperatorSpecBuilder().build();
    EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder()
            .withUserOperator(entityUserOperatorSpec)
            .build();
    CertificateAuthority ca = new CertificateAuthority();
    ca.setValidityDays(42);
    ca.setRenewalDays(69);
    Kafka customValues = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withEntityOperator(entityOperatorSpec)
                .withClientsCa(ca)
            .endSpec()
            .build();
    EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(
            new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()),
            customValues);
    Kafka defaultValues = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withEntityOperator(entityOperatorSpec)
            .endSpec()
            .build();
    EntityUserOperator entityUserOperator2 = EntityUserOperator.fromCrd(
            new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()),
            defaultValues);
    assertThat(entityUserOperator.getClientsCaValidityDays(), is(42L));
    assertThat(entityUserOperator.getClientsCaRenewalDays(), is(69L));
    assertThat(entityUserOperator2.getClientsCaValidityDays(), is(Long.valueOf(CertificateAuthority.DEFAULT_CERTS_VALIDITY_DAYS)));
    assertThat(entityUserOperator2.getClientsCaRenewalDays(), is(Long.valueOf(CertificateAuthority.DEFAULT_CERTS_RENEWAL_DAYS)));
}
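The test above configures the clients CA with plain setters. The same spec can presumably also be built with a generated fluent builder, since Strimzi generates builders for its CRD model classes; a minimal sketch, reusing the variables from the test above and assuming CertificateAuthorityBuilder exposes withValidityDays/withRenewalDays:

// Sketch: building the clients CA fluently instead of via setters.
// CertificateAuthorityBuilder and its setter names are an assumption based on the
// generated builders used elsewhere in these tests.
CertificateAuthority clientsCa = new CertificateAuthorityBuilder()
        .withValidityDays(42)
        .withRenewalDays(69)
        .build();

Kafka kafka = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
        .editSpec()
            .withEntityOperator(entityOperatorSpec)
            .withClientsCa(clientsCa)
        .endSpec()
        .build();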
Use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi by strimzi.
The class EntityUserOperatorTest, method testFromCrdDefault.
@ParallelTest
public void testFromCrdDefault() {
    EntityUserOperatorSpec entityUserOperatorSpec = new EntityUserOperatorSpecBuilder().build();
    EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder()
            .withUserOperator(entityUserOperatorSpec)
            .build();
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withEntityOperator(entityOperatorSpec)
            .endSpec()
            .build();
    EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(
            new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()),
            resource);
    assertThat(entityUserOperator.getWatchedNamespace(), is(namespace));
    assertThat(entityUserOperator.getImage(), is("quay.io/strimzi/operator:latest"));
    assertThat(entityUserOperator.getReconciliationIntervalMs(), is(EntityUserOperatorSpec.DEFAULT_FULL_RECONCILIATION_INTERVAL_SECONDS * 1000));
    assertThat(entityUserOperator.readinessProbeOptions.getInitialDelaySeconds(), is(EntityUserOperatorSpec.DEFAULT_HEALTHCHECK_DELAY));
    assertThat(entityUserOperator.readinessProbeOptions.getTimeoutSeconds(), is(EntityUserOperatorSpec.DEFAULT_HEALTHCHECK_TIMEOUT));
    assertThat(entityUserOperator.livenessProbeOptions.getInitialDelaySeconds(), is(EntityUserOperatorSpec.DEFAULT_HEALTHCHECK_DELAY));
    assertThat(entityUserOperator.livenessProbeOptions.getTimeoutSeconds(), is(EntityUserOperatorSpec.DEFAULT_HEALTHCHECK_TIMEOUT));
    assertThat(entityUserOperator.getKafkaBootstrapServers(), is(EntityUserOperator.defaultBootstrapServers(cluster)));
    assertThat(entityUserOperator.getLogging(), is(nullValue()));
    assertThat(entityUserOperator.getSecretPrefix(), is(EntityUserOperatorSpec.DEFAULT_SECRET_PREFIX));
}
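Every value asserted above is a default taken from an empty EntityUserOperatorSpec; each can be overridden on the spec before it is attached to the EntityOperatorSpec. A minimal sketch, where the setter names and the example values are assumptions inferred from the defaults checked above:

// Sketch: overriding the defaults checked in testFromCrdDefault.
// Setter names and values are illustrative assumptions, not taken from the original file.
EntityUserOperatorSpec customUserOperatorSpec = new EntityUserOperatorSpecBuilder()
        .withWatchedNamespace("my-other-namespace")        // default: the namespace of the Kafka resource
        .withImage("quay.io/strimzi/operator:custom-tag")  // hypothetical image override
        .withReconciliationIntervalSeconds(60)             // default: DEFAULT_FULL_RECONCILIATION_INTERVAL_SECONDS
        .build();

EntityOperatorSpec customEntityOperatorSpec = new EntityOperatorSpecBuilder()
        .withUserOperator(customUserOperatorSpec)
        .build();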
Use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi by strimzi.
The class NamespaceDeletionRecoveryIsolatedST, method testTopicNotAvailable.
/**
 * In case we don't have the KafkaTopic resources from before the cluster loss, we do these steps:
 * 1. deploy the Kafka cluster without the Topic Operator - otherwise the topics would be deleted
 * 2. delete the topic store topics - `__strimzi-topic-operator-kstreams-topic-store-changelog` and `__strimzi_store_topic`
 * 3. re-enable the Topic Operator by redeploying the Kafka cluster
 * @throws InterruptedException when the sleeps between the steps are interrupted
 */
@IsolatedTest("We need for each test case its own Cluster Operator")
@Tag(INTERNAL_CLIENTS_USED)
void testTopicNotAvailable(ExtensionContext extensionContext) throws InterruptedException {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    prepareEnvironmentForRecovery(extensionContext, topicName);
    // Wait until the consumer offsets topic is created
    KafkaTopicUtils.waitForKafkaTopicCreationByNamePrefix("consumer-offsets");
    // Get the list of PVCs needed for the recovery
    List<PersistentVolumeClaim> persistentVolumeClaimList = kubeClient().getClient().persistentVolumeClaims().list().getItems();
    deleteAndRecreateNamespace();
    recreatePvcAndUpdatePv(persistentVolumeClaimList);
    recreateClusterOperator(extensionContext);
    // Recreate the Kafka cluster without the Topic Operator
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
        .editSpec()
            .editKafka()
                .withNewPersistentClaimStorage().withSize("1Gi").withStorageClass(storageClassName).endPersistentClaimStorage()
            .endKafka()
            .editZookeeper()
                .withNewPersistentClaimStorage().withSize("1Gi").withStorageClass(storageClassName).endPersistentClaimStorage()
            .endZookeeper()
            .withNewEntityOperator().endEntityOperator()
        .endSpec()
        .build());
    // Wait some time after Kafka is ready before deleting the topic store topics
    Thread.sleep(60000);
    // Remove all topic data from the topic store
    String deleteTopicStoreTopics = "./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi-topic-operator-kstreams-topic-store-changelog --delete "
        + "&& ./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi_store_topic --delete";
    cmdKubeClient(INFRA_NAMESPACE).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", deleteTopicStoreTopics);
    // Wait until the exec command finishes
    Thread.sleep(30000);
    // Re-enable the Topic Operator (and the User Operator) by updating the Kafka resource
    KafkaResource.replaceKafkaResource(clusterName, k -> {
        k.getSpec().setEntityOperator(new EntityOperatorSpecBuilder()
            .withNewTopicOperator().endTopicOperator()
            .withNewUserOperator().endUserOperator()
            .build());
    });
    DeploymentUtils.waitForDeploymentAndPodsReady(KafkaResources.entityOperatorDeploymentName(clusterName), 1);
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
    String defaultKafkaClientsPodName = ResourceManager.kubeClient().listPodsByPrefixInName(clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(defaultKafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(INFRA_NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();
    LOGGER.info("Checking produced and consumed messages to pod: {}", internalKafkaClient.getPodName());
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
}
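The recovery flow above toggles the Topic Operator purely by changing the shape of the entity operator spec: the initial deployment uses an empty entity operator section (no Topic or User Operator), and the later replaceKafkaResource call re-enables both. A minimal sketch of the two spec variants, extracted for clarity (the helper method names are illustrative, not part of the Strimzi test code):

// Sketch: the two EntityOperatorSpec shapes used by the recovery flow above.
// Step 1: entity operator without Topic and User Operator, so existing topics are left alone.
static EntityOperatorSpec entityOperatorWithoutOperators() {
    return new EntityOperatorSpecBuilder().build();
}

// Step 3: entity operator with both operators re-enabled after the topic store topics are deleted.
static EntityOperatorSpec entityOperatorWithOperators() {
    return new EntityOperatorSpecBuilder()
        .withNewTopicOperator().endTopicOperator()
        .withNewUserOperator().endUserOperator()
        .build();
}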
Use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi by strimzi.
The class EntityOperatorTest, method testFromCrdNoTopicAndUserOperatorInEntityOperator.
@ParallelTest
public void testFromCrdNoTopicAndUserOperatorInEntityOperator() {
    EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder().build();
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withEntityOperator(entityOperatorSpec)
            .endSpec()
            .build();
    EntityOperator entityOperator = EntityOperator.fromCrd(
            new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()),
            resource, VERSIONS);
    assertThat(entityOperator.getTopicOperator(), is(nullValue()));
    assertThat(entityOperator.getUserOperator(), is(nullValue()));
}
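For contrast, a spec that does declare both operators should presumably yield non-null topic and user operator models, as the User Operator tests earlier in this section suggest. A minimal sketch reusing the same fromCrd pattern and the test class fields (namespace, cluster, VERSIONS, etc.); the non-null expectation is inferred, not asserted by the original file:

// Sketch: the complementary case with both operators declared in the EntityOperatorSpec.
EntityOperatorSpec specWithOperators = new EntityOperatorSpecBuilder()
        .withNewTopicOperator().endTopicOperator()
        .withNewUserOperator().endUserOperator()
        .build();

Kafka kafkaWithOperators = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
        .editSpec()
            .withEntityOperator(specWithOperators)
        .endSpec()
        .build();

EntityOperator entityOperatorWithBoth = EntityOperator.fromCrd(
        new Reconciliation("test", kafkaWithOperators.getKind(), kafkaWithOperators.getMetadata().getNamespace(), kafkaWithOperators.getMetadata().getName()),
        kafkaWithOperators, VERSIONS);
assertThat(entityOperatorWithBoth.getTopicOperator(), is(notNullValue()));
assertThat(entityOperatorWithBoth.getUserOperator(), is(notNullValue()));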
Use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi by strimzi.
The class EntityTopicOperatorTest, method testFromCrdNoTopicOperatorInEntityOperator.
@ParallelTest
public void testFromCrdNoTopicOperatorInEntityOperator() {
    EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder().build();
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withEntityOperator(entityOperatorSpec)
            .endSpec()
            .build();
    EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(
            new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()),
            resource);
    assertThat(entityTopicOperator, is(nullValue()));
}