use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi by strimzi.
the class KafkaAssemblyOperatorTest method data.
public static Iterable<Params> data() {
    boolean[] metricsOpenShiftAndEntityOperatorOptions = {true, false};
    SingleVolumeStorage[] storageConfig = {
            new EphemeralStorage(),
            new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build()
    };
    List<Map<String, Object>> configs = asList(null, emptyMap(), singletonMap("foo", "bar"));
    List<Params> result = new ArrayList<>();
    for (boolean metricsOpenShiftAndEntityOperator : metricsOpenShiftAndEntityOperatorOptions) {
        for (Map<String, Object> config : configs) {
            for (SingleVolumeStorage storage : storageConfig) {
                EntityOperatorSpec eoConfig;
                if (metricsOpenShiftAndEntityOperator) {
                    eoConfig = new EntityOperatorSpecBuilder()
                            .withUserOperator(new EntityUserOperatorSpecBuilder().build())
                            .withTopicOperator(new EntityTopicOperatorSpecBuilder().build())
                            .build();
                } else {
                    eoConfig = null;
                }
                List<GenericKafkaListener> listeners = new ArrayList<>(3);
                listeners.add(new GenericKafkaListenerBuilder()
                        .withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false)
                        .withNewKafkaListenerAuthenticationScramSha512Auth().endKafkaListenerAuthenticationScramSha512Auth()
                        .build());
                listeners.add(new GenericKafkaListenerBuilder()
                        .withName("tls").withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true)
                        .withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth()
                        .build());
                if (metricsOpenShiftAndEntityOperator) {
                    // On OpenShift, use Routes
                    listeners.add(new GenericKafkaListenerBuilder()
                            .withName("external").withPort(9094).withType(KafkaListenerType.ROUTE).withTls(true)
                            .withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth()
                            .build());
                } else {
                    // On Kube, use node ports
                    listeners.add(new GenericKafkaListenerBuilder()
                            .withName("external").withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true)
                            .withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth()
                            .build());
                }
                result.add(new Params(metricsOpenShiftAndEntityOperator, metricsOpenShiftAndEntityOperator,
                        listeners, config, config, storage, storage, eoConfig));
            }
        }
    }
    return result;
}
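For context, a minimal sketch of how such a data() method is typically consumed, assuming JUnit 5's @ParameterizedTest/@MethodSource wiring (the method name and body here are hypothetical, not taken from the Strimzi source; Params is the class populated above):

@ParameterizedTest
@MethodSource("data")
void reconcileWithParams(Params params) {
    // JUnit 5 accepts an Iterable<Params> from the static data() factory above;
    // each Params combination (metrics/OpenShift flag, config map, storage type,
    // Entity Operator spec) drives one test invocation. A real test would run a
    // reconciliation with these inputs and assert on the resulting resources.
}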
use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project debezium by debezium.
the class FabricKafkaBuilder method defaultKafkaEntityOperatorSpec.
private static EntityOperatorSpec defaultKafkaEntityOperatorSpec() {
    EntityTopicOperatorSpec topicOperator = new EntityTopicOperatorSpec();
    EntityUserOperatorSpec userOperator = new EntityUserOperatorSpec();
    return new EntityOperatorSpecBuilder()
            .withTopicOperator(topicOperator)
            .withUserOperator(userOperator)
            .build();
}
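A minimal sketch of how this helper would typically be attached to a Kafka resource (the resource name and the surrounding builder wiring are assumptions for illustration, not taken from the Debezium source):

Kafka kafka = new KafkaBuilder()
        .withNewMetadata()
            .withName("dbz-kafka") // hypothetical name
        .endMetadata()
        .editOrNewSpec()
            // Both the Topic Operator and the User Operator get default specs
            .withEntityOperator(defaultKafkaEntityOperatorSpec())
        .endSpec()
        .build();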
use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi-kafka-operator by strimzi.
the class NamespaceDeletionRecoveryIsolatedST method testTopicNotAvailable.
/**
 * In case we don't have KafkaTopic resources from before the cluster loss, we do these steps:
 * 1. deploy the Kafka cluster without the Topic Operator - otherwise the topics would be deleted
 * 2. delete the topic store topics - `__strimzi-topic-operator-kstreams-topic-store-changelog` and `__strimzi_store_topic`
 * 3. enable the Topic Operator by redeploying the Kafka cluster
 * @throws InterruptedException when one of the waits between steps is interrupted
 */
@IsolatedTest("We need for each test case its own Cluster Operator")
@Tag(INTERNAL_CLIENTS_USED)
void testTopicNotAvailable(ExtensionContext extensionContext) throws InterruptedException {
    final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
    prepareEnvironmentForRecovery(extensionContext, testStorage);

    // Wait until the consumer offsets KafkaTopic is created
    KafkaTopicUtils.waitForKafkaTopicCreationByNamePrefix("consumer-offsets");
    // Get the list of PVCs needed for recovery
    List<PersistentVolumeClaim> persistentVolumeClaimList = kubeClient().getClient().persistentVolumeClaims().list().getItems();
    deleteAndRecreateNamespace();
    recreatePvcAndUpdatePv(persistentVolumeClaimList);
    recreateClusterOperator(extensionContext);

    // Recreate the Kafka cluster; the Entity Operator is left empty so no Topic Operator is deployed yet
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3)
            .editSpec()
                .editKafka()
                    .withNewPersistentClaimStorage()
                        .withSize("1Gi")
                        .withStorageClass(storageClassName)
                    .endPersistentClaimStorage()
                .endKafka()
                .editZookeeper()
                    .withNewPersistentClaimStorage()
                        .withSize("1Gi")
                        .withStorageClass(storageClassName)
                    .endPersistentClaimStorage()
                .endZookeeper()
                .withNewEntityOperator()
                .endEntityOperator()
            .endSpec()
            .build());

    // Wait some time after Kafka is ready before deleting the topic store data
    Thread.sleep(60000);
    // Remove all topic data from the topic store
    String deleteTopicStoreTopics = "./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi-topic-operator-kstreams-topic-store-changelog --delete "
            + "&& ./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi_store_topic --delete";
    cmdKubeClient(testStorage.getNamespaceName()).execInPod(KafkaResources.kafkaPodName(testStorage.getClusterName(), 0), "/bin/bash", "-c", deleteTopicStoreTopics);
    // Wait until the exec command finishes
    Thread.sleep(30000);

    // Re-enable the Topic Operator (and User Operator) by updating the Kafka resource
    KafkaResource.replaceKafkaResource(testStorage.getClusterName(), k -> {
        k.getSpec().setEntityOperator(new EntityOperatorSpecBuilder()
                .withNewTopicOperator().endTopicOperator()
                .withNewUserOperator().endUserOperator()
                .build());
    });
    DeploymentUtils.waitForDeploymentAndPodsReady(KafkaResources.entityOperatorDeploymentName(testStorage.getClusterName()), 1);

    KafkaClients clients = new KafkaClientsBuilder()
            .withProducerName(testStorage.getProducerName())
            .withConsumerName(testStorage.getConsumerName())
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
            .withNamespaceName(testStorage.getNamespaceName())
            .withTopicName(testStorage.getTopicName())
            .withMessageCount(MESSAGE_COUNT)
            .build();
    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
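One possible tightening, sketched under assumptions: instead of relying on the fixed 30-second sleep after the delete, the test could verify the topic store topics are actually gone by listing topics through the same exec pattern used above. This assumes the test framework's execInPod(...) returns a result whose out() exposes the captured stdout, and the Hamcrest not/containsString matchers are on the classpath; none of this verification is in the original test.

String listTopics = "./bin/kafka-topics.sh --bootstrap-server localhost:9092 --list";
String topics = cmdKubeClient(testStorage.getNamespaceName())
        .execInPod(KafkaResources.kafkaPodName(testStorage.getClusterName(), 0), "/bin/bash", "-c", listTopics)
        .out();
// Hypothetical check: both topic store topics should no longer be listed
assertThat(topics, not(containsString("__strimzi_store_topic")));
assertThat(topics, not(containsString("__strimzi-topic-operator-kstreams-topic-store-changelog")));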
use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi-kafka-operator by strimzi.
the class EntityOperatorTest method testFromCrdNoTopicInEntityOperator.
@ParallelTest
public void testFromCrdNoTopicInEntityOperator() {
EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder().withNewUserOperator().endUserOperator().build();
Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout)).editSpec().withEntityOperator(entityOperatorSpec).endSpec().build();
EntityOperator entityOperator = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS, true);
assertThat(entityOperator.topicOperator(), is(nullValue()));
assertThat(entityOperator.userOperator(), is(notNullValue()));
}
use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi-kafka-operator by strimzi.
the class EntityOperatorTest method testFromCrdNoUserOperatorInEntityOperator.
@ParallelTest
public void testFromCrdNoUserOperatorInEntityOperator() {
EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder().withNewTopicOperator().endTopicOperator().build();
Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout)).editSpec().withEntityOperator(entityOperatorSpec).endSpec().build();
EntityOperator entityOperator = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS, true);
assertThat(entityOperator.topicOperator(), is(notNullValue()));
assertThat(entityOperator.userOperator(), is(nullValue()));
}
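For completeness, a hedged sketch of the complementary case mirroring the two tests above - both operators configured, so both accessors should be non-null. The test name is hypothetical; the builder and assertion patterns are taken directly from the snippets above.

@ParallelTest
public void testFromCrdBothOperatorsInEntityOperator() {
    EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder()
            .withNewTopicOperator().endTopicOperator()
            .withNewUserOperator().endUserOperator()
            .build();
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec().withEntityOperator(entityOperatorSpec).endSpec()
            .build();
    EntityOperator entityOperator = EntityOperator.fromCrd(
            new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()),
            resource, VERSIONS, true);
    // Both operators were configured, so both should be present
    assertThat(entityOperator.topicOperator(), is(notNullValue()));
    assertThat(entityOperator.userOperator(), is(notNullValue()));
}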