Use of io.strimzi.api.kafka.model.EntityTopicOperatorSpecBuilder in project strimzi-kafka-operator by strimzi.
From the class KafkaAssemblyOperatorTest, the method data():
public static Iterable<Params> data() {
    boolean[] metricsOpenShiftAndEntityOperatorOptions = { true, false };
    SingleVolumeStorage[] storageConfig = {
            new EphemeralStorage(),
            new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build()
    };
    List<Map<String, Object>> configs = asList(null, emptyMap(), singletonMap("foo", "bar"));
    List<Params> result = new ArrayList<>();
    for (boolean metricsOpenShiftAndEntityOperator : metricsOpenShiftAndEntityOperatorOptions) {
        for (Map<String, Object> config : configs) {
            for (SingleVolumeStorage storage : storageConfig) {
                EntityOperatorSpec eoConfig;
                if (metricsOpenShiftAndEntityOperator) {
                    eoConfig = new EntityOperatorSpecBuilder()
                            .withUserOperator(new EntityUserOperatorSpecBuilder().build())
                            .withTopicOperator(new EntityTopicOperatorSpecBuilder().build())
                            .build();
                } else {
                    eoConfig = null;
                }
                List<GenericKafkaListener> listeners = new ArrayList<>(3);
                listeners.add(new GenericKafkaListenerBuilder()
                        .withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false)
                        .withNewKafkaListenerAuthenticationScramSha512Auth().endKafkaListenerAuthenticationScramSha512Auth()
                        .build());
                listeners.add(new GenericKafkaListenerBuilder()
                        .withName("tls").withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true)
                        .withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth()
                        .build());
                if (metricsOpenShiftAndEntityOperator) {
                    // On OpenShift, use Routes
                    listeners.add(new GenericKafkaListenerBuilder()
                            .withName("external").withPort(9094).withType(KafkaListenerType.ROUTE).withTls(true)
                            .withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth()
                            .build());
                } else {
                    // On Kube, use nodeports
                    listeners.add(new GenericKafkaListenerBuilder()
                            .withName("external").withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true)
                            .withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth()
                            .build());
                }
                result.add(new Params(metricsOpenShiftAndEntityOperator, metricsOpenShiftAndEntityOperator,
                        listeners, config, config, storage, storage, eoConfig));
            }
        }
    }
    return result;
}
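As a rough sketch (not taken from the project source), the same builders can also attach an Entity Operator with both operators to a Kafka custom resource directly. The metadata values below are placeholders, and a complete Kafka resource would additionally need spec.kafka and spec.zookeeper sections, which are omitted here.

// Sketch only: placeholder name/namespace; spec.kafka and spec.zookeeper omitted.
Kafka kafka = new KafkaBuilder()
        .withNewMetadata()
            .withName("my-cluster")          // placeholder
            .withNamespace("my-namespace")   // placeholder
        .endMetadata()
        .withNewSpec()
            .withEntityOperator(new EntityOperatorSpecBuilder()
                    .withTopicOperator(new EntityTopicOperatorSpecBuilder().build())
                    .withUserOperator(new EntityUserOperatorSpecBuilder().build())
                    .build())
        .endSpec()
        .build();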
Use of io.strimzi.api.kafka.model.EntityTopicOperatorSpecBuilder in project strimzi-kafka-operator by strimzi.
From the class EntityTopicOperatorTest, the method testFromCrdDefault():
@ParallelTest
public void testFromCrdDefault() {
    EntityTopicOperatorSpec entityTopicOperatorSpec = new EntityTopicOperatorSpecBuilder().build();
    EntityOperatorSpec entityOperatorSpec = new EntityOperatorSpecBuilder()
            .withTopicOperator(entityTopicOperatorSpec)
            .build();
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withEntityOperator(entityOperatorSpec)
            .endSpec()
            .build();
    EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(
            new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()),
            resource);

    assertThat(entityTopicOperator.getWatchedNamespace(), is(namespace));
    assertThat(entityTopicOperator.getImage(), is("quay.io/strimzi/operator:latest"));
    assertThat(entityTopicOperator.getReconciliationIntervalMs(), is(EntityTopicOperatorSpec.DEFAULT_FULL_RECONCILIATION_INTERVAL_SECONDS * 1000));
    assertThat(entityTopicOperator.getZookeeperSessionTimeoutMs(), is(EntityTopicOperatorSpec.DEFAULT_ZOOKEEPER_SESSION_TIMEOUT_SECONDS * 1000));
    assertThat(entityTopicOperator.getTopicMetadataMaxAttempts(), is(EntityTopicOperatorSpec.DEFAULT_TOPIC_METADATA_MAX_ATTEMPTS));
    assertThat(entityTopicOperator.getZookeeperConnect(), is(EntityTopicOperator.defaultZookeeperConnect(cluster)));
    assertThat(entityTopicOperator.getKafkaBootstrapServers(), is(EntityTopicOperator.defaultBootstrapServers(cluster)));
    assertThat(entityTopicOperator.getResourceLabels(), is(ModelUtils.defaultResourceLabels(cluster)));
    assertThat(entityTopicOperator.readinessProbeOptions.getInitialDelaySeconds(), is(EntityTopicOperatorSpec.DEFAULT_HEALTHCHECK_DELAY));
    assertThat(entityTopicOperator.readinessProbeOptions.getTimeoutSeconds(), is(EntityTopicOperatorSpec.DEFAULT_HEALTHCHECK_TIMEOUT));
    assertThat(entityTopicOperator.livenessProbeOptions.getInitialDelaySeconds(), is(EntityTopicOperatorSpec.DEFAULT_HEALTHCHECK_DELAY));
    assertThat(entityTopicOperator.livenessProbeOptions.getTimeoutSeconds(), is(EntityTopicOperatorSpec.DEFAULT_HEALTHCHECK_TIMEOUT));
    assertThat(entityTopicOperator.getLogging(), is(nullValue()));
}
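The assertions above exercise the defaults. As a minimal sketch (not from the project tests), the same builder can override them; the with* setter names below are assumed from the spec fields the test checks (watchedNamespace, reconciliationIntervalSeconds, zookeeperSessionTimeoutSeconds, topicMetadataMaxAttempts), and the namespace value is a placeholder.

// Sketch only: setter names assumed from the EntityTopicOperatorSpec fields asserted above.
EntityTopicOperatorSpec customTopicOperatorSpec = new EntityTopicOperatorSpecBuilder()
        .withWatchedNamespace("my-topic-namespace")   // placeholder namespace
        .withReconciliationIntervalSeconds(120)       // spec values are given in seconds
        .withZookeeperSessionTimeoutSeconds(20)
        .withTopicMetadataMaxAttempts(10)
        .build();
EntityOperatorSpec customEntityOperatorSpec = new EntityOperatorSpecBuilder()
        .withTopicOperator(customTopicOperatorSpec)
        .build();

The model exposes these values in milliseconds, which is why the default assertions above multiply the *_SECONDS constants by 1000.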
The same usages are also listed for project strimzi by strimzi (EntityTopicOperatorTest#testFromCrdDefault and KafkaAssemblyOperatorTest#data); the snippets are identical to the ones shown above.