Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
From the class KafkaExporterTest, method testFromConfigMapDefaultConfig:
@ParallelTest
public void testFromConfigMapDefaultConfig() {
    // KafkaExporterSpec with no fields set, so the exporter defaults should be applied
    Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, null, healthDelay, healthTimeout,
            jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson,
            new KafkaExporterSpec(), null);
    KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(),
            resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS);

    assertThat(ke.getImage(), is(KafkaVersionTestUtils.DEFAULT_KAFKA_IMAGE));
    assertThat(ke.logging, is("info"));
    assertThat(ke.groupRegex, is(".*"));
    assertThat(ke.topicRegex, is(".*"));
    assertThat(ke.saramaLoggingEnabled, is(false));
}
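Every snippet on this page builds the reconciliation context the same way: a Reconciliation constructed from a fixed trigger string ("test") plus the kind, namespace, and name read from the custom resource's metadata. If that pattern repeats across many tests, it could be factored into a small helper. The method below is only a hypothetical sketch of such a helper (the name testReconciliation and the use of fabric8's HasMetadata are assumptions, not part of Strimzi's test code):

import io.fabric8.kubernetes.api.model.HasMetadata;
import io.strimzi.operator.common.Reconciliation;

// Hypothetical helper: builds the Reconciliation passed to fromCrd(...) from any
// custom resource (Kafka, KafkaConnect, ...) using its kind and metadata.
private static Reconciliation testReconciliation(HasMetadata resource) {
    return new Reconciliation("test", resource.getKind(),
            resource.getMetadata().getNamespace(), resource.getMetadata().getName());
}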
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
From the class KafkaExporterTest, method testExporterNotDeployed:
@ParallelTest
public void testExporterNotDeployed() {
    // No KafkaExporterSpec in the Kafka resource, so no exporter resources should be generated
    Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout,
            jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, null, null);
    KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(),
            resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS);

    assertThat(ke.generateDeployment(true, null, null), is(nullValue()));
    assertThat(ke.generateSecret(null, true), is(nullValue()));
}
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
From the class KafkaExporterTest, method testTemplate:
@ParallelTest
public void testTemplate() {
    Map<String, String> depLabels = TestUtils.map("l1", "v1", "l2", "v2",
            Labels.KUBERNETES_PART_OF_LABEL, "custom-part", Labels.KUBERNETES_MANAGED_BY_LABEL, "custom-managed-by");
    Map<String, String> expectedDepLabels = new HashMap<>(depLabels);
    expectedDepLabels.remove(Labels.KUBERNETES_MANAGED_BY_LABEL);
    Map<String, String> depAnots = TestUtils.map("a1", "v1", "a2", "v2");
    Map<String, String> podLabels = TestUtils.map("l3", "v3", "l4", "v4");
    Map<String, String> podAnots = TestUtils.map("a3", "v3", "a4", "v4");
    Map<String, String> saLabels = TestUtils.map("l5", "v5", "l6", "v6");
    Map<String, String> saAnots = TestUtils.map("a5", "v5", "a6", "v6");
    Affinity affinity = new AffinityBuilder()
            .withNewNodeAffinity().withNewRequiredDuringSchedulingIgnoredDuringExecution()
                .withNodeSelectorTerms(new NodeSelectorTermBuilder()
                        .addNewMatchExpression().withKey("key1").withOperator("In").withValues("value1", "value2").endMatchExpression()
                        .build())
            .endRequiredDuringSchedulingIgnoredDuringExecution().endNodeAffinity()
            .build();
    List<Toleration> tolerations = singletonList(new TolerationBuilder()
            .withEffect("NoExecute").withKey("key1").withOperator("Equal").withValue("value1").build());
    TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder()
            .withTopologyKey("kubernetes.io/zone").withMaxSkew(1).withWhenUnsatisfiable("DoNotSchedule")
            .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()).build();
    TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder()
            .withTopologyKey("kubernetes.io/hostname").withMaxSkew(2).withWhenUnsatisfiable("ScheduleAnyway")
            .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()).build();
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withNewKafkaExporter()
                    .withNewTemplate()
                        .withNewDeployment()
                            .withNewMetadata().withLabels(depLabels).withAnnotations(depAnots).endMetadata()
                        .endDeployment()
                        .withNewPod()
                            .withNewMetadata().withLabels(podLabels).withAnnotations(podAnots).endMetadata()
                            .withPriorityClassName("top-priority").withSchedulerName("my-scheduler")
                            .withAffinity(affinity).withTolerations(tolerations)
                            .withTopologySpreadConstraints(tsc1, tsc2).withEnableServiceLinks(false)
                        .endPod()
                        .withNewServiceAccount()
                            .withNewMetadata().withLabels(saLabels).withAnnotations(saAnots).endMetadata()
                        .endServiceAccount()
                    .endTemplate()
                .endKafkaExporter()
            .endSpec()
            .build();
    KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(),
            resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS);

    // Check Deployment
    Deployment dep = ke.generateDeployment(true, null, null);
    assertThat(dep.getMetadata().getLabels().entrySet().containsAll(expectedDepLabels.entrySet()), is(true));
    assertThat(dep.getMetadata().getAnnotations().entrySet().containsAll(depAnots.entrySet()), is(true));

    // Check Pods
    assertThat(dep.getSpec().getTemplate().getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true));
    assertThat(dep.getSpec().getTemplate().getMetadata().getAnnotations().entrySet().containsAll(podAnots.entrySet()), is(true));
    assertThat(dep.getSpec().getTemplate().getSpec().getPriorityClassName(), is("top-priority"));
    assertThat(dep.getSpec().getTemplate().getSpec().getSchedulerName(), is("my-scheduler"));
    assertThat(dep.getSpec().getTemplate().getSpec().getAffinity(), is(affinity));
    assertThat(dep.getSpec().getTemplate().getSpec().getTolerations(), is(tolerations));
    assertThat(dep.getSpec().getTemplate().getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, tsc2));
    assertThat(dep.getSpec().getTemplate().getSpec().getEnableServiceLinks(), is(false));

    // Check Service Account
    ServiceAccount sa = ke.generateServiceAccount();
    assertThat(sa.getMetadata().getLabels().entrySet().containsAll(saLabels.entrySet()), is(true));
    assertThat(sa.getMetadata().getAnnotations().entrySet().containsAll(saAnots.entrySet()), is(true));
}
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
From the class KafkaExporterTest, method testContainerTemplateEnvVarsWithKeyConflict:
@ParallelTest
public void testContainerTemplateEnvVarsWithKeyConflict() {
    ContainerEnvVar envVar1 = new ContainerEnvVar();
    String testEnvOneKey = "TEST_ENV_1";
    String testEnvOneValue = "test.env.one";
    envVar1.setName(testEnvOneKey);
    envVar1.setValue(testEnvOneValue);

    // This variable name collides with an env var the operator sets from the spec
    ContainerEnvVar envVar2 = new ContainerEnvVar();
    String testEnvTwoKey = KafkaExporter.ENV_VAR_KAFKA_EXPORTER_GROUP_REGEX;
    String testEnvTwoValue = "my-special-value";
    envVar2.setName(testEnvTwoKey);
    envVar2.setValue(testEnvTwoValue);

    KafkaExporterSpec exporterSpec = new KafkaExporterSpecBuilder()
            .withLogging(exporterOperatorLogging).withGroupRegex(groupRegex)
            .withTopicRegex(topicRegex).withImage(keImage)
            .withNewTemplate()
                .withNewContainer().withEnv(envVar1, envVar2).endContainer()
            .endTemplate()
            .build();
    Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout,
            jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, exporterSpec, null);
    KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(),
            resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS);

    List<EnvVar> kafkaEnvVars = ke.getEnvVars();
    // The non-conflicting variable is passed through; the conflicting one keeps the spec-derived value
    assertThat(kafkaEnvVars.stream().filter(var -> testEnvOneKey.equals(var.getName()))
            .map(EnvVar::getValue).findFirst().orElseThrow(), is(testEnvOneValue));
    assertThat(kafkaEnvVars.stream().filter(var -> testEnvTwoKey.equals(var.getName()))
            .map(EnvVar::getValue).findFirst().orElseThrow(), is(groupRegex));
}
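The second assertion above shows how the conflict is resolved: the template-supplied value ("my-special-value") is discarded and the variable keeps the value derived from the spec's groupRegex. A minimal sketch of that merge rule is shown below; it is illustrative only (the helper name addTemplateEnvVars and its shape are assumptions, not Strimzi's actual implementation, and import paths may differ between Strimzi versions), but it captures the behaviour the test asserts: user env vars are appended only when their names do not collide with operator-managed ones.

import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.EnvVarBuilder;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical sketch of "operator-managed env vars win on name conflicts".
// ContainerEnvVar is the Strimzi API type used in the test above.
static void addTemplateEnvVars(List<EnvVar> vars, List<ContainerEnvVar> userVars) {
    // Names already set by the operator (e.g. ENV_VAR_KAFKA_EXPORTER_GROUP_REGEX) are reserved
    Set<String> reserved = vars.stream().map(EnvVar::getName).collect(Collectors.toSet());
    for (ContainerEnvVar userVar : userVars) {
        if (!reserved.contains(userVar.getName())) {
            vars.add(new EnvVarBuilder().withName(userVar.getName()).withValue(userVar.getValue()).build());
        }
    }
}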
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
From the class KafkaConnectBuildTest, method testValidationPluginsExist:
@ParallelTest
public void testValidationPluginsExist() {
    // A build section with an output image but no plugins should be rejected as invalid
    KafkaConnect kc = new KafkaConnectBuilder()
            .withNewMetadata().withName(cluster).withNamespace(namespace).endMetadata()
            .withNewSpec().withBootstrapServers("my-kafka:9092")
                .withNewBuild()
                    .withNewDockerOutput().withImage("my-image:latest").withPushSecret("my-docker-credentials").endDockerOutput()
                .endBuild()
            .endSpec()
            .build();
    assertThrows(InvalidResourceException.class, () -> {
        KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS);
    });
}