Use of io.strimzi.test.annotations.ParallelTest in project strimzi by strimzi: class ListenersValidatorTest, method testInvalidNames.
@ParallelTest
public void testInvalidNames() {
    GenericKafkaListener listener1 = new GenericKafkaListenerBuilder()
            .withName("listener 1")
            .withPort(9900)
            .withType(KafkaListenerType.INTERNAL)
            .build();

    GenericKafkaListener listener2 = new GenericKafkaListenerBuilder()
            .withName("LISTENER2")
            .withPort(9901)
            .withType(KafkaListenerType.INTERNAL)
            .build();

    GenericKafkaListener listener3 = new GenericKafkaListenerBuilder()
            .withName("listener12345678901234567890")
            .withPort(9902)
            .withType(KafkaListenerType.INTERNAL)
            .build();

    List<GenericKafkaListener> listeners = asList(listener1, listener2, listener3);

    assertThat(ListenersValidator.validateAndGetErrorMessages(3, listeners),
            containsInAnyOrder("listener names [listener 1, LISTENER2, listener12345678901234567890] are invalid and do not match the pattern ^[a-z0-9]{1,11}$"));
}
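For contrast, a name that does match ^[a-z0-9]{1,11}$ (lowercase alphanumerics, at most 11 characters) passes this check. A minimal sketch reusing the builder API from the test above; the empty-result assertion is an assumption about the validator's behavior when every check passes:

    GenericKafkaListener valid = new GenericKafkaListenerBuilder()
            .withName("plain")  // lowercase alphanumeric, well under the 11-character limit
            .withPort(9092)
            .withType(KafkaListenerType.INTERNAL)
            .build();

    // Assumption: a fully valid listener list yields an empty error collection
    assertThat(ListenersValidator.validateAndGetErrorMessages(3, asList(valid)), is(empty()));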
Use of io.strimzi.test.annotations.ParallelTest in project strimzi by strimzi: class ListenersValidatorTest, method testValidateBrokerCertChainAndKey.
@ParallelTest
public void testValidateBrokerCertChainAndKey() {
    GenericKafkaListener listener1 = new GenericKafkaListenerBuilder()
            .withName("listener1")
            .withPort(9900)
            .withType(KafkaListenerType.INTERNAL)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withCertificate("")
                    .withKey("")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build();

    List<GenericKafkaListener> listeners = asList(listener1);

    Exception exception = assertThrows(InvalidResourceException.class,
            () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners));

    assertThat(exception.getMessage(), allOf(
            containsString("listener 'listener1' cannot have an empty secret name in the brokerCertChainAndKey"),
            containsString("listener 'listener1' cannot have an empty key in the brokerCertChainAndKey"),
            containsString("listener 'listener1' cannot have an empty certificate in the brokerCertChainAndKey")));
}
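The three error fragments map to the three fields of brokerCertChainAndKey: the secret name, the certificate, and the private key. A minimal sketch of a configuration that should satisfy all three checks; withSecretName(...) is inferred from the "empty secret name" error message, and the secret and file names are hypothetical:

    GenericKafkaListener listener = new GenericKafkaListenerBuilder()
            .withName("listener1")
            .withPort(9900)
            .withType(KafkaListenerType.INTERNAL)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withSecretName("my-listener-secret")  // hypothetical Secret holding the files below
                    .withCertificate("tls.crt")
                    .withKey("tls.key")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build();

    // With all three fields populated, validate(...) should no longer raise these errors
    ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, asList(listener));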
Use of io.strimzi.test.annotations.ParallelTest in project strimzi by strimzi: class KafkaClusterTest, method testCruiseControlWithSingleNodeKafka.
@ParallelTest
public void testCruiseControlWithSingleNodeKafka() {
    Map<String, Object> config = new HashMap<>();
    config.put("offsets.topic.replication.factor", 1);
    config.put("transaction.state.log.replication.factor", 1);
    config.put("transaction.state.log.min.isr", 1);

    Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image,
                healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
            .editSpec()
                .editKafka()
                    .withReplicas(1)
                    .withConfig(config)
                .endKafka()
                .withNewCruiseControl()
                .endCruiseControl()
            .endSpec()
            .build();

    InvalidResourceException ex = assertThrows(InvalidResourceException.class,
            () -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS));

    assertThat(ex.getMessage(), is("Kafka " + namespace + "/" + cluster + " has invalid configuration. "
            + "Cruise Control cannot be deployed with a single-node Kafka cluster. It requires at least two Kafka nodes."));
}
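The fix is simply to run at least two brokers. A minimal sketch under that assumption; the replication-factor overrides of 1 stay legal because 1 <= 2:

    Kafka twoNodeKafka = new KafkaBuilder(kafkaAssembly)
            .editSpec()
                .editKafka()
                    .withReplicas(2)  // two nodes: the minimum the error message demands
                .endKafka()
            .endSpec()
            .build();

    // Assumption: with two or more brokers, the same conversion succeeds without throwing
    KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, twoNodeKafka, VERSIONS);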
Use of io.strimzi.test.annotations.ParallelTest in project strimzi by strimzi: class KafkaConnectBuildTest, method testValidationPluginsExist.
@ParallelTest
public void testValidationPluginsExist() {
    KafkaConnect kc = new KafkaConnectBuilder()
            .withNewMetadata()
                .withName(cluster)
                .withNamespace(namespace)
            .endMetadata()
            .withNewSpec()
                .withBootstrapServers("my-kafka:9092")
                .withNewBuild()
                    .withNewDockerOutput()
                        .withImage("my-image:latest")
                        .withPushSecret("my-docker-credentials")
                    .endDockerOutput()
                .endBuild()
            .endSpec()
            .build();

    assertThrows(InvalidResourceException.class,
            () -> KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(),
                    kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS));
}
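The spec above declares a build section with a Docker output but no plugins, which is what trips the validation. A minimal sketch of a spec that should pass, borrowing PluginBuilder and the jarArtifactWithChecksum fixture from the testDeployment example below; editBuild() on the generated builder is an assumption:

    KafkaConnect kcWithPlugins = new KafkaConnectBuilder(kc)
            .editSpec()
                .editBuild()
                    .withPlugins(new PluginBuilder()
                            .withName("my-connector")
                            .withArtifacts(jarArtifactWithChecksum)  // fixture artifact from the test class
                            .build())
                .endBuild()
            .endSpec()
            .build();

    // With at least one plugin declared, fromCrd(...) should no longer throw
    KafkaConnectBuild.fromCrd(new Reconciliation("test", kcWithPlugins.getKind(),
            kcWithPlugins.getMetadata().getNamespace(), kcWithPlugins.getMetadata().getName()),
            kcWithPlugins, VERSIONS);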
Use of io.strimzi.test.annotations.ParallelTest in project strimzi by strimzi: class KafkaConnectBuildTest, method testDeployment.
@ParallelTest
public void testDeployment() {
    Map<String, Quantity> limit = new HashMap<>();
    limit.put("cpu", new Quantity("500m"));
    limit.put("memory", new Quantity("512Mi"));

    Map<String, Quantity> request = new HashMap<>();
    request.put("cpu", new Quantity("1000m"));
    request.put("memory", new Quantity("1Gi"));

    KafkaConnect kc = new KafkaConnectBuilder()
            .withNewMetadata()
                .withName(cluster)
                .withNamespace(namespace)
            .endMetadata()
            .withNewSpec()
                .withImage("my-source-image:latest")
                .withBootstrapServers("my-kafka:9092")
                .withNewBuild()
                    .withNewDockerOutput()
                        .withImage("my-image:latest")
                        .withPushSecret("my-docker-credentials")
                    .endDockerOutput()
                    .withPlugins(
                            new PluginBuilder().withName("my-connector").withArtifacts(jarArtifactWithChecksum).build(),
                            new PluginBuilder().withName("my-connector2").withArtifacts(jarArtifactNoChecksum).build())
                    .withResources(new ResourceRequirementsBuilder().withLimits(limit).withRequests(request).build())
                .endBuild()
            .endSpec()
            .build();

    KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(),
            kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS);
    assertThat(build.baseImage, is("my-source-image:latest"));

    Pod pod = build.generateBuilderPod(true, ImagePullPolicy.IFNOTPRESENT, null, null);
    assertThat(pod.getMetadata().getName(), is(KafkaConnectResources.buildPodName(cluster)));
    assertThat(pod.getMetadata().getNamespace(), is(namespace));

    Map<String, String> expectedDeploymentLabels = TestUtils.map(
            Labels.STRIMZI_CLUSTER_LABEL, this.cluster,
            Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.buildPodName(cluster),
            Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND,
            Labels.KUBERNETES_NAME_LABEL, KafkaConnectBuild.APPLICATION_NAME,
            Labels.KUBERNETES_INSTANCE_LABEL, this.cluster,
            Labels.KUBERNETES_PART_OF_LABEL, Labels.APPLICATION_NAME + "-" + this.cluster,
            Labels.KUBERNETES_MANAGED_BY_LABEL, AbstractModel.STRIMZI_CLUSTER_OPERATOR_NAME);
    assertThat(pod.getMetadata().getLabels(), is(expectedDeploymentLabels));
    assertThat(pod.getSpec().getServiceAccountName(), is(KafkaConnectResources.buildServiceAccountName(cluster)));

    assertThat(pod.getSpec().getContainers().size(), is(1));
    assertThat(pod.getSpec().getContainers().get(0).getArgs(), is(defaultArgs));
    assertThat(pod.getSpec().getContainers().get(0).getName(), is(KafkaConnectResources.buildPodName(this.cluster)));
    assertThat(pod.getSpec().getContainers().get(0).getImage(), is(build.image));
    assertThat(pod.getSpec().getContainers().get(0).getPorts().size(), is(0));
    assertThat(pod.getSpec().getContainers().get(0).getResources().getLimits(), is(limit));
    assertThat(pod.getSpec().getContainers().get(0).getResources().getRequests(), is(request));

    assertThat(pod.getSpec().getVolumes().size(), is(2));
    assertThat(pod.getSpec().getVolumes().get(0).getName(), is("dockerfile"));
    assertThat(pod.getSpec().getVolumes().get(0).getConfigMap().getName(), is(KafkaConnectResources.dockerFileConfigMapName(cluster)));
    assertThat(pod.getSpec().getVolumes().get(1).getName(), is("docker-credentials"));
    assertThat(pod.getSpec().getVolumes().get(1).getSecret().getSecretName(), is("my-docker-credentials"));

    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().size(), is(2));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is("dockerfile"));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is("/dockerfile"));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is("docker-credentials"));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is("/kaniko/.docker"));

    assertThat(pod.getMetadata().getOwnerReferences().size(), is(1));
    assertThat(pod.getMetadata().getOwnerReferences().get(0), is(build.createOwnerReference()));
}
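The docker-credentials volume and its /kaniko/.docker mount exist only because the Docker output names a push secret. A hypothetical variant, not taken from the test class: rebuilding the output without withPushSecret(...) should, by assumption, leave just the dockerfile volume (editBuild() on the generated builder is also assumed):

    KafkaConnect kcNoSecret = new KafkaConnectBuilder(kc)
            .editSpec()
                .editBuild()
                    .withNewDockerOutput()
                        .withImage("my-image:latest")  // replaces the output, omitting the push secret
                    .endDockerOutput()
                .endBuild()
            .endSpec()
            .build();

    Pod podNoSecret = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcNoSecret, VERSIONS)
            .generateBuilderPod(true, ImagePullPolicy.IFNOTPRESENT, null, null);

    // Assumption: only the dockerfile volume and its mount remain
    assertThat(podNoSecret.getSpec().getVolumes().size(), is(1));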