Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From class KafkaStatusTest, method testKafkaListenerNodePortAddressSameNode:
@Test
public void testKafkaListenerNodePortAddressSameNode(VertxTestContext context) throws ParseException {
Kafka kafka = new KafkaBuilder(getKafkaCrd())
        .editOrNewSpec()
            .editOrNewKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("external")
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(true)
                        .build())
            .endKafka()
        .endSpec()
        .build();
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock the CRD Operator for Kafka resources
CrdOperator mockKafkaOps = supplier.kafkaOperator;
when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka));
when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka);
ArgumentCaptor<Kafka> kafkaCaptor = ArgumentCaptor.forClass(Kafka.class);
when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock the KafkaSetOperator
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(eq(namespace), eq(KafkaCluster.kafkaClusterName(clusterName))))
        .thenReturn(Future.succeededFuture(kafkaCluster.generateStatefulSet(false, null, null, null)));
// Mock the StrimziPodSet operator
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
// Mock the ConfigMapOperator
ConfigMapOperator mockCmOps = supplier.configMapOperations;
when(mockCmOps.getAsync(eq(namespace), eq(clusterName)))
        .thenReturn(Future.succeededFuture(kafkaCluster.generateMetricsAndLogConfigMap(new MetricsAndLogging(null, null))));
// Mock the Pod operator: all three broker pods report the same host IP, so a single address is expected in the status
Pod pod0 = new PodBuilder()
        .withNewMetadata().withName(clusterName + "-kafka-" + 0).endMetadata()
        .withNewStatus().withHostIP("10.0.0.1").endStatus()
        .build();
Pod pod1 = new PodBuilder()
        .withNewMetadata().withName(clusterName + "-kafka-" + 1).endMetadata()
        .withNewStatus().withHostIP("10.0.0.1").endStatus()
        .build();
Pod pod2 = new PodBuilder()
        .withNewMetadata().withName(clusterName + "-kafka-" + 2).endMetadata()
        .withNewStatus().withHostIP("10.0.0.1").endStatus()
        .build();
List<Pod> pods = new ArrayList<>();
pods.add(pod0);
pods.add(pod1);
pods.add(pod2);
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(eq(namespace), any(Labels.class))).thenReturn(Future.succeededFuture(pods));
// Mock Node operator
NodeOperator mockNodeOps = supplier.nodeOperator;
when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(getClusterNodes()));
MockNodePortStatusKafkaAssemblyOperator kao = new MockNodePortStatusKafkaAssemblyOperator(
        vertx,
        new PlatformFeaturesAvailability(false, kubernetesVersion),
        certManager,
        passwordGenerator,
        supplier,
        config);
Checkpoint async = context.checkpoint();
kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)).onComplete(res -> {
assertThat(res.succeeded(), is(true));
assertThat(kafkaCaptor.getValue(), is(notNullValue()));
assertThat(kafkaCaptor.getValue().getStatus(), is(notNullValue()));
KafkaStatus status = kafkaCaptor.getValue().getStatus();
assertThat(status.getListeners().size(), is(1));
assertThat(status.getListeners().get(0).getType(), is("external"));
assertThat(status.getListeners().get(0).getName(), is("external"));
List<ListenerAddress> addresses = status.getListeners().get(0).getAddresses();
assertThat(addresses.size(), is(1));
List<ListenerAddress> expected = new ArrayList<>();
expected.add(new ListenerAddressBuilder().withHost("50.35.18.119").withPort(31234).build());
assertThat(addresses, is(expected));
async.flag();
});
}
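The getClusterNodes() helper used when mocking the Node operator is not shown in this excerpt. A minimal sketch of what it could return, assuming a single worker node whose ExternalIP matches the address asserted in the listener status (the node name and the single-node list are assumptions made for illustration):
import io.fabric8.kubernetes.api.model.Node;
import io.fabric8.kubernetes.api.model.NodeBuilder;
import java.util.List;
// Hypothetical helper (not part of the excerpt above): supplies the nodes listed by the mocked NodeOperator.
// The ExternalIP is chosen to match the address expected in the listener status.
private static List<Node> getClusterNodes() {
    Node node = new NodeBuilder()
            .withNewMetadata()
                .withName("node-0") // assumed node name
            .endMetadata()
            .withNewStatus()
                .addNewAddress()
                    .withType("ExternalIP")
                    .withAddress("50.35.18.119")
                .endAddress()
            .endStatus()
            .build();
    return List.of(node);
}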
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From class KafkaSpecCheckerTest, method checkInterBrokerProtocolVersion:
@Test
public void checkInterBrokerProtocolVersion() {
Map<String, Object> kafkaOptions = new HashMap<>();
kafkaOptions.put(KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION);
kafkaOptions.put(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3);
kafkaOptions.put(KafkaConfiguration.MIN_INSYNC_REPLICAS, 2);
Kafka kafka = new KafkaBuilder(ResourceUtils.createKafka(NAMESPACE, NAME, 3, IMAGE, HEALTH_DELAY, HEALTH_TIMEOUT,
        null, kafkaOptions, emptyMap(), new EphemeralStorage(), new EphemeralStorage(), null, null, null, null))
        .editSpec()
            .editKafka()
                .withVersion(KafkaVersionTestUtils.LATEST_KAFKA_VERSION)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("plain")
                        .withPort(9092)
                        .withTls(false)
                        .withType(KafkaListenerType.INTERNAL)
                        .build())
            .endKafka()
        .endSpec()
        .build();
KafkaSpecChecker checker = generateChecker(kafka);
List<Condition> warnings = checker.run();
assertThat(warnings, hasSize(1));
Condition warning = warnings.get(0);
assertThat(warning.getReason(), is("KafkaInterBrokerProtocolVersion"));
assertThat(warning.getStatus(), is("True"));
assertThat(warning.getMessage(), is("inter.broker.protocol.version does not match the Kafka cluster version, which suggests that an upgrade is incomplete."));
}
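The generateChecker(kafka) helper is likewise not shown. A plausible sketch, assuming it builds a KafkaSpecChecker from the Kafka resource and the test's version lookup; the exact constructor arguments are an assumption based on how the checker is typically wired in these tests, not a confirmed signature:
// Hypothetical helper (constructor arguments assumed): builds the checker the test runs.
private KafkaSpecChecker generateChecker(Kafka kafka) {
    KafkaVersion.Lookup versions = KafkaVersionTestUtils.getKafkaVersionLookup();
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, versions);
    return new KafkaSpecChecker(kafka.getSpec(), versions, kafkaCluster);
}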
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From class ConnectIsolatedST, method testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication:
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withAuth(new KafkaListenerAuthenticationScramSha512())
                        .build())
            .endKafka()
        .endSpec()
        .build());
KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
resourceManager.createResource(extensionContext, kafkaUser);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1)
        .editSpec()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .withNewTls()
                .addNewTrustedCertificate()
                    .withSecretName(clusterName + "-cluster-ca-cert")
                    .withCertificate("ca.crt")
                .endTrustedCertificate()
            .endTls()
            .withBootstrapServers(clusterName + "-kafka-bootstrap:9093")
            .withNewKafkaClientAuthenticationScramSha512()
                .withUsername(userName)
                .withNewPasswordSecret()
                    .withSecretName(userName)
                    .withPassword("password")
                .endPasswordSecret()
            .endKafkaClientAuthenticationScramSha512()
        .endSpec()
        .build());
final String kafkaConnectPodName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getName();
final String kafkaConnectLogs = kubeClient(namespaceName).logs(kafkaConnectPodName);
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(kafkaClientsName).get(0).getMetadata().getName();
LOGGER.info("Verifying that KafkaConnect pod logs don't contain ERRORs");
assertThat(kafkaConnectLogs, not(containsString("ERROR")));
LOGGER.info("Creating FileStreamSink connector via pod {} with topic {}", kafkaClientsPodName, topicName);
KafkaConnectorUtils.createFileSinkConnector(namespaceName, kafkaClientsPodName, topicName, Constants.DEFAULT_SINK_FILE_PATH, KafkaConnectResources.url(clusterName, namespaceName, 8083));
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, true, kafkaClientsName + "-second", kafkaUser).build());
final String kafkaClientsSecondPodName = kubeClient(namespaceName).listPodsByPrefixInName(kafkaClientsName + "-second").get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsSecondPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();
internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesTls(), internalKafkaClient.receiveMessagesTls());
KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(namespaceName, kafkaConnectPodName, Constants.DEFAULT_SINK_FILE_PATH, "99");
}
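KafkaConnectorUtils.createFileSinkConnector is given the client pod name and the Connect REST URL, so it presumably registers the FileStreamSink connector through the Connect REST API; the connector definition itself is not shown. For illustration only, an equivalent connector declared as a Strimzi KafkaConnector resource might look like the following sketch (the resource name, and the use of a custom resource instead of the REST call, are assumptions):
// Illustrative only: an equivalent FileStreamSink connector expressed as a KafkaConnector custom resource.
KafkaConnector fileSink = new KafkaConnectorBuilder()
        .withNewMetadata()
            .withName("sink-test") // assumed name
            .addToLabels("strimzi.io/cluster", clusterName) // ties the connector to the KafkaConnect cluster
        .endMetadata()
        .withNewSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
            .withTasksMax(1)
            .addToConfig("topics", topicName)
            .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
        .endSpec()
        .build();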
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From class ConnectIsolatedST, method testConnectTlsAuthWithWeirdUserName:
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(CONNECTOR_OPERATOR)
@ParallelNamespaceTest
void testConnectTlsAuthWithWeirdUserName(ExtensionContext extensionContext) {
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
// Create a user with a "weird" name: it contains a dot and is the maximum 64 characters long -> TLS authentication
final String weirdUserName = "jjglmahyijoambryleyxjjglmahy.ijoambryleyxjjglmahyijoambryleyxasd";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editSpec()
            .editKafka()
                .withListeners(
                        new GenericKafkaListenerBuilder()
                                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                                .withPort(9093)
                                .withType(KafkaListenerType.INTERNAL)
                                .withTls(true)
                                .withAuth(new KafkaListenerAuthenticationTls())
                                .build(),
                        new GenericKafkaListenerBuilder()
                                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                                .withPort(9094)
                                .withType(KafkaListenerType.NODEPORT)
                                .withTls(true)
                                .withAuth(new KafkaListenerAuthenticationTls())
                                .build())
            .endKafka()
        .endSpec()
        .build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, weirdUserName).build());
resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .editSpec()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .withNewTls()
                .withTrustedCertificates(new CertSecretSourceBuilder()
                        .withCertificate("ca.crt")
                        .withSecretName(KafkaResources.clusterCaCertificateSecretName(clusterName))
                        .build())
            .endTls()
            .withNewKafkaClientAuthenticationTls()
                .withNewCertificateAndKey()
                    .withSecretName(weirdUserName)
                    .withCertificate("user.crt")
                    .withKey("user.key")
                .endCertificateAndKey()
            .endKafkaClientAuthenticationTls()
            .withBootstrapServers(KafkaResources.tlsBootstrapAddress(clusterName))
        .endSpec()
        .build());
testConnectAuthorizationWithWeirdUserName(extensionContext, clusterName, weirdUserName, SecurityProtocol.SSL, topicName);
}
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From class ConnectIsolatedST, method testKafkaConnectWithPlainAndScramShaAuthentication:
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testKafkaConnectWithPlainAndScramShaAuthentication(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
// Use a Kafka cluster whose plain (non-TLS) listener is secured with SCRAM-SHA-512 authentication
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withAuth(new KafkaListenerAuthenticationScramSha512())
                        .build())
            .endKafka()
        .endSpec()
        .build());
KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.scramShaUser(clusterName, userName).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1)
        .withNewSpec()
            .withBootstrapServers(KafkaResources.plainBootstrapAddress(clusterName))
            .withNewKafkaClientAuthenticationScramSha512()
                .withUsername(userName)
                .withPasswordSecret(new PasswordSecretSourceBuilder()
                        .withSecretName(userName)
                        .withPassword("password")
                        .build())
            .endKafkaClientAuthenticationScramSha512()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .withVersion(Environment.ST_KAFKA_VERSION)
            .withReplicas(1)
        .endSpec()
        .build());
final String kafkaConnectPodName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getName();
final String kafkaConnectLogs = kubeClient(namespaceName).logs(kafkaConnectPodName);
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(kafkaClientsName).get(0).getMetadata().getName();
KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(namespaceName, kafkaConnectPodName);
LOGGER.info("Verifying that KafkaConnect pod logs don't contain ERRORs");
assertThat(kafkaConnectLogs, not(containsString("ERROR")));
LOGGER.info("Creating FileStreamSink connector via pod {} with topic {}", kafkaClientsPodName, topicName);
KafkaConnectorUtils.createFileSinkConnector(namespaceName, kafkaClientsPodName, topicName, Constants.DEFAULT_SINK_FILE_PATH, KafkaConnectResources.url(clusterName, namespaceName, 8083));
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, false, kafkaClientsName + "-second", kafkaUser).build());
final String kafkaClientsSecondPodName = kubeClient(namespaceName).listPodsByPrefixInName(kafkaClientsName + "-second").get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsSecondPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();
internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(namespaceName, kafkaConnectPodName, Constants.DEFAULT_SINK_FILE_PATH, "99");
}