Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi by strimzi.
The class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestartUsingExternalClients:
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(ROLLING_UPDATE)
void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients(ExtensionContext extensionContext) {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1).editMetadata().withNamespace(namespace).endMetadata().editSpec().editKafka().withListeners(new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build()).withConfig(deepCopyOfShardKafkaConfig).endKafka().endSpec().build());
Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, namespace).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(namespace, clusterName, userName).build());
ExternalKafkaClient externalKafkaClientTls = new ExternalKafkaClient.Builder().withTopicName(topicName).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withKafkaUsername(userName).withSecurityProtocol(SecurityProtocol.SSL).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
ExternalKafkaClient externalKafkaClientPlain = new ExternalKafkaClient.Builder().withTopicName(topicName).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withSecurityProtocol(SecurityProtocol.PLAINTEXT).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain(), externalKafkaClientPlain.receiveMessagesPlain());
assertThrows(Exception.class, () -> {
externalKafkaClientTls.sendMessagesTls();
externalKafkaClientTls.receiveMessagesTls();
LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
});
LOGGER.info("Updating listeners of Kafka cluster");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getKafka().setListeners(Arrays.asList(new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build(), new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth().build()));
}, namespace);
// TODO: remove it ?
kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
externalKafkaClientTls.verifyProducedAndConsumedMessages(externalKafkaClientTls.sendMessagesTls() + MESSAGE_COUNT, externalKafkaClientTls.receiveMessagesTls());
assertThrows(Exception.class, () -> {
externalKafkaClientPlain.sendMessagesPlain();
externalKafkaClientPlain.receiveMessagesPlain();
LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to tls communication");
});
LOGGER.info("Updating listeners of Kafka cluster");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getKafka().setListeners(Collections.singletonList(new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build()));
}, namespace);
RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
assertThrows(Exception.class, () -> {
externalKafkaClientTls.sendMessagesTls();
externalKafkaClientTls.receiveMessagesTls();
LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
});
externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain() + MESSAGE_COUNT, externalKafkaClientPlain.receiveMessagesPlain());
}
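The ExternalKafkaClient used above hides the NodePort plumbing: for a plain listener, an external client amounts to a standard Kafka client pointed at a worker node's address and the node port allocated for the external listener. A minimal sketch of that plain case follows; the nodeAddress and nodePort values are assumptions (in the system tests they are resolved from the Kubernetes API before the client is built), and NodePortProducerSketch is a hypothetical class, not part of the test suite.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class NodePortProducerSketch {
    public static void main(String[] args) {
        String nodeAddress = "10.0.0.15";   // hypothetical worker-node IP
        int nodePort = 31234;               // hypothetical port allocated for the NodePort listener

        Properties props = new Properties();
        // Bootstrap directly against the node address and node port exposed by the listener
        props.put("bootstrap.servers", nodeAddress + ":" + nodePort);
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        // security.protocol defaults to PLAINTEXT, matching the tls(false) listener above

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("my-topic", "key", "hello"));
        }
    }
}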
Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi by strimzi.
The class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestart:
@Tag(NODEPORT_SUPPORTED)
@Tag(ROLLING_UPDATE)
@IsolatedTest("Using more than one Kafka cluster in one namespace")
void testUpdateToExternalListenerCausesRollingRestart(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream()
        .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(false)
                        .build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());

    String kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));

    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);

    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));

    // Edit listeners - this should cause a rolling update (because of new certs)
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));

    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));

    deepCopyOfShardKafkaConfig.put("compression.type", "snappy");
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);

    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("compression.type=snappy"));

    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));

    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);

    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));

    // Remove the internal TLS listener - this should cause a rolling update (advertised.listeners must be updated)
    // Other listener changes roll because of new certs
    kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));

    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));

    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", false);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);

    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + false));
}
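The updateAndVerifyDynConf calls and the repeated kafka-configs.sh --describe checks above exercise Kafka's dynamic broker configuration, where settings such as unclean.leader.election.enable are applied without restarting the broker. The same operation can be sketched with the plain Kafka Admin API; the bootstrap address below and the DynamicBrokerConfigSketch class are assumptions for illustration, not part of the test suite.

import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class DynamicBrokerConfigSketch {
    public static void main(String[] args) throws Exception {
        try (Admin admin = Admin.create(
                Map.<String, Object>of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
            // Target broker 0, matching the --entity-type brokers --entity-name 0 check in the test
            ConfigResource broker0 = new ConfigResource(ConfigResource.Type.BROKER, "0");
            Map<ConfigResource, Collection<AlterConfigOp>> updates = Map.of(broker0,
                List.of(new AlterConfigOp(
                    new ConfigEntry("unclean.leader.election.enable", "true"),
                    AlterConfigOp.OpType.SET)));
            // Applied dynamically - the broker picks this up without a rolling restart,
            // which is exactly the property the test verifies via kafka-configs.sh --describe
            admin.incrementalAlterConfigs(updates).all().get();
        }
    }
}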
Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi by strimzi.
The class ListenersST, method testAdvertisedHostNamesAppearsInBrokerCerts:
@Tag(NODEPORT_SUPPORTED)
@ParallelNamespaceTest
void testAdvertisedHostNamesAppearsInBrokerCerts(ExtensionContext extensionContext) throws CertificateException {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);

    final String advertHostInternal0 = "kafka-test.internal.0.net";
    final String advertHostInternal1 = "kafka-test.internal.1.net";
    final String advertHostInternal2 = "kafka-test.internal.2.net";
    List<String> advertHostInternalList = asList(advertHostInternal0, advertHostInternal1, advertHostInternal2);
    final int advertPortInternalListener = 9999;

    final String advertHostExternal0 = "kafka-test.external.0.net";
    final String advertHostExternal1 = "kafka-test.external.1.net";
    final String advertHostExternal2 = "kafka-test.external.2.net";
    List<String> advertHostExternalList = asList(advertHostExternal0, advertHostExternal1, advertHostExternal2);
    final int advertPortExternalListener = 9888;

    GenericKafkaListenerConfigurationBroker brokerInternal0 = new GenericKafkaListenerConfigurationBrokerBuilder()
        .withBroker(0)
        .withAdvertisedHost(advertHostInternal0)
        .withAdvertisedPort(advertPortInternalListener)
        .build();
    GenericKafkaListenerConfigurationBroker brokerInternal1 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerInternal0)
        .withBroker(1)
        .withAdvertisedHost(advertHostInternal1)
        .build();
    GenericKafkaListenerConfigurationBroker brokerInternal2 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerInternal0)
        .withBroker(2)
        .withAdvertisedHost(advertHostInternal2)
        .build();

    GenericKafkaListenerConfigurationBroker brokerExternal0 = new GenericKafkaListenerConfigurationBrokerBuilder()
        .withBroker(0)
        .withAdvertisedHost(advertHostExternal0)
        .withAdvertisedPort(advertPortExternalListener)
        .build();
    GenericKafkaListenerConfigurationBroker brokerExternal1 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerExternal0)
        .withBroker(1)
        .withAdvertisedHost(advertHostExternal1)
        .build();
    GenericKafkaListenerConfigurationBroker brokerExternal2 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerExternal0)
        .withBroker(2)
        .withAdvertisedHost(advertHostExternal2)
        .build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3)
        .editSpec()
            .editKafka()
                .withListeners(asList(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9098)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withNewConfiguration()
                            .withBrokers(asList(brokerInternal0, brokerInternal1, brokerInternal2))
                        .endConfiguration()
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9099)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(true)
                        .withNewConfiguration()
                            .withBrokers(asList(brokerExternal0, brokerExternal1, brokerExternal2))
                        .endConfiguration()
                        .build()))
            .endKafka()
        .endSpec()
        .build());

    Map<String, String> secretData = kubeClient().getSecret(namespaceName, KafkaResources.brokersServiceName(clusterName)).getData();
    List<String> kafkaPods = kubeClient().listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND)
        .stream()
        .filter(podName -> podName.contains("kafka"))
        .collect(Collectors.toList());

    int index = 0;
    for (String kafkaBroker : kafkaPods) {
        String cert = secretData.get(kafkaBroker + ".crt");
        LOGGER.info("Decoding {}.crt", kafkaBroker);
        ByteArrayInputStream publicCert = new ByteArrayInputStream(Base64.getDecoder().decode(cert.getBytes()));
        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        Certificate certificate = certificateFactory.generateCertificate(publicCert);
        // Each broker's certificate must carry both its internal and external advertised hosts
        assertThat(certificate.toString(), containsString(advertHostInternalList.get(index)));
        assertThat(certificate.toString(), containsString(advertHostExternalList.get(index++)));
    }
}
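The loop above asserts against certificate.toString(), which succeeds because the advertised hosts end up in the certificate's Subject Alternative Names. A more direct check, sketched below on the assumption that the hosts appear as DNS-type SANs, reads the SAN list explicitly; hasDnsSan and SanCheckSketch are hypothetical helpers, not part of the test suite.

import java.io.ByteArrayInputStream;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;
import java.util.Collection;
import java.util.List;

public class SanCheckSketch {
    // Returns true if the certificate carries the advertised host as a DNS SAN.
    // certBase64 is the base64-encoded value of a <pod>.crt entry from the brokers'
    // Secret, as read in the test above.
    static boolean hasDnsSan(String certBase64, String advertisedHost) throws Exception {
        byte[] pem = Base64.getDecoder().decode(certBase64);
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        X509Certificate cert = (X509Certificate) cf.generateCertificate(new ByteArrayInputStream(pem));
        Collection<List<?>> sans = cert.getSubjectAlternativeNames();
        if (sans == null) {
            return false;
        }
        for (List<?> san : sans) {
            // Each SAN entry is a two-element list [type, value]; type 2 is dNSName (RFC 5280)
            if (Integer.valueOf(2).equals(san.get(0)) && advertisedHost.equals(san.get(1))) {
                return true;
            }
        }
        return false;
    }
}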
Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi-kafka-operator by strimzi.
The class DynamicConfST, methods testUpdateToExternalListenerCausesRollingRestart and testUpdateToExternalListenerCausesRollingRestartUsingExternalClients: identical to the strimzi entries shown above.