Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class CruiseControlApiST, method testCruiseControlBasicAPIRequests.
@ParallelNamespaceTest
@KRaftNotSupported("TopicOperator is not supported by KRaft mode and is used in this test class")
void testCruiseControlBasicAPIRequests(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3).build());
LOGGER.info("----> CRUISE CONTROL DEPLOYMENT STATE ENDPOINT <----");
String response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.POST, CruiseControlEndpoints.STATE, CruiseControlUtils.SupportedSchemes.HTTPS, true);
assertThat(response, is("Unrecognized endpoint in request '/state'\n" + "Supported POST endpoints: [ADD_BROKER, REMOVE_BROKER, FIX_OFFLINE_REPLICAS, REBALANCE, STOP_PROPOSAL_EXECUTION, PAUSE_SAMPLING, " + "RESUME_SAMPLING, DEMOTE_BROKER, ADMIN, REVIEW, TOPIC_CONFIGURATION, RIGHTSIZE]\n"));
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.GET, CruiseControlEndpoints.STATE, CruiseControlUtils.SupportedSchemes.HTTPS, true);
LOGGER.info("Verifying that {} REST API is available", CRUISE_CONTROL_NAME);
assertThat(response, not(containsString("404")));
assertThat(response, containsString("RUNNING"));
assertThat(response, containsString("NO_TASK_IN_PROGRESS"));
CruiseControlUtils.verifyThatCruiseControlTopicsArePresent(testStorage.getNamespaceName());
LOGGER.info("----> KAFKA REBALANCE <----");
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.GET, CruiseControlEndpoints.REBALANCE, CruiseControlUtils.SupportedSchemes.HTTPS, true);
assertThat(response, is("Unrecognized endpoint in request '/rebalance'\n" + "Supported GET endpoints: [BOOTSTRAP, TRAIN, LOAD, PARTITION_LOAD, PROPOSALS, STATE, KAFKA_CLUSTER_STATE, USER_TASKS, REVIEW_BOARD]\n"));
LOGGER.info("Waiting for CC will have for enough metrics to be recorded to make a proposal ");
CruiseControlUtils.waitForRebalanceEndpointIsReady(testStorage.getNamespaceName());
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.POST, CruiseControlEndpoints.REBALANCE, CruiseControlUtils.SupportedSchemes.HTTPS, true);
// The rebalance proposal should reference all of the default optimization goals
assertThat(response, containsString("RackAwareGoal"));
assertThat(response, containsString("ReplicaCapacityGoal"));
assertThat(response, containsString("DiskCapacityGoal"));
assertThat(response, containsString("NetworkInboundCapacityGoal"));
assertThat(response, containsString("NetworkOutboundCapacityGoal"));
assertThat(response, containsString("CpuCapacityGoal"));
assertThat(response, containsString("ReplicaDistributionGoal"));
assertThat(response, containsString("DiskUsageDistributionGoal"));
assertThat(response, containsString("NetworkInboundUsageDistributionGoal"));
assertThat(response, containsString("NetworkOutboundUsageDistributionGoal"));
assertThat(response, containsString("CpuUsageDistributionGoal"));
assertThat(response, containsString("TopicReplicaDistributionGoal"));
assertThat(response, containsString("LeaderReplicaDistributionGoal"));
assertThat(response, containsString("LeaderBytesInDistributionGoal"));
assertThat(response, containsString("PreferredLeaderElectionGoal"));
assertThat(response, containsString("Cluster load after rebalance"));
LOGGER.info("----> EXECUTION OF STOP PROPOSAL <----");
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.GET, CruiseControlEndpoints.STOP, CruiseControlUtils.SupportedSchemes.HTTPS, true);
assertThat(response, is("Unrecognized endpoint in request '/stop_proposal_execution'\n" + "Supported GET endpoints: [BOOTSTRAP, TRAIN, LOAD, PARTITION_LOAD, PROPOSALS, STATE, KAFKA_CLUSTER_STATE, USER_TASKS, REVIEW_BOARD]\n"));
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.POST, CruiseControlEndpoints.STOP, CruiseControlUtils.SupportedSchemes.HTTPS, true);
assertThat(response, containsString("Proposal execution stopped."));
LOGGER.info("----> USER TASKS <----");
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.POST, CruiseControlEndpoints.USER_TASKS, CruiseControlUtils.SupportedSchemes.HTTPS, true);
assertThat(response, is("Unrecognized endpoint in request '/user_tasks'\n" + "Supported POST endpoints: [ADD_BROKER, REMOVE_BROKER, FIX_OFFLINE_REPLICAS, REBALANCE, STOP_PROPOSAL_EXECUTION, PAUSE_SAMPLING, " + "RESUME_SAMPLING, DEMOTE_BROKER, ADMIN, REVIEW, TOPIC_CONFIGURATION, RIGHTSIZE]\n"));
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.GET, CruiseControlEndpoints.USER_TASKS, CruiseControlUtils.SupportedSchemes.HTTPS, true);
assertThat(response, containsString("GET"));
assertThat(response, containsString(CruiseControlEndpoints.STATE.toString()));
assertThat(response, containsString("POST"));
assertThat(response, containsString(CruiseControlEndpoints.REBALANCE.toString()));
assertThat(response, containsString(CruiseControlEndpoints.STOP.toString()));
assertThat(response, containsString(CruiseControlUserTaskStatus.COMPLETED.toString()));
LOGGER.info("Verifying that {} REST API doesn't allow HTTP requests", CRUISE_CONTROL_NAME);
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.GET, CruiseControlEndpoints.STATE, CruiseControlUtils.SupportedSchemes.HTTP, false);
assertThat(response, not(containsString("RUNNING")));
assertThat(response, not(containsString("NO_TASK_IN_PROGRESS")));
LOGGER.info("Verifying that {} REST API doesn't allow unauthenticated requests", CRUISE_CONTROL_NAME);
response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.GET, CruiseControlEndpoints.STATE, CruiseControlUtils.SupportedSchemes.HTTPS, false);
assertThat(response, containsString("401 Unauthorized"));
}
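The CruiseControlUtils.callApi helper used throughout this test is not shown on this page. As a rough illustration of what such a call involves, the sketch below issues an authenticated GET against the Cruise Control state endpoint with the JDK HttpClient. The port, path, credentials and the omitted TLS trust setup are assumptions for illustration, not the helper's actual implementation.

// Illustrative sketch only: an assumed, simplified stand-in for CruiseControlUtils.callApi.
// The port (9090), API path and basic-auth credentials are placeholders, and the TLS trust
// configuration for the cluster CA that the real helper needs is deliberately omitted.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class CruiseControlStateCallSketch {

    public static String getState(String host, String user, String password) throws Exception {
        // Cruise Control is assumed to use HTTP basic authentication here
        String credentials = Base64.getEncoder()
            .encodeToString((user + ":" + password).getBytes(StandardCharsets.UTF_8));

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create("https://" + host + ":9090/kafkacruisecontrol/state"))
            .header("Authorization", "Basic " + credentials)
            .GET()
            .build();

        // Return status and body so callers can assert on either, as the test does
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        return response.statusCode() + " " + response.body();
    }
}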
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class CruiseControlApiST, method testCruiseControlBasicAPIRequestsWithSecurityDisabled.
@ParallelNamespaceTest
void testCruiseControlBasicAPIRequestsWithSecurityDisabled(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(cruiseControlApiClusterName, 3, 3)
    .editOrNewSpec()
        .withNewCruiseControl()
            .withConfig(new HashMap<String, Object>() {
                {
                    put("webserver.security.enable", "false");
                    put("webserver.ssl.enable", "false");
                }
            })
        .endCruiseControl()
    .endSpec()
    .build());
LOGGER.info("----> CRUISE CONTROL DEPLOYMENT STATE ENDPOINT <----");
String response = CruiseControlUtils.callApi(testStorage.getNamespaceName(), CruiseControlUtils.SupportedHttpMethods.GET, CruiseControlEndpoints.STATE, CruiseControlUtils.SupportedSchemes.HTTP, false);
LOGGER.info("Verifying that {} REST API is available using HTTP request without credentials", CRUISE_CONTROL_NAME);
assertThat(response, not(containsString("404")));
assertThat(response, containsString("RUNNING"));
assertThat(response, containsString("NO_TASK_IN_PROGRESS"));
}
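The configuration above uses double-brace initialization, which creates an anonymous HashMap subclass that captures a reference to the enclosing test instance. A plain map works just as well; a minimal alternative sketch of the same Cruise Control configuration, not taken from the project's code:

// Same Cruise Control configuration without double-brace initialization.
// Map.of produces an immutable Map<String, Object>, which is sufficient here
// because the builder only reads the entries.
Map<String, Object> cruiseControlConfig = Map.of(
    "webserver.security.enable", "false",
    "webserver.ssl.enable", "false");

resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(cruiseControlApiClusterName, 3, 3)
    .editOrNewSpec()
        .withNewCruiseControl()
            .withConfig(cruiseControlConfig)
        .endCruiseControl()
    .endSpec()
    .build());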
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class CruiseControlST, method testCruiseControlIntraBrokerBalancing.
@ParallelNamespaceTest
@KRaftNotSupported("JBOD is not supported by KRaft mode and is used in this test case.")
void testCruiseControlIntraBrokerBalancing(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
String diskSize = "6Gi";
JbodStorage jbodStorage = new JbodStorageBuilder()
    .withVolumes(
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSize).build(),
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSize).build())
    .build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3)
    .editMetadata()
        .withNamespace(testStorage.getNamespaceName())
    .endMetadata()
    .editOrNewSpec()
        .editKafka()
            .withStorage(jbodStorage)
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
    .editMetadata()
        .withNamespace(testStorage.getNamespaceName())
    .endMetadata()
    .editOrNewSpec()
        .withRebalanceDisk(true)
    .endSpec()
    .build());
KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaRebalanceState.ProposalReady);
LOGGER.info("Checking status of KafkaRebalance");
// The provision status should be "UNDECIDED" when doing an intra-broker disk balance because it is irrelevant to the provision status
KafkaRebalanceStatus kafkaRebalanceStatus = KafkaRebalanceResource.kafkaRebalanceClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus();
assertThat(kafkaRebalanceStatus.getOptimizationResult().get("provisionStatus").toString(), containsString("UNDECIDED"));
}
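If the proposal needs to be inspected beyond the provision status, the whole optimization result map can be logged with the same typed client used in the test above. A short sketch, not part of the original test:

// Sketch: dump the full optimization result for debugging, using the same call chain
// as the assertion above. No keys other than provisionStatus are asserted here.
Map<String, Object> optimizationResult = KafkaRebalanceResource.kafkaRebalanceClient()
    .inNamespace(testStorage.getNamespaceName())
    .withName(testStorage.getClusterName())
    .get()
    .getStatus()
    .getOptimizationResult();

optimizationResult.forEach((key, value) -> LOGGER.info("{} -> {}", key, value));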
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class KafkaST, method testLabelModificationDoesNotBreakCluster.
@ParallelNamespaceTest
@SuppressWarnings({ "checkstyle:MethodLength" })
@Tag(INTERNAL_CLIENTS_USED)
void testLabelModificationDoesNotBreakCluster(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
final int kafkaReplicas = 3;
Map<String, String> labels = new HashMap<>();
final String[] labelKeys = { "label-name-1", "label-name-2", "" };
final String[] labelValues = { "name-of-the-label-1", "name-of-the-label-2", "" };
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), kafkaReplicas, 1).editMetadata().withLabels(labels).endMetadata().build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withTopicName(testStorage.getTopicName())
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withNamespaceName(testStorage.getNamespaceName())
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withMessageCount(MESSAGE_COUNT)
    .build();
resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
Map<String, String> zkPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector());
Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
StUtils.waitForStatefulSetOrStrimziPodSetLabelsChange(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), labels);
LOGGER.info("Getting labels from stateful set resource");
Map<String, String> kafkaLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()));
LOGGER.info("Verifying default labels in the Kafka CR");
assertThat("Label exists in stateful set with concrete value", labelValues[0].equals(kafkaLabels.get(labelKeys[0])));
assertThat("Label exists in stateful set with concrete value", labelValues[1].equals(kafkaLabels.get(labelKeys[1])));
labelValues[0] = "new-name-of-the-label-1";
labelValues[1] = "new-name-of-the-label-2";
labelKeys[2] = "label-name-3";
labelValues[2] = "name-of-the-label-3";
LOGGER.info("Setting new values of labels from {} to {} | from {} to {} and adding one {} with value {}", "name-of-the-label-1", labelValues[0], "name-of-the-label-2", labelValues[1], labelKeys[2], labelValues[2]);
LOGGER.info("Edit kafka labels in Kafka CR");
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), resource -> {
resource.getMetadata().getLabels().put(labelKeys[0], labelValues[0]);
resource.getMetadata().getLabels().put(labelKeys[1], labelValues[1]);
resource.getMetadata().getLabels().put(labelKeys[2], labelValues[2]);
}, testStorage.getNamespaceName());
if (!Environment.isKRaftModeEnabled()) {
zkPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), 1, zkPods);
}
kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
labels.put(labelKeys[2], labelValues[2]);
LOGGER.info("Waiting for kafka service labels changed {}", labels);
ServiceUtils.waitForServiceLabelsChange(testStorage.getNamespaceName(), KafkaResources.brokersServiceName(testStorage.getClusterName()), labels);
LOGGER.info("Verifying kafka labels via services");
Service service = kubeClient(testStorage.getNamespaceName()).getService(testStorage.getNamespaceName(), KafkaResources.brokersServiceName(testStorage.getClusterName()));
verifyPresentLabels(labels, service.getMetadata().getLabels());
for (String cmName : StUtils.getKafkaConfigurationConfigMaps(testStorage.getClusterName(), kafkaReplicas)) {
LOGGER.info("Waiting for Kafka ConfigMap {} in namespace {} to have new labels: {}", cmName, testStorage.getNamespaceName(), labels);
ConfigMapUtils.waitForConfigMapLabelsChange(testStorage.getNamespaceName(), cmName, labels);
LOGGER.info("Verifying Kafka labels on ConfigMap {} in namespace {}", cmName, testStorage.getNamespaceName());
ConfigMap configMap = kubeClient(testStorage.getNamespaceName()).getConfigMap(testStorage.getNamespaceName(), cmName);
verifyPresentLabels(labels, configMap.getMetadata().getLabels());
}
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
StUtils.waitForStatefulSetOrStrimziPodSetLabelsChange(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), labels);
LOGGER.info("Verifying kafka labels via stateful set");
verifyPresentLabels(labels, StUtils.getLabelsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())));
LOGGER.info("Verifying via kafka pods");
labels = kubeClient(testStorage.getNamespaceName()).getPod(testStorage.getNamespaceName(), KafkaResources.kafkaPodName(testStorage.getClusterName(), 0)).getMetadata().getLabels();
assertThat("Label exists in kafka pods", labelValues[0].equals(labels.get(labelKeys[0])));
assertThat("Label exists in kafka pods", labelValues[1].equals(labels.get(labelKeys[1])));
assertThat("Label exists in kafka pods", labelValues[2].equals(labels.get(labelKeys[2])));
LOGGER.info("Removing labels: {} -> {}, {} -> {}, {} -> {}", labelKeys[0], labels.get(labelKeys[0]), labelKeys[1], labels.get(labelKeys[1]), labelKeys[2], labels.get(labelKeys[2]));
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), resource -> {
resource.getMetadata().getLabels().remove(labelKeys[0]);
resource.getMetadata().getLabels().remove(labelKeys[1]);
resource.getMetadata().getLabels().remove(labelKeys[2]);
}, testStorage.getNamespaceName());
if (!Environment.isKRaftModeEnabled()) {
RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), 1, zkPods);
}
RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
labels.remove(labelKeys[0]);
labels.remove(labelKeys[1]);
labels.remove(labelKeys[2]);
LOGGER.info("Waiting for kafka service labels deletion {}", labels.toString());
ServiceUtils.waitForServiceLabelsDeletion(testStorage.getNamespaceName(), KafkaResources.brokersServiceName(testStorage.getClusterName()), labelKeys[0], labelKeys[1], labelKeys[2]);
LOGGER.info("Verifying kafka labels via services");
service = kubeClient(testStorage.getNamespaceName()).getService(testStorage.getNamespaceName(), KafkaResources.brokersServiceName(testStorage.getClusterName()));
verifyNullLabels(labelKeys, service);
for (String cmName : StUtils.getKafkaConfigurationConfigMaps(testStorage.getClusterName(), kafkaReplicas)) {
LOGGER.info("Waiting for Kafka ConfigMap {} in namespace {} to have labels removed: {}", cmName, testStorage.getNamespaceName(), labelKeys);
ConfigMapUtils.waitForConfigMapLabelsDeletion(testStorage.getNamespaceName(), cmName, labelKeys[0], labelKeys[1], labelKeys[2]);
LOGGER.info("Verifying Kafka labels on ConfigMap {} in namespace {}", cmName, testStorage.getNamespaceName());
ConfigMap configMap = kubeClient(testStorage.getNamespaceName()).getConfigMap(testStorage.getNamespaceName(), cmName);
verifyNullLabels(labelKeys, configMap);
}
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
StUtils.waitForStatefulSetOrStrimziPodSetLabelsDeletion(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), labelKeys[0], labelKeys[1], labelKeys[2]);
LOGGER.info("Verifying kafka labels via stateful set");
verifyNullLabels(labelKeys, StUtils.getLabelsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())));
LOGGER.info("Waiting for kafka pod labels deletion {}", labels.toString());
PodUtils.waitUntilPodLabelsDeletion(testStorage.getNamespaceName(), KafkaResources.kafkaPodName(testStorage.getClusterName(), 0), labelKeys[0], labelKeys[1], labelKeys[2]);
labels = kubeClient(testStorage.getNamespaceName()).getPod(testStorage.getNamespaceName(), KafkaResources.kafkaPodName(testStorage.getClusterName(), 0)).getMetadata().getLabels();
LOGGER.info("Verifying via kafka pods");
verifyNullLabels(labelKeys, labels);
resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
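The verifyPresentLabels and verifyNullLabels helpers are defined elsewhere in KafkaST and are not shown on this page. A plausible minimal reading of what they assert, written as a sketch rather than the project's actual code:

// Assumed behaviour of the helpers used above; signatures and bodies are an illustration,
// not copied from KafkaST.
private void verifyPresentLabels(Map<String, String> expectedLabels, Map<String, String> resourceLabels) {
    expectedLabels.forEach((key, value) ->
        assertThat("Label " + key + " is present with the expected value",
            resourceLabels.get(key), is(value)));
}

private void verifyNullLabels(String[] labelKeys, Map<String, String> resourceLabels) {
    for (String labelKey : labelKeys) {
        assertThat("Label " + labelKey + " has been removed", resourceLabels.get(labelKey), nullValue());
    }
}

// Overload assumed for the Service and ConfigMap call sites, which pass the whole resource
private void verifyNullLabels(String[] labelKeys, HasMetadata resource) {
    verifyNullLabels(labelKeys, resource.getMetadata().getLabels());
}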
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class KafkaST, method testReadOnlyRootFileSystem.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@Tag(CRUISE_CONTROL)
void testReadOnlyRootFileSystem(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3)
    .editSpec()
        .editKafka()
            .withNewTemplate()
                .withNewKafkaContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endKafkaContainer()
            .endTemplate()
        .endKafka()
        .editZookeeper()
            .withNewTemplate()
                .withNewZookeeperContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endZookeeperContainer()
            .endTemplate()
        .endZookeeper()
        .editEntityOperator()
            .withNewTemplate()
                .withNewTlsSidecarContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endTlsSidecarContainer()
                .withNewTopicOperatorContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endTopicOperatorContainer()
                .withNewUserOperatorContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endUserOperatorContainer()
            .endTemplate()
        .endEntityOperator()
        .editOrNewKafkaExporter()
            .withNewTemplate()
                .withNewContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endContainer()
            .endTemplate()
        .endKafkaExporter()
        .editOrNewCruiseControl()
            .withNewTemplate()
                .withNewTlsSidecarContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endTlsSidecarContainer()
                .withNewCruiseControlContainer()
                    .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                .endCruiseControlContainer()
            .endTemplate()
        .endCruiseControl()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withTopicName(testStorage.getTopicName())
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withNamespaceName(testStorage.getNamespaceName())
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withMessageCount(MESSAGE_COUNT)
    .build();
resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
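The builder chain above repeats the same read-only-root-filesystem SecurityContext for every container template. Purely as a readability sketch, and not how the project writes it, the repeated expression could be hoisted into a local variable:

// Sketch: one shared SecurityContext instance reused for every container template.
SecurityContext readOnlyRootFs = new SecurityContextBuilder()
    .withReadOnlyRootFilesystem(true)
    .build();

resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3)
    .editSpec()
        .editKafka()
            .withNewTemplate()
                .withNewKafkaContainer()
                    .withSecurityContext(readOnlyRootFs)
                .endKafkaContainer()
            .endTemplate()
        .endKafka()
        // ... the ZooKeeper, Entity Operator, Kafka Exporter and Cruise Control container
        // templates take the same readOnlyRootFs value, exactly as in the full chain above.
    .endSpec()
    .build());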