Use of io.strimzi.systemtest.Constants.INFRA_NAMESPACE in project strimzi by strimzi.
The class OauthScopeIsolatedST, method testClientScopeKafkaSetIncorrectly.
@IsolatedTest("Modification of shared Kafka cluster")
void testClientScopeKafkaSetIncorrectly(ExtensionContext extensionContext) throws UnexpectedException {
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String producerName = OAUTH_PRODUCER_NAME + "-" + clusterName;
final String consumerName = OAUTH_CONSUMER_NAME + "-" + clusterName;
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(oauthClusterName, KafkaResources.kafkaStatefulSetName(oauthClusterName));
KafkaClients oauthInternalClientChecksJob = new KafkaClientsBuilder().withNamespaceName(INFRA_NAMESPACE).withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.bootstrapServiceName(oauthClusterName) + ":" + scopeListenerPort).withTopicName(topicName).withMessageCount(MESSAGE_COUNT).withAdditionalConfig(additionalOauthConfig).build();
// re-configuring Kafka listener to have client scope assigned to null
KafkaResource.replaceKafkaResourceInSpecificNamespace(oauthClusterName, kafka -> {
List<GenericKafkaListener> scopeListeners = kafka.getSpec().getKafka().getListeners().stream().filter(listener -> listener.getName().equals(scopeListener)).collect(Collectors.toList());
((KafkaListenerAuthenticationOAuth) scopeListeners.get(0).getAuth()).setClientScope(null);
kafka.getSpec().getKafka().getListeners().set(0, scopeListeners.get(0));
}, INFRA_NAMESPACE);
RollingUpdateUtils.waitForComponentAndPodsReady(INFRA_NAMESPACE, kafkaSelector, 1);
// verification phase client should fail here because clientScope is set to 'null'
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicName, INFRA_NAMESPACE).build());
resourceManager.createResource(extensionContext, oauthInternalClientChecksJob.producerStrimzi());
// client should fail because the listener requires scope: 'test' in JWT token but was (the listener) temporarily
// configured without clientScope resulting in a JWT token without the scope claim when using the clientId and
// secret passed via SASL/PLAIN to obtain an access token in client's name.
ClientUtils.waitForClientTimeout(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
JobUtils.deleteJobWithWait(INFRA_NAMESPACE, producerName);
// rollback previous configuration
// re-configuring Kafka listener to have client scope assigned to 'test'
KafkaResource.replaceKafkaResourceInSpecificNamespace(oauthClusterName, kafka -> {
List<GenericKafkaListener> scopeListeners = kafka.getSpec().getKafka().getListeners().stream().filter(listener -> listener.getName().equals(scopeListener)).collect(Collectors.toList());
((KafkaListenerAuthenticationOAuth) scopeListeners.get(0).getAuth()).setClientScope("test");
kafka.getSpec().getKafka().getListeners().set(0, scopeListeners.get(0));
}, INFRA_NAMESPACE);
RollingUpdateUtils.waitForComponentAndPodsReady(INFRA_NAMESPACE, kafkaSelector, 1);
}
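For orientation, here is a hedged sketch of how a scope-enforcing OAuth listener like the one manipulated above might be declared with the Strimzi builder API. The listener name, port, and the two Keycloak URI variables are illustrative assumptions, and withClientScope is inferred from the setClientScope call used in the test.

// Hedged sketch, not taken from the test above: declaring an OAuth listener with a
// client scope. The name, port, and the two URI variables are assumptions.
GenericKafkaListener scopeEnforcingListener = new GenericKafkaListenerBuilder()
    .withName("scope")                      // assumed listener name
    .withPort(9098)                         // assumed port
    .withType(KafkaListenerType.INTERNAL)
    .withAuth(new KafkaListenerAuthenticationOAuthBuilder()
        .withValidIssuerUri(keycloakValidIssuerUri)     // assumed variable
        .withJwksEndpointUri(keycloakJwksEndpointUri)   // assumed variable
        .withClientScope("test")    // the scope the test nulls out and later restores
        .withEnablePlain(true)      // lets clients pass clientId/secret via SASL/PLAIN
        .build())
    .build();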
Use of io.strimzi.systemtest.Constants.INFRA_NAMESPACE in project strimzi by strimzi.
The class RollingUpdateST, method testClusterOperatorFinishAllRollingUpdates.
@IsolatedTest("Deleting Pod of Shared Cluster Operator")
@Tag(ROLLING_UPDATE)
void testClusterOperatorFinishAllRollingUpdates(ExtensionContext extensionContext) {
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editMetadata().withNamespace(namespace).endMetadata().build());
Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
Map<String, String> zkPods = PodUtils.podSnapshot(namespace, zkSelector);
// Changes to readiness probe should trigger a rolling update
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
kafka.getSpec().getKafka().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
kafka.getSpec().getZookeeper().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
}, namespace);
TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods(namespace).stream().filter(pod -> pod.getStatus().getPhase().equals("Running")).map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).size() < kubeClient().listPods().size());
LabelSelector coLabelSelector = kubeClient(INFRA_NAMESPACE).getDeployment(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName()).getSpec().getSelector();
LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
LOGGER.info("Cluster Operator pod deleted");
RollingUpdateUtils.waitTillComponentHasRolled(namespace, zkSelector, 3, zkPods);
TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods().stream().map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).contains("Pending"));
LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
LOGGER.info("Cluster Operator pod deleted");
RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, 3, kafkaPods);
}
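The test leans on the snapshot-and-compare pattern behind PodUtils.podSnapshot and RollingUpdateUtils.waitTillComponentHasRolled. The following is a hedged sketch of that idea using the fabric8 client directly; it is an assumed illustration, not Strimzi's actual implementation.

// Hedged sketch of the snapshot-and-compare idea: record each pod's UID before the
// change, then report "rolled" once no pod matching the selector still carries a
// UID from the snapshot (a recreated pod keeps its name but gets a new UID).
static Map<String, String> snapshotPods(KubernetesClient client, String ns, LabelSelector selector) {
    return client.pods().inNamespace(ns).withLabelSelector(selector).list().getItems().stream()
        .collect(Collectors.toMap(p -> p.getMetadata().getName(), p -> p.getMetadata().getUid()));
}

static boolean hasRolled(KubernetesClient client, String ns, LabelSelector selector, Map<String, String> before) {
    // True once every matching pod has a UID that was not in the snapshot.
    return client.pods().inNamespace(ns).withLabelSelector(selector).list().getItems().stream()
        .noneMatch(p -> p.getMetadata().getUid().equals(before.get(p.getMetadata().getName())));
}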
Use of io.strimzi.systemtest.Constants.INFRA_NAMESPACE in project strimzi by strimzi.
The class NamespaceRbacScopeOperatorIsolatedST, method testNamespacedRbacScopeDeploysRoles.
@IsolatedTest("This test case needs own Cluster Operator")
void testNamespacedRbacScopeDeploysRoles(ExtensionContext extensionContext) {
assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(INFRA_NAMESPACE).withExtraEnvVars(Collections.singletonList(new EnvVar(Environment.STRIMZI_RBAC_SCOPE_ENV, Environment.STRIMZI_RBAC_SCOPE_NAMESPACE, null))).createInstallation().runInstallation();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3).editMetadata().addToLabels("app", "strimzi").endMetadata().build());
// Wait for Kafka to be Ready to ensure all potentially erroneous ClusterRole applications have happened
KafkaUtils.waitForKafkaReady(clusterName);
// Assert that no ClusterRoles are present on the server that have app strimzi
// Naturally returns false positives if another Strimzi operator has been installed
List<ClusterRole> strimziClusterRoles = kubeClient().listClusterRoles().stream().filter(cr -> {
Map<String, String> labels = cr.getMetadata().getLabels() != null ? cr.getMetadata().getLabels() : Collections.emptyMap();
return "strimzi".equals(labels.get("app"));
}).collect(Collectors.toList());
assertThat(strimziClusterRoles, is(Collections.emptyList()));
}
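As a hedged alternative, the same check could be expressed with fabric8's server-side label filtering instead of streaming over all ClusterRoles client-side; this sketch assumes the underlying fabric8 client is reachable via kubeClient().getClient(), as seen elsewhere in these snippets.

// Hedged sketch: server-side label filtering for the same assertion.
List<ClusterRole> strimziClusterRoles = kubeClient().getClient().rbac().clusterRoles()
    .withLabel("app", "strimzi")   // filter in the API server, not in the test
    .list().getItems();
assertThat(strimziClusterRoles, is(Collections.emptyList()));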
Use of io.strimzi.systemtest.Constants.INFRA_NAMESPACE in project strimzi by strimzi.
The class FeatureGatesIsolatedST, method testSwitchingStrimziPodSetFeatureGateOnAndOff.
@IsolatedTest
void testSwitchingStrimziPodSetFeatureGateOnAndOff(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    int zkReplicas = 3;
    int kafkaReplicas = 3;
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 500;
    List<EnvVar> coEnvVars = new ArrayList<>();
    coEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-UseStrimziPodSets", null));
    LOGGER.info("Deploying CO with STS - SPS is disabled");
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(coEnvVars)
        .createInstallation()
        .runInstallation();
    LOGGER.info("Deploying Kafka");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, kafkaReplicas, zkReplicas).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    Map<String, String> coPod = DeploymentUtils.depSnapshot(ResourceManager.getCoDeploymentName());
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(1000)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    LOGGER.info("Changing FG env variable to enable SPS");
    coEnvVars = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME).getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    coEnvVars.stream().filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV)).findFirst().get().setValue("+UseStrimziPodSets");
    Deployment coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);
    coPod = DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    zkPods = RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);
    KafkaUtils.waitForKafkaReady(clusterName);
    LOGGER.info("Changing FG env variable to disable SPS again");
    coEnvVars.stream().filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV)).findFirst().get().setValue("");
    coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);
    DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, INFRA_NAMESPACE, messageCount);
}
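The get/mutate/replace dance around the Cluster Operator Deployment could also be written with fabric8's edit(...) in a single round trip. The helper below is a hedged sketch of that idea; the helper name is an assumption, and it assumes the feature-gates variable is already present on the first container, as it is in the test above.

// Hedged sketch (assumed helper, not the test's code): flipping the feature-gates
// env var via fabric8's edit(...) instead of get/mutate/replace.
static void setFeatureGates(KubernetesClient client, String ns, String deploymentName, String value) {
    client.apps().deployments().inNamespace(ns).withName(deploymentName).edit(deployment -> {
        // Mutate the existing env entry in place, mirroring what the test does by hand.
        deployment.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv().stream()
            .filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV))
            .findFirst()
            .ifPresent(env -> env.setValue(value));
        return deployment;
    });
}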
Use of io.strimzi.systemtest.Constants.INFRA_NAMESPACE in project strimzi by strimzi.
The class HttpBridgeIsolatedST, method testCustomBridgeLabelsAreProperlySet.
@ParallelTest
void testCustomBridgeLabelsAreProperlySet(ExtensionContext extensionContext) {
    final String bridgeName = "bridge-" + mapWithClusterNames.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaBridgeTemplates.kafkaBridge(bridgeName, KafkaResources.plainBootstrapAddress(httpBridgeClusterName), 1)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .build());
    // Get the service carrying the custom labels
    final Service kafkaBridgeService = kubeClient(INFRA_NAMESPACE).getService(INFRA_NAMESPACE, KafkaBridgeResources.serviceName(bridgeName));
    // Keep only the custom app=bar label entry and the bar=app annotation entry
    final Map<String, String> filteredActualKafkaBridgeCustomLabels = kafkaBridgeService.getMetadata().getLabels().entrySet().stream()
        .filter(item -> item.getKey().equals("app") && item.getValue().equals("bar"))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    final Map<String, String> filteredActualKafkaBridgeCustomAnnotations = kafkaBridgeService.getMetadata().getAnnotations().entrySet().stream()
        .filter(item -> item.getKey().equals("bar") && item.getValue().equals("app"))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    // Verification phase: the expected custom labels and annotations defined previously
    // must be present on the KafkaBridge service
    assertThat(filteredActualKafkaBridgeCustomLabels.size(), is(Collections.singletonMap("app", "bar").size()));
    assertThat(filteredActualKafkaBridgeCustomAnnotations.size(), is(Collections.singletonMap("bar", "app").size()));
    assertThat(filteredActualKafkaBridgeCustomLabels, is(Collections.singletonMap("app", "bar")));
    assertThat(filteredActualKafkaBridgeCustomAnnotations, is(Collections.singletonMap("bar", "app")));
}
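The test assumes the custom label and annotation were configured elsewhere (in the shared bridge setup). As a heavily hedged sketch, this is one way such metadata could be declared on the bridge's Service via the KafkaBridge template; the template/apiService field names are assumptions against the Strimzi API version in use.

// Hedged sketch, not the test's code: declaring the asserted label and annotation
// on the KafkaBridge service template.
KafkaBridge bridge = new KafkaBridgeBuilder()
    .withNewMetadata()
        .withName(bridgeName)
        .withNamespace(INFRA_NAMESPACE)
    .endMetadata()
    .withNewSpec()
        .withBootstrapServers(KafkaResources.plainBootstrapAddress(httpBridgeClusterName))
        .withReplicas(1)
        .withNewHttp()
            .withPort(8080)
        .endHttp()
        .withNewTemplate()
            .withNewApiService()        // assumed field name for the bridge's Service template
                .withNewMetadata()
                    .addToLabels("app", "bar")       // the label asserted on above
                    .addToAnnotations("bar", "app")  // the annotation asserted on above
                .endMetadata()
            .endApiService()
        .endTemplate()
    .endSpec()
    .build();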