Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi-kafka-operator project (by strimzi):
class OauthPlainIsolatedST, method testSaslPlainAuthenticationKafkaConnectIsAbleToConnectToKafkaOAuth.
@ParallelTest
void testSaslPlainAuthenticationKafkaConnectIsAbleToConnectToKafkaOAuth(ExtensionContext extensionContext) {
    // Per-test storage providing generated cluster/client names and namespace.
    TestStorage testStorage = new TestStorage(extensionContext);

    resourceManager.createResource(extensionContext,
        KafkaClientsTemplates.kafkaClients(false, testStorage.getKafkaClientsName()).build());

    // Deploy a single-replica KafkaConnect that authenticates with SASL/PLAIN
    // (username + secret-stored password) against the plain bootstrap address
    // of the OAuth-enabled Kafka cluster; TLS is explicitly disabled.
    // NOTE: second argument 'false' means we do not wait for readiness here —
    // readiness is asserted separately below.
    resourceManager.createResource(extensionContext, false,
        KafkaConnectTemplates.kafkaConnect(extensionContext, testStorage.getClusterName(), oauthClusterName, 1)
            .withNewSpec()
                .withReplicas(1)
                .withBootstrapServers(KafkaResources.plainBootstrapAddress(oauthClusterName))
                .withConfig(connectorConfig)
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                .withNewKafkaClientAuthenticationPlain()
                    .withUsername("kafka-connect")
                    .withNewPasswordSecret()
                        .withSecretName(CONNECT_OAUTH_SECRET)
                        .withPassword("clientSecret")
                    .endPasswordSecret()
                .endKafkaClientAuthenticationPlain()
                .withTls(null)
            .endSpec()
            .build());

    // verify that KafkaConnect is able to connect to Oauth Kafka configured as plain
    KafkaConnectUtils.waitForConnectReady(testStorage.getNamespaceName(), testStorage.getClusterName());
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi-kafka-operator project (by strimzi):
class UserST, method testScramUserWithQuotas.
@ParallelTest
void testScramUserWithQuotas(ExtensionContext extensionContext) {
    // Build a SCRAM-SHA KafkaUser and delegate the actual quota checks
    // to the shared testUserWithQuotas helper.
    testUserWithQuotas(extensionContext,
        KafkaUserTemplates.scramShaUser(namespace, userClusterName, "scramed-arnost").build());
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi-kafka-operator project (by strimzi):
class UserST, method testUserTemplate.
@ParallelTest
void testUserTemplate(ExtensionContext extensionContext) {
    // Verify that labels/annotations declared in the KafkaUser's secret
    // template are propagated onto the generated user Secret.
    final String testUserName = mapWithTestUsers.get(extensionContext.getDisplayName());
    final String secretLabelKey = "test-label-key";
    final String secretLabelValue = "test-label-value";
    final String secretAnnotationKey = "test-annotation-key";
    final String secretAnnotationValue = "test-annotation-value";

    // Create a TLS user whose secret template carries a custom label and annotation.
    resourceManager.createResource(extensionContext,
        KafkaUserTemplates.tlsUser(namespace, userClusterName, testUserName)
            .editSpec()
                .editOrNewTemplate()
                    .editOrNewSecret()
                        .editOrNewMetadata()
                            .addToLabels(secretLabelKey, secretLabelValue)
                            .addToAnnotations(secretAnnotationKey, secretAnnotationValue)
                        .endMetadata()
                    .endSecret()
                .endTemplate()
            .endSpec()
            .build());

    // The generated Secret must carry the templated metadata.
    Secret generatedSecret = kubeClient(namespace).getSecret(testUserName);
    assertThat(generatedSecret.getMetadata().getLabels().get(secretLabelKey), is(secretLabelValue));
    assertThat(generatedSecret.getMetadata().getAnnotations().get(secretAnnotationKey), is(secretAnnotationValue));
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi-kafka-operator project (by strimzi):
class UserST, method testTlsUserWithQuotas.
@ParallelTest
void testTlsUserWithQuotas(ExtensionContext extensionContext) {
    // Build a TLS-authenticated KafkaUser and delegate the actual quota
    // checks to the shared testUserWithQuotas helper.
    testUserWithQuotas(extensionContext,
        KafkaUserTemplates.tlsUser(namespace, userClusterName, "encrypted-arnost").build());
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi-kafka-operator project (by strimzi):
class OpaIntegrationST, method testOpaAuthorization.
@ParallelTest
void testOpaAuthorization(ExtensionContext extensionContext) {
    // Verify OPA-based authorization: the "good" user can produce and consume,
    // while the "bad" user is denied both operations.
    final String testClusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String testTopicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String consumerGroupName = "consumer-group-name-1";
    final String clientsDeploymentName = testClusterName + "-" + Constants.KAFKA_CLIENTS;

    // Deploy client pod with custom certificates and collect messages from internal TLS listener
    KafkaUser allowedUser = KafkaUserTemplates.tlsUser(namespace, CLUSTER_NAME, OPA_GOOD_USER).build();
    KafkaUser deniedUser = KafkaUserTemplates.tlsUser(namespace, CLUSTER_NAME, OPA_BAD_USER).build();
    resourceManager.createResource(extensionContext, allowedUser);
    resourceManager.createResource(extensionContext, deniedUser);
    resourceManager.createResource(extensionContext,
        KafkaClientsTemplates.kafkaClients(true, clientsDeploymentName, false, allowedUser, deniedUser)
            .editMetadata()
                .withNamespace(namespace)
            .endMetadata()
            .build());

    final String clientsPodName = kubeClient(namespace)
        .listPodsByPrefixInName(namespace, clientsDeploymentName)
        .get(0).getMetadata().getName();

    LOGGER.info("Checking KafkaUser {} that is able to send and receive messages to/from topic '{}'", OPA_GOOD_USER, testTopicName);

    // Setup kafka client
    InternalKafkaClient tlsClient = new InternalKafkaClient.Builder()
        .withTopicName(testTopicName)
        .withNamespaceName(namespace)
        .withClusterName(CLUSTER_NAME)
        .withKafkaUsername(OPA_GOOD_USER)
        .withMessageCount(MESSAGE_COUNT)
        .withConsumerGroupName(consumerGroupName)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withUsingPodName(clientsPodName)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    // Authorized user: both produce and consume must eventually succeed.
    tlsClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();

    LOGGER.info("Checking KafkaUser {} that is not able to send or receive messages to/from topic '{}'", OPA_BAD_USER, testTopicName);

    // Unauthorized user: produce reports failure (-1) and nothing is consumed (0).
    tlsClient = tlsClient.toBuilder().withKafkaUsername(OPA_BAD_USER).build();
    assertThat(tlsClient.sendMessagesTls(), is(-1));
    assertThat(tlsClient.receiveMessagesTls(), is(0));
}
Aggregations