Search in sources :

Example 96 with Group

Use of io.fabric8.kubernetes.model.annotation.Group in the strimzi project (by strimzi).

The following example is the testCustomCertNodePortAndTlsRollingUpdate method of the ListenersST class.

@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
@SuppressWarnings({ "checkstyle:MethodLength" })
// Verifies certificate rotation on both the internal TLS listener (port 9115) and the external
// NODEPORT listener (port 9116): default cluster-CA certs -> custom broker certs -> renewed custom
// certs -> external listener back to the cluster CA. Each switch must trigger a rolling update of
// all 3 Kafka pods, the Kafka status must report the certificates from the matching secret, and
// both external and in-cluster TLS clients must keep producing/consuming across every rotation.
void testCustomCertNodePortAndTlsRollingUpdate(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext);
    // Per-test names for the two custom-certificate secrets used by the listeners.
    final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" + customCertServer1;
    final String clusterCustomCertServer2 = testStorage.getClusterName() + "-" + customCertServer2;
    // Create the custom secrets up front with two distinct cert/key bundles.
    SecretUtils.createCustomSecret(clusterCustomCertServer1, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_1);
    SecretUtils.createCustomSecret(clusterCustomCertServer2, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_2);
    // Phase 1: deploy a 3-node Kafka with both listeners using the default (cluster CA signed) certificates.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3).editSpec().editKafka().withListeners(new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9115).withType(KafkaListenerType.INTERNAL).withTls(true).build(), new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9116).withType(KafkaListenerType.NODEPORT).withTls(true).build()).endKafka().endSpec().build());
    // TLS user used by both the external and the in-cluster clients.
    KafkaUser aliceUser = KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build();
    resourceManager.createResource(extensionContext, aliceUser);
    // Phase 2: with default certs, the status certificates of both listeners must equal the cluster-CA cert secret.
    String externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    String externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), testStorage.getClusterName() + "-cluster-ca-cert", "ca.crt");
    String internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    LOGGER.info("Check if KafkaStatus certificates from external listeners are the same as secret certificates");
    assertThat(externalSecretCerts, is(externalCerts));
    LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
    // External secret cert is same as internal in this case
    assertThat(externalSecretCerts, is(internalCerts));
    // Phase 3: external client round-trip over the NODEPORT listener trusting the default cluster CA.
    ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder().withTopicName(testStorage.getTopicName()).withNamespaceName(testStorage.getNamespaceName()).withClusterName(testStorage.getClusterName()).withKafkaUsername(testStorage.getUserName()).withMessageCount(MESSAGE_COUNT).withSecurityProtocol(SecurityProtocol.SSL).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
    externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
    // Snapshot the Kafka pods so the rolling update triggered below can be detected.
    Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    // Phase 4: switch the internal listener to custom secret 2 and the external listener to custom secret 1.
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
        kafka.getSpec().getKafka().setListeners(asList(new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9115).withType(KafkaListenerType.INTERNAL).withTls(true).withNewConfiguration().withNewBrokerCertChainAndKey().withSecretName(clusterCustomCertServer2).withKey("ca.key").withCertificate("ca.crt").endBrokerCertChainAndKey().endConfiguration().build(), new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9116).withType(KafkaListenerType.NODEPORT).withTls(true).withNewConfiguration().withNewBrokerCertChainAndKey().withSecretName(clusterCustomCertServer1).withKey("ca.key").withCertificate("ca.crt").endBrokerCertChainAndKey().endConfiguration().build()));
    }, testStorage.getNamespaceName());
    // The listener change must roll all 3 brokers; keep the new snapshot for the next roll.
    kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaSnapshot);
    KafkaUtils.waitForKafkaStatusUpdate(testStorage.getNamespaceName(), testStorage.getClusterName());
    // Phase 5: status certs must now match the custom secrets (external -> secret 1, internal -> secret 2).
    externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer1, "ca.crt");
    internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    String internalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer2, "ca.crt");
    LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
    assertThat(externalSecretCerts, is(externalCerts));
    LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
    assertThat(internalSecretCerts, is(internalCerts));
    // External client must now trust the custom CA of secret 1.
    externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(clusterCustomCertServer1).build();
    externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
    // Running total of messages a fresh consumer group is expected to read as the test keeps producing.
    int expectedMessageCountForNewGroup = MESSAGE_COUNT * 3;
    // In-cluster TLS clients on the internal listener, trusting the custom CA of secret 2.
    KafkaClients kafkaClients = new KafkaClientsBuilder().withNamespaceName(testStorage.getNamespaceName()).withTopicName(testStorage.getTopicName()).withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9115").withMessageCount(MESSAGE_COUNT).withUserName(testStorage.getUserName()).withProducerName(testStorage.getProducerName()).withConsumerName(testStorage.getConsumerName()).withConsumerGroup("consumer-group-certs-71").withCaCertSecretName(clusterCustomCertServer2).build();
    resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    int expectedMessageCountForExternalClient = MESSAGE_COUNT;
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withMessageCount(expectedMessageCountForNewGroup).build();
    resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    // NOTE(review): MESSAGE_COUNT * 3 duplicates expectedMessageCountForNewGroup — consider reusing the variable.
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT * 3);
    // Phase 6: renew the custom secrets in place by swapping the two cert/key bundles; the CR is
    // unchanged, so the operator must detect the secret content change and roll the brokers again.
    SecretUtils.createCustomSecret(clusterCustomCertServer1, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_2);
    SecretUtils.createCustomSecret(clusterCustomCertServer2, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_1);
    kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaSnapshot);
    // Phase 7: status certificates must reflect the renewed secret contents.
    externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer1, "ca.crt");
    internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    internalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer2, "ca.crt");
    LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
    assertThat(externalSecretCerts, is(externalCerts));
    LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
    assertThat(internalSecretCerts, is(internalCerts));
    externalKafkaClient.verifyProducedAndConsumedMessages(expectedMessageCountForExternalClient, externalKafkaClient.receiveMessagesTls());
    // New consumer group produces/consumes again after the renewal roll.
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withConsumerGroup("consumer-group-certs-72").withMessageCount(MESSAGE_COUNT).build();
    resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    expectedMessageCountForNewGroup += MESSAGE_COUNT;
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withMessageCount(expectedMessageCountForNewGroup).build();
    resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    // NOTE(review): the consumer above is configured with expectedMessageCountForNewGroup but this
    // wait only requires MESSAGE_COUNT — looks inconsistent; confirm the intended wait count.
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Phase 8: revert the external listener to the default cluster-CA certificate (internal keeps secret 2).
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
        kafka.getSpec().getKafka().setListeners(asList(new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9115).withType(KafkaListenerType.INTERNAL).withTls(true).withNewConfiguration().withNewBrokerCertChainAndKey().withSecretName(clusterCustomCertServer2).withKey("ca.key").withCertificate("ca.crt").endBrokerCertChainAndKey().endConfiguration().build(), new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9116).withType(KafkaListenerType.NODEPORT).withTls(true).build()));
    }, testStorage.getNamespaceName());
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaSnapshot);
    KafkaUtils.waitForKafkaStatusUpdate(testStorage.getNamespaceName(), testStorage.getClusterName());
    // Final verification: external listener back on the cluster CA, internal still on custom secret 2.
    externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), testStorage.getClusterName() + "-cluster-ca-cert", "ca.crt");
    internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
    internalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer2, "ca.crt");
    LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
    assertThat(externalSecretCerts, is(externalCerts));
    LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
    assertThat(internalSecretCerts, is(internalCerts));
    // Null CA name means the external client falls back to the default trust (cluster CA).
    externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(null).build();
    externalKafkaClient.verifyProducedAndConsumedMessages(expectedMessageCountForExternalClient, externalKafkaClient.receiveMessagesTls());
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withConsumerGroup("consumer-group-certs-73").withMessageCount(expectedMessageCountForNewGroup).build();
    resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) ContainerEnvVarBuilder(io.strimzi.api.kafka.model.ContainerEnvVarBuilder) GenericKafkaListenerConfigurationBrokerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBrokerBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) KafkaUser(io.strimzi.api.kafka.model.KafkaUser) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)

Example 97 with Group

Use of io.fabric8.kubernetes.model.annotation.Group in the strimzi project (by strimzi).

The following example is the createStatefulPod method of the AbstractModel class.

/**
 * Generates a stateful Pod for use inside a StrimziPodSet. Unlike regular pods or pod templates,
 * it configures additional fields (hostname, subdomain, pod-set labels) which a StatefulSet would
 * otherwise generate, and stamps the result with a pod-revision annotation.
 *
 * @param strimziPodSetName Name of the StrimziPodSet which will control this pod (used for labeling)
 * @param podName           Name of the pod
 * @param podAnnotations    Annotations which should be set on the pod
 * @param volumes           Volumes which should be used by the pod and its containers
 * @param affinity          Affinity rules for the pod
 * @param initContainers    Init containers which should be used in the pod
 * @param containers        Containers which should be used in the pod
 * @param imagePullSecrets  Image pull secrets with container registry credentials
 * @param isOpenShift       Whether we are running on OpenShift or not
 *
 * @return                  Generated pod for use within a StrimziPodSet
 */
protected Pod createStatefulPod(String strimziPodSetName, String podName, Map<String, String> podAnnotations, List<Volume> volumes, Affinity affinity, List<Container> initContainers, List<Container> containers, List<LocalObjectReference> imagePullSecrets, boolean isOpenShift) {
    // Start from the template-level security context, if any was configured.
    PodSecurityContext effectiveSecurityContext = templateSecurityContext;
    // On non-OpenShift clusters with persistent storage and no explicit context, set fsGroup so the
    // pod keeps write permission on its volumes even if it later runs as a different user.
    if (ModelUtils.containsPersistentStorage(storage) && !isOpenShift && effectiveSecurityContext == null) {
        effectiveSecurityContext = new PodSecurityContextBuilder()
                .withFsGroup(AbstractModel.DEFAULT_FS_GROUPID)
                .build();
    }

    // Assemble the pod from the template values and the supplied pieces.
    Pod statefulPod = new PodBuilder()
            .withNewMetadata()
                .withName(podName)
                .withLabels(getLabelsWithStrimziNameAndPodName(name, podName, templatePodLabels).withStatefulSetPod(podName).withStrimziPodSetController(strimziPodSetName).toMap())
                .withNamespace(namespace)
                .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, templatePodAnnotations))
            .endMetadata()
            .withNewSpec()
                .withRestartPolicy("Always")
                .withHostname(podName)
                .withSubdomain(headlessServiceName)
                .withServiceAccountName(getServiceAccountName())
                .withEnableServiceLinks(templatePodEnableServiceLinks)
                .withAffinity(affinity)
                .withInitContainers(initContainers)
                .withContainers(containers)
                .withVolumes(volumes)
                .withTolerations(getTolerations())
                .withTerminationGracePeriodSeconds(Long.valueOf(templateTerminationGracePeriodSeconds))
                .withImagePullSecrets(templateImagePullSecrets != null ? templateImagePullSecrets : imagePullSecrets)
                .withSecurityContext(effectiveSecurityContext)
                .withPriorityClassName(templatePodPriorityClassName)
                .withSchedulerName(templatePodSchedulerName != null ? templatePodSchedulerName : "default-scheduler")
                .withHostAliases(templatePodHostAliases)
                .withTopologySpreadConstraints(templatePodTopologySpreadConstraints)
            .endSpec()
            .build();

    // Set the pod revision annotation so changes to the pod definition can be detected.
    statefulPod.getMetadata().getAnnotations().put(PodRevision.STRIMZI_REVISION_ANNOTATION, PodRevision.getRevision(reconciliation, statefulPod));
    return statefulPod;
}
Also used : PodSecurityContextBuilder(io.fabric8.kubernetes.api.model.PodSecurityContextBuilder) Pod(io.fabric8.kubernetes.api.model.Pod) PodSecurityContext(io.fabric8.kubernetes.api.model.PodSecurityContext) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder)

Example 98 with Group

Use of io.fabric8.kubernetes.model.annotation.Group in the apicurio-registry project (by Apicurio).

The following example is the createOperatorGroup method of the OperatorUtils class.

/**
 * Creates an OperatorGroup in the given namespace that targets that same namespace,
 * and registers it with the {@code ResourceManager}.
 *
 * @param testContext JUnit extension context used for resource tracking
 * @param namespace   Namespace in which the operator group is created and which it targets
 * @return            The created OperatorGroup
 */
public static OperatorGroup createOperatorGroup(ExtensionContext testContext, String namespace) {
    final String operatorGroupName = namespace + "-operator-group";
    LOGGER.info("Creating operator group {} in namespace {} targeting namespace {}...", operatorGroupName, namespace, namespace);

    // Build an operator group scoped to a single target namespace.
    OperatorGroup group = new OperatorGroupBuilder()
            .withNewMetadata()
                .withName(operatorGroupName)
                .withNamespace(namespace)
            .endMetadata()
            .withNewSpec()
                .withTargetNamespaces(namespace)
            .endSpec()
            .build();

    // Second argument 'true' — presumably "wait until ready"; confirm against the ResourceManager API.
    ResourceManager.getInstance().createResource(testContext, true, group);
    return group;
}
Also used : OperatorGroup(io.fabric8.openshift.api.model.operatorhub.v1.OperatorGroup) OperatorGroupBuilder(io.fabric8.openshift.api.model.operatorhub.v1.OperatorGroupBuilder)

Example 99 with Group

Use of io.fabric8.kubernetes.model.annotation.Group in the droolsjbpm-integration project (by kiegroup).

The following example is the methodsTest method of the ConfigMapLockUtilsTest class.

@Test
public void methodsTest() {
    // Fixture data for a fully populated LeaderInfo.
    final String groupId = "drools-group";
    final String leaderId = "leader-x13X";
    final Date created = Calendar.getInstance().getTime();
    final Set<String> memberNames = new HashSet<>(Arrays.asList("Qui", "Quo", "Qua"));

    LeaderInfo info = new LeaderInfo(groupId, leaderId, created, memberNames);
    logger.info("leaderInfo:{}", info.toString());

    // Leader-state predicates on the freshly built info.
    assertFalse(info.hasEmptyLeader());
    assertFalse(info.isValidLeader(null));
    assertFalse(info.hasValidLeader());

    // Accessors must echo the constructor arguments unchanged.
    assertEquals(groupId, info.getGroupName());
    assertEquals(leaderId, info.getLeader());
    assertEquals(created, info.getLocalTimestamp());
    assertEquals(memberNames, info.getMembers());

    // Round-trip the leader info through a ConfigMap and back.
    ConfigMap lockMap = ConfigMapLockUtils.createNewConfigMap("my-map", info);
    assertNotNull(lockMap);
    LeaderInfo decoded = ConfigMapLockUtils.getLeaderInfo(lockMap, memberNames, groupId);
    logger.info("leaderInfo:{}", decoded.toString());
    assertNotNull(decoded);

    // Deriving a ConfigMap with a new leader must also succeed.
    ConfigMap updatedMap = ConfigMapLockUtils.getConfigMapWithNewLeader(lockMap, decoded);
    assertNotNull(updatedMap);
}
Also used : ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) Date(java.util.Date) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 100 with Group

Use of io.fabric8.kubernetes.model.annotation.Group in the strimzi-kafka-operator project (by strimzi).

The following example is the createPod method of the AbstractModel class.

/**
 * Generates a one-shot pod (restart policy {@code Never}) from the template values configured on
 * this model and the supplied volumes, containers and pull secrets.
 *
 * @param name             Name of the pod
 * @param podAnnotations   Annotations which should be set on the pod
 * @param volumes          Volumes which should be used by the pod and its containers
 * @param initContainers   Init containers which should be used in the pod
 * @param containers       Containers which should be used in the pod
 * @param imagePullSecrets Image pull secrets with container registry credentials
 * @param isOpenShift      Whether we are running on OpenShift or not
 *
 * @return                 Generated pod
 */
protected Pod createPod(String name, Map<String, String> podAnnotations, List<Volume> volumes, List<Container> initContainers, List<Container> containers, List<LocalObjectReference> imagePullSecrets, boolean isOpenShift) {
    // Start from the template-level security context, if any was configured.
    PodSecurityContext effectiveSecurityContext = templateSecurityContext;
    // On non-OpenShift clusters with persistent storage and no explicit context, set fsGroup so the
    // pod keeps write permission on its volumes even if it later runs as a different user.
    if (ModelUtils.containsPersistentStorage(storage) && !isOpenShift && effectiveSecurityContext == null) {
        effectiveSecurityContext = new PodSecurityContextBuilder()
                .withFsGroup(AbstractModel.DEFAULT_FS_GROUPID)
                .build();
    }

    // Assemble and return the pod directly — no intermediate mutation is needed here.
    return new PodBuilder()
            .withNewMetadata()
                .withName(name)
                .withLabels(getLabelsWithStrimziName(name, templatePodLabels).toMap())
                .withNamespace(namespace)
                .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, templatePodAnnotations))
                .withOwnerReferences(createOwnerReference())
            .endMetadata()
            .withNewSpec()
                .withRestartPolicy("Never")
                .withServiceAccountName(getServiceAccountName())
                .withEnableServiceLinks(templatePodEnableServiceLinks)
                .withAffinity(getUserAffinity())
                .withInitContainers(initContainers)
                .withContainers(containers)
                .withVolumes(volumes)
                .withTolerations(getTolerations())
                .withTerminationGracePeriodSeconds(Long.valueOf(templateTerminationGracePeriodSeconds))
                .withImagePullSecrets(templateImagePullSecrets != null ? templateImagePullSecrets : imagePullSecrets)
                .withSecurityContext(effectiveSecurityContext)
                .withPriorityClassName(templatePodPriorityClassName)
                .withSchedulerName(templatePodSchedulerName != null ? templatePodSchedulerName : "default-scheduler")
            .endSpec()
            .build();
}
Also used : PodSecurityContextBuilder(io.fabric8.kubernetes.api.model.PodSecurityContextBuilder) Pod(io.fabric8.kubernetes.api.model.Pod) PodSecurityContext(io.fabric8.kubernetes.api.model.PodSecurityContext) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder)

Aggregations

Test (org.junit.jupiter.api.Test)32 Test (org.junit.Test)29 IOException (java.io.IOException)19 ArrayList (java.util.ArrayList)17 Map (java.util.Map)16 KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient)15 List (java.util.List)14 File (java.io.File)13 HashMap (java.util.HashMap)13 KubernetesList (io.fabric8.kubernetes.api.model.KubernetesList)12 Collectors (java.util.stream.Collectors)11 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)10 Deployment (io.fabric8.kubernetes.api.model.apps.Deployment)10 HasMetadata (io.fabric8.kubernetes.api.model.HasMetadata)9 KubernetesListBuilder (io.fabric8.kubernetes.api.model.KubernetesListBuilder)9 CuratorFramework (org.apache.curator.framework.CuratorFramework)9 ZooKeeperGroup (io.fabric8.groups.internal.ZooKeeperGroup)8 Pod (io.fabric8.kubernetes.api.model.Pod)8 KubernetesClientBuilder (io.fabric8.kubernetes.client.KubernetesClientBuilder)8 OpenShiftClient (io.fabric8.openshift.client.OpenShiftClient)8