Example usage of io.fabric8.kubernetes.model.annotation.Group in the Strimzi project:
class MockKube, method build().
@SuppressWarnings("unchecked")
@SuppressFBWarnings("BC_UNCONFIRMED_CAST_OF_RETURN_VALUE")
public KubernetesClient build() {
    // build() is idempotent: if a client was already built, hand the same instance back.
    if (mockClient != null) {
        return mockClient;
    }

    // Register one MockBuilder per resource type, each backed by its in-memory data map.
    configMapMockBuilder = addMockBuilder("configmaps", new MockBuilder<>(ConfigMap.class, ConfigMapList.class, MockBuilder.castClass(Resource.class), cmDb));
    endpointMockBuilder = addMockBuilder("endpoints", new MockBuilder<>(Endpoints.class, EndpointsList.class, MockBuilder.castClass(Resource.class), endpointDb));
    serviceMockBuilder = addMockBuilder("services", new ServiceMockBuilder(svcDb, endpointDb));
    secretMockBuilder = addMockBuilder("secrets", new MockBuilder<>(Secret.class, SecretList.class, MockBuilder.castClass(Resource.class), secretDb));
    serviceAccountMockBuilder = addMockBuilder("serviceaccounts", new MockBuilder<>(ServiceAccount.class, ServiceAccountList.class, MockBuilder.castClass(Resource.class), serviceAccountDb));
    routeMockBuilder = addMockBuilder("routes", new MockBuilder<>(Route.class, RouteList.class, MockBuilder.castClass(Resource.class), routeDb));
    buildConfigMockBuilder = addMockBuilder("buildConfigs", new MockBuilder<>(BuildConfig.class, BuildConfigList.class, MockBuilder.castClass(Resource.class), buildConfigDb));
    podDisruptionBudgedMockBuilder = addMockBuilder("poddisruptionbudgets", new MockBuilder<>(PodDisruptionBudget.class, PodDisruptionBudgetList.class, MockBuilder.castClass(Resource.class), pdbDb));
    podDisruptionBudgedV1Beta1MockBuilder = addMockBuilder("poddisruptionbudgetsV1Beta1", new MockBuilder<>(io.fabric8.kubernetes.api.model.policy.v1beta1.PodDisruptionBudget.class, io.fabric8.kubernetes.api.model.policy.v1beta1.PodDisruptionBudgetList.class, MockBuilder.castClass(Resource.class), pdbDbV1Beta1));
    roleBindingMockBuilder = addMockBuilder("rolebindings", new MockBuilder<>(RoleBinding.class, RoleBindingList.class, MockBuilder.castClass(Resource.class), pdbRb));
    roleMockBuilder = addMockBuilder("roles", new MockBuilder<>(Role.class, RoleList.class, MockBuilder.castClass(Resource.class), roleDb));
    clusterRoleBindingMockBuilder = addMockBuilder("clusterrolebindings", new MockBuilder<>(ClusterRoleBinding.class, ClusterRoleBindingList.class, MockBuilder.castClass(Resource.class), pdbCrb));
    networkPolicyMockBuilder = addMockBuilder("networkpolicies", new MockBuilder<>(NetworkPolicy.class, NetworkPolicyList.class, MockBuilder.castClass(Resource.class), policyDb));
    ingressMockBuilder = addMockBuilder("ingresses", new MockBuilder<>(Ingress.class, IngressList.class, MockBuilder.castClass(Resource.class), ingressDb));
    // FIX: this builder was previously registered under the same "ingresses" key as the v1
    // builder above, so one entry overwrote the other in the builders map and the overwritten
    // builder escaped the assertNoWatchers() sweep performed on close(). Use a distinct key,
    // mirroring the "poddisruptionbudgetsV1Beta1" convention above.
    ingressV1Beta1MockBuilder = addMockBuilder("ingressesV1Beta1", new MockBuilder<>(io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress.class, io.fabric8.kubernetes.api.model.networking.v1beta1.IngressList.class, MockBuilder.castClass(Resource.class), ingressV1Beta1Db));
    podMockBuilder = addMockBuilder("pods", new MockBuilder<>(Pod.class, PodList.class, MockBuilder.castClass(PodResource.class), podDb));
    MixedOperation<Pod, PodList, PodResource<Pod>> mockPods = podMockBuilder.build();
    persistentVolumeClaimMockBuilder = addMockBuilder("persistentvolumeclaims", new MockBuilder<>(PersistentVolumeClaim.class, PersistentVolumeClaimList.class, MockBuilder.castClass(Resource.class), pvcDb));
    MixedOperation<PersistentVolumeClaim, PersistentVolumeClaimList, Resource<PersistentVolumeClaim>> mockPersistentVolumeClaims = persistentVolumeClaimMockBuilder.build();
    deploymentMockBuilder = addMockBuilder("deployments", new DeploymentMockBuilder(depDb, mockPods));
    MixedOperation<StatefulSet, StatefulSetList, RollableScalableResource<StatefulSet>> mockSs = buildStatefulSets(podMockBuilder, mockPods, mockPersistentVolumeClaims);

    // Top level group: stub the core-API accessors on the mock client.
    mockClient = mock(KubernetesClient.class);
    configMapMockBuilder.build2(mockClient::configMaps);
    serviceMockBuilder.build2(mockClient::services);
    secretMockBuilder.build2(mockClient::secrets);
    serviceAccountMockBuilder.build2(mockClient::serviceAccounts);
    when(mockClient.pods()).thenReturn(mockPods);
    endpointMockBuilder.build2(mockClient::endpoints);
    when(mockClient.persistentVolumeClaims()).thenReturn(mockPersistentVolumeClaims);

    // apps/v1 API group
    AppsAPIGroupDSL api = mock(AppsAPIGroupDSL.class);
    when(mockClient.apps()).thenReturn(api);
    when(api.statefulSets()).thenReturn(mockSs);
    deploymentMockBuilder.build2(api::deployments);

    MixedOperation<CustomResourceDefinition, CustomResourceDefinitionList, Resource<CustomResourceDefinition>> mockCrds = mock(MixedOperation.class);

    // Custom Resources: for every registered CRD, stub CRD lookup by name and build a
    // MixedOperation for its custom resources (cached in crdMixedOps, keyed by crdKey).
    if (mockedCrds != null && !mockedCrds.isEmpty()) {
        NonNamespaceOperation<CustomResourceDefinition, CustomResourceDefinitionList, Resource<CustomResourceDefinition>> crds = mock(MixedOperation.class);
        for (MockedCrd<?, ?, ?> mockedCrd : this.mockedCrds) {
            CustomResourceDefinition crd = mockedCrd.crd;
            Resource crdResource = mock(Resource.class);
            when(crdResource.get()).thenReturn(crd);
            when(crds.withName(crd.getMetadata().getName())).thenReturn(crdResource);
            String key = crdKey(mockedCrd.crClass);
            CreateOrReplaceable crdMixedOp = crdMixedOps.get(key);
            if (crdMixedOp == null) {
                CustomResourceMockBuilder customResourceMockBuilder = addMockBuilder(crd.getSpec().getNames().getPlural(), new CustomResourceMockBuilder<>((MockedCrd) mockedCrd));
                crdMixedOp = (MixedOperation<CustomResource, ? extends KubernetesResource, Resource<CustomResource>>) customResourceMockBuilder.build();
                crdMixedOps.put(key, crdMixedOp);
            }
            when(mockCrds.withName(eq(crd.getMetadata().getName()))).thenReturn(crdResource);
        }
        ApiextensionsAPIGroupDSL mockApiEx = mock(ApiextensionsAPIGroupDSL.class);
        V1ApiextensionAPIGroupDSL mockv1 = mock(V1ApiextensionAPIGroupDSL.class);
        when(mockClient.apiextensions()).thenReturn(mockApiEx);
        when(mockApiEx.v1()).thenReturn(mockv1);
        when(mockv1.customResourceDefinitions()).thenReturn(mockCrds);
        mockCrs(mockClient);
    }

    // Network group (both networking.k8s.io/v1 and v1beta1)
    NetworkAPIGroupDSL network = mock(NetworkAPIGroupDSL.class);
    V1NetworkAPIGroupDSL networkV1 = mock(V1NetworkAPIGroupDSL.class);
    V1beta1NetworkAPIGroupDSL networkV1beta1 = mock(V1beta1NetworkAPIGroupDSL.class);
    when(mockClient.network()).thenReturn(network);
    when(network.v1()).thenReturn(networkV1);
    when(network.v1beta1()).thenReturn(networkV1beta1);
    networkPolicyMockBuilder.build2(network::networkPolicies);
    ingressMockBuilder.build2(networkV1::ingresses);
    ingressV1Beta1MockBuilder.build2(networkV1beta1::ingresses);

    // Policy group (both policy/v1 and policy/v1beta1)
    PolicyAPIGroupDSL policy = mock(PolicyAPIGroupDSL.class);
    V1PolicyAPIGroupDSL v1policy = mock(V1PolicyAPIGroupDSL.class);
    when(mockClient.policy()).thenReturn(policy);
    when(policy.v1()).thenReturn(v1policy);
    V1beta1PolicyAPIGroupDSL v1beta1policy = mock(V1beta1PolicyAPIGroupDSL.class);
    // FIX: removed a duplicated when(mockClient.policy()).thenReturn(policy) stubbing —
    // the identical stubbing is already done a few lines above.
    when(policy.v1beta1()).thenReturn(v1beta1policy);
    podDisruptionBudgedMockBuilder.build2(mockClient.policy().v1()::podDisruptionBudget);
    podDisruptionBudgedV1Beta1MockBuilder.build2(mockClient.policy().v1beta1()::podDisruptionBudget);

    // RBAC group
    RbacAPIGroupDSL rbac = mock(RbacAPIGroupDSL.class);
    when(mockClient.rbac()).thenReturn(rbac);
    roleBindingMockBuilder.build2(mockClient.rbac()::roleBindings);
    roleMockBuilder.build2(mockClient.rbac()::roles);
    clusterRoleBindingMockBuilder.buildNns(mockClient.rbac()::clusterRoleBindings);

    // OpenShift group: the OpenShift-specific resources are reachable via adapt().
    OpenShiftClient mockOpenShiftClient = mock(OpenShiftClient.class);
    when(mockClient.adapt(OpenShiftClient.class)).thenReturn(mockOpenShiftClient);
    routeMockBuilder.build2(mockOpenShiftClient::routes);
    buildConfigMockBuilder.build2(mockOpenShiftClient::buildConfigs);
    if (mockedCrds != null && !mockedCrds.isEmpty()) {
        ApiextensionsAPIGroupDSL mockApiEx = mock(ApiextensionsAPIGroupDSL.class);
        V1ApiextensionAPIGroupDSL mockv1 = mock(V1ApiextensionAPIGroupDSL.class);
        when(mockOpenShiftClient.apiextensions()).thenReturn(mockApiEx);
        when(mockApiEx.v1()).thenReturn(mockv1);
        when(mockv1.customResourceDefinitions()).thenReturn(mockCrds);
        mockCrs(mockOpenShiftClient);
    }

    // On close(), verify no builder still has live watchers (catches watcher leaks in tests).
    doAnswer(i -> {
        for (MockBuilder<?, ?, ?> a : mockBuilders.values()) {
            a.assertNoWatchers();
        }
        return null;
    }).when(mockClient).close();
    return mockClient;
}
Example usage of io.fabric8.kubernetes.model.annotation.Group in the Strimzi project:
class SecurityST, method testAclRuleReadAndWrite().
@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testAclRuleReadAndWrite(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String writeUserName = "kafka-user-write";
    final String readUserName = "kafka-user-read";
    final int messageCount = 500;
    final String consumerGroup = "consumer-group-name-1";

    // Kafka cluster with simple authorization and a TLS-authenticated NodePort listener.
    resourceManager.createResource(extensionContext,
            KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
                .editSpec()
                    .editKafka()
                        .withNewKafkaAuthorizationSimple()
                        .endKafkaAuthorizationSimple()
                        .withListeners(new GenericKafkaListenerBuilder()
                                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                                .withPort(9094)
                                .withType(KafkaListenerType.NODEPORT)
                                .withTls(true)
                                .withAuth(new KafkaListenerAuthenticationTls())
                                .build())
                    .endKafka()
                .endSpec()
                .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    // User granted WRITE on the topic, plus DESCRIBE so it can fetch topic metadata.
    resourceManager.createResource(extensionContext,
            KafkaUserTemplates.tlsUser(clusterName, writeUserName)
                .editSpec()
                    .withNewKafkaUserAuthorizationSimple()
                        .addNewAcl()
                            .withNewAclRuleTopicResource()
                                .withName(topicName)
                            .endAclRuleTopicResource()
                            .withOperation(AclOperation.WRITE)
                        .endAcl()
                        .addNewAcl()
                            .withNewAclRuleTopicResource()
                                .withName(topicName)
                            .endAclRuleTopicResource()
                            .withOperation(AclOperation.DESCRIBE)
                        .endAcl()
                    .endKafkaUserAuthorizationSimple()
                .endSpec()
                .build());

    LOGGER.info("Checking KafkaUser {} that is able to send messages to topic '{}'", writeUserName, topicName);

    ExternalKafkaClient writerClient = new ExternalKafkaClient.Builder()
            .withTopicName(topicName)
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withKafkaUsername(writeUserName)
            .withMessageCount(messageCount)
            .withSecurityProtocol(SecurityProtocol.SSL)
            .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .build();

    // The write-only user may produce but must be rejected when consuming.
    assertThat(writerClient.sendMessagesTls(), is(messageCount));
    assertThrows(GroupAuthorizationException.class, writerClient::receiveMessagesTls);

    // User granted READ on the topic and the consumer group, plus DESCRIBE for metadata.
    resourceManager.createResource(extensionContext,
            KafkaUserTemplates.tlsUser(clusterName, readUserName)
                .editSpec()
                    .withNewKafkaUserAuthorizationSimple()
                        .addNewAcl()
                            .withNewAclRuleTopicResource()
                                .withName(topicName)
                            .endAclRuleTopicResource()
                            .withOperation(AclOperation.READ)
                        .endAcl()
                        .addNewAcl()
                            .withNewAclRuleGroupResource()
                                .withName(consumerGroup)
                            .endAclRuleGroupResource()
                            .withOperation(AclOperation.READ)
                        .endAcl()
                        .addNewAcl()
                            .withNewAclRuleTopicResource()
                                .withName(topicName)
                            .endAclRuleTopicResource()
                            .withOperation(AclOperation.DESCRIBE)
                        .endAcl()
                    .endKafkaUserAuthorizationSimple()
                .endSpec()
                .build());

    ExternalKafkaClient readerClient = writerClient.toBuilder()
            .withKafkaUsername(readUserName)
            .withConsumerGroupName(consumerGroup)
            .build();

    // The read-only user may consume everything the writer produced but must not produce.
    assertThat(readerClient.receiveMessagesTls(), is(messageCount));

    LOGGER.info("Checking KafkaUser {} that is not able to send messages to topic '{}'", readUserName, topicName);
    assertThrows(Exception.class, readerClient::sendMessagesTls);
}
Example usage of io.fabric8.kubernetes.model.annotation.Group in the Strimzi project:
class Crds, method crd().
/**
 * Builds a {@code CustomResourceDefinition} for one of the Strimzi custom resource classes.
 * The CRD metadata (scope, names, group, versions) is taken from the constants declared on
 * the resource class itself; a status subresource is declared for every type except
 * {@code KafkaTopic} (whose {@code status} stays {@code null} below).
 *
 * @param cls one of the supported Strimzi custom resource classes
 * @return the assembled CustomResourceDefinition
 * @throws IllegalArgumentException if {@code cls} is not a supported resource class
 */
@SuppressWarnings({ "checkstyle:JavaNCSS" })
private static CustomResourceDefinition crd(Class<? extends CustomResource> cls) {
    String scope, plural, singular, group, kind, listKind;
    List<String> versions;
    // Remains null for resource types without a status subresource (currently KafkaTopic).
    CustomResourceSubresourceStatus status = null;
    if (cls.equals(Kafka.class)) {
        scope = Kafka.SCOPE;
        plural = Kafka.RESOURCE_PLURAL;
        singular = Kafka.RESOURCE_SINGULAR;
        group = Kafka.RESOURCE_GROUP;
        kind = Kafka.RESOURCE_KIND;
        listKind = Kafka.RESOURCE_LIST_KIND;
        versions = Kafka.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(KafkaConnect.class)) {
        scope = KafkaConnect.SCOPE;
        plural = KafkaConnect.RESOURCE_PLURAL;
        singular = KafkaConnect.RESOURCE_SINGULAR;
        group = KafkaConnect.RESOURCE_GROUP;
        kind = KafkaConnect.RESOURCE_KIND;
        listKind = KafkaConnect.RESOURCE_LIST_KIND;
        versions = KafkaConnect.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(KafkaTopic.class)) {
        scope = KafkaTopic.SCOPE;
        plural = KafkaTopic.RESOURCE_PLURAL;
        singular = KafkaTopic.RESOURCE_SINGULAR;
        group = KafkaTopic.RESOURCE_GROUP;
        kind = KafkaTopic.RESOURCE_KIND;
        listKind = KafkaTopic.RESOURCE_LIST_KIND;
        versions = KafkaTopic.VERSIONS;
    } else if (cls.equals(KafkaUser.class)) {
        scope = KafkaUser.SCOPE;
        plural = KafkaUser.RESOURCE_PLURAL;
        singular = KafkaUser.RESOURCE_SINGULAR;
        group = KafkaUser.RESOURCE_GROUP;
        kind = KafkaUser.RESOURCE_KIND;
        listKind = KafkaUser.RESOURCE_LIST_KIND;
        versions = KafkaUser.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(KafkaMirrorMaker.class)) {
        scope = KafkaMirrorMaker.SCOPE;
        plural = KafkaMirrorMaker.RESOURCE_PLURAL;
        singular = KafkaMirrorMaker.RESOURCE_SINGULAR;
        group = KafkaMirrorMaker.RESOURCE_GROUP;
        kind = KafkaMirrorMaker.RESOURCE_KIND;
        listKind = KafkaMirrorMaker.RESOURCE_LIST_KIND;
        versions = KafkaMirrorMaker.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(KafkaBridge.class)) {
        scope = KafkaBridge.SCOPE;
        plural = KafkaBridge.RESOURCE_PLURAL;
        singular = KafkaBridge.RESOURCE_SINGULAR;
        group = KafkaBridge.RESOURCE_GROUP;
        kind = KafkaBridge.RESOURCE_KIND;
        listKind = KafkaBridge.RESOURCE_LIST_KIND;
        versions = KafkaBridge.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(KafkaConnector.class)) {
        scope = KafkaConnector.SCOPE;
        plural = KafkaConnector.RESOURCE_PLURAL;
        singular = KafkaConnector.RESOURCE_SINGULAR;
        group = KafkaConnector.RESOURCE_GROUP;
        kind = KafkaConnector.RESOURCE_KIND;
        listKind = KafkaConnector.RESOURCE_LIST_KIND;
        versions = KafkaConnector.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(KafkaMirrorMaker2.class)) {
        scope = KafkaMirrorMaker2.SCOPE;
        plural = KafkaMirrorMaker2.RESOURCE_PLURAL;
        singular = KafkaMirrorMaker2.RESOURCE_SINGULAR;
        group = KafkaMirrorMaker2.RESOURCE_GROUP;
        kind = KafkaMirrorMaker2.RESOURCE_KIND;
        listKind = KafkaMirrorMaker2.RESOURCE_LIST_KIND;
        versions = KafkaMirrorMaker2.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(KafkaRebalance.class)) {
        scope = KafkaRebalance.SCOPE;
        plural = KafkaRebalance.RESOURCE_PLURAL;
        singular = KafkaRebalance.RESOURCE_SINGULAR;
        group = KafkaRebalance.RESOURCE_GROUP;
        kind = KafkaRebalance.RESOURCE_KIND;
        listKind = KafkaRebalance.RESOURCE_LIST_KIND;
        versions = KafkaRebalance.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else if (cls.equals(StrimziPodSet.class)) {
        scope = StrimziPodSet.SCOPE;
        plural = StrimziPodSet.RESOURCE_PLURAL;
        singular = StrimziPodSet.RESOURCE_SINGULAR;
        group = StrimziPodSet.RESOURCE_GROUP;
        kind = StrimziPodSet.RESOURCE_KIND;
        listKind = StrimziPodSet.RESOURCE_LIST_KIND;
        versions = StrimziPodSet.VERSIONS;
        status = new CustomResourceSubresourceStatus();
    } else {
        // FIX: was a bare `new RuntimeException()` with no message. IllegalArgumentException
        // is a RuntimeException subtype (callers are unaffected) and carrying the class name
        // makes the failure diagnosable.
        throw new IllegalArgumentException("Unsupported CustomResource class: " + cls.getName());
    }
    // One CRD version entry per supported API version; only "v1beta2" is the storage version.
    List<CustomResourceDefinitionVersion> crVersions = new ArrayList<>(versions.size());
    for (String apiVersion : versions) {
        crVersions.add(new CustomResourceDefinitionVersionBuilder()
                .withName(apiVersion)
                .withNewSubresources()
                    .withStatus(status)
                .endSubresources()
                .withNewSchema()
                    .withNewOpenAPIV3Schema()
                        .withType("object")
                        .withXKubernetesPreserveUnknownFields(true)
                    .endOpenAPIV3Schema()
                .endSchema()
                .withStorage("v1beta2".equals(apiVersion))
                .withServed(true)
                .build());
    }
    return new CustomResourceDefinitionBuilder()
            .withNewMetadata()
                .withName(plural + "." + group)
            .endMetadata()
            .withNewSpec()
                .withScope(scope)
                .withGroup(group)
                .withVersions(crVersions)
                .withNewNames()
                    .withSingular(singular)
                    .withPlural(plural)
                    .withKind(kind)
                    .withListKind(listKind)
                .endNames()
            .endSpec()
            .build();
}
Example usage of io.fabric8.kubernetes.model.annotation.Group in the Strimzi project:
class AbstractModel, method createPod().
protected Pod createPod(String name, Map<String, String> podAnnotations, List<Volume> volumes, List<Container> initContainers, List<Container> containers, List<LocalObjectReference> imagePullSecrets, boolean isOpenShift) {
    PodSecurityContext podSecurityContext = templateSecurityContext;
    // With persistent storage outside OpenShift and no template-provided security context,
    // fall back to a default fsGroup so the pod keeps write access to its volumes even if
    // it later runs under a different user.
    if (ModelUtils.containsPersistentStorage(storage) && !isOpenShift && podSecurityContext == null) {
        podSecurityContext = new PodSecurityContextBuilder()
                .withFsGroup(AbstractModel.DEFAULT_FS_GROUPID)
                .build();
    }

    return new PodBuilder()
            .withNewMetadata()
                .withName(name)
                .withLabels(getLabelsWithStrimziName(name, templatePodLabels).toMap())
                .withNamespace(namespace)
                .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, templatePodAnnotations))
                .withOwnerReferences(createOwnerReference())
            .endMetadata()
            .withNewSpec()
                .withRestartPolicy("Never")
                .withServiceAccountName(getServiceAccountName())
                .withEnableServiceLinks(templatePodEnableServiceLinks)
                .withAffinity(getUserAffinity())
                .withInitContainers(initContainers)
                .withContainers(containers)
                .withVolumes(volumes)
                .withTolerations(getTolerations())
                .withTerminationGracePeriodSeconds(Long.valueOf(templateTerminationGracePeriodSeconds))
                .withImagePullSecrets(templateImagePullSecrets != null ? templateImagePullSecrets : imagePullSecrets)
                .withSecurityContext(podSecurityContext)
                .withPriorityClassName(templatePodPriorityClassName)
                .withSchedulerName(templatePodSchedulerName != null ? templatePodSchedulerName : "default-scheduler")
            .endSpec()
            .build();
}
Example usage of io.fabric8.kubernetes.model.annotation.Group in the Strimzi project:
class AbstractModel, method createStatefulSet().
protected StatefulSet createStatefulSet(Map<String, String> stsAnnotations, Map<String, String> podAnnotations, List<Volume> volumes, List<PersistentVolumeClaim> volumeClaims, Affinity affinity, List<Container> initContainers, List<Container> containers, List<LocalObjectReference> imagePullSecrets, boolean isOpenShift) {
    PodSecurityContext podSecurityContext = templateSecurityContext;
    // With persistent storage outside OpenShift and no template-provided security context,
    // fall back to a default fsGroup so each pod keeps write access to its volumes even if
    // it later runs under a different user.
    if (ModelUtils.containsPersistentStorage(storage) && !isOpenShift && podSecurityContext == null) {
        podSecurityContext = new PodSecurityContextBuilder()
                .withFsGroup(AbstractModel.DEFAULT_FS_GROUPID)
                .build();
    }

    return new StatefulSetBuilder()
            .withNewMetadata()
                .withName(name)
                .withLabels(getLabelsWithStrimziName(name, templateStatefulSetLabels).toMap())
                .withNamespace(namespace)
                .withAnnotations(Util.mergeLabelsOrAnnotations(stsAnnotations, templateStatefulSetAnnotations))
                .withOwnerReferences(createOwnerReference())
            .endMetadata()
            .withNewSpec()
                .withPodManagementPolicy(templatePodManagementPolicy.toValue())
                // Rolling is handled by the operator itself, hence OnDelete.
                .withUpdateStrategy(new StatefulSetUpdateStrategyBuilder().withType("OnDelete").build())
                .withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels().toMap()).build())
                .withServiceName(headlessServiceName)
                .withReplicas(replicas)
                .withNewTemplate()
                    .withNewMetadata()
                        .withName(name)
                        .withLabels(getLabelsWithStrimziName(name, templatePodLabels).toMap())
                        .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, templatePodAnnotations))
                    .endMetadata()
                    .withNewSpec()
                        .withServiceAccountName(getServiceAccountName())
                        .withEnableServiceLinks(templatePodEnableServiceLinks)
                        .withAffinity(affinity)
                        .withInitContainers(initContainers)
                        .withContainers(containers)
                        .withVolumes(volumes)
                        .withTolerations(getTolerations())
                        .withTerminationGracePeriodSeconds(Long.valueOf(templateTerminationGracePeriodSeconds))
                        .withImagePullSecrets(templateImagePullSecrets != null ? templateImagePullSecrets : imagePullSecrets)
                        .withSecurityContext(podSecurityContext)
                        .withPriorityClassName(templatePodPriorityClassName)
                        .withSchedulerName(templatePodSchedulerName != null ? templatePodSchedulerName : "default-scheduler")
                        .withHostAliases(templatePodHostAliases)
                        .withTopologySpreadConstraints(templatePodTopologySpreadConstraints)
                    .endSpec()
                .endTemplate()
                .withVolumeClaimTemplates(volumeClaims)
            .endSpec()
            .build();
}
Aggregations