Use of io.fabric8.kubernetes.model.annotation.Group in project strimzi by strimzi: class PlatformFeaturesAvailability, method checkApiAvailability.
/**
 * Checks whether the Kubernetes cluster serves a given API group at a given version.
 * The discovery call is executed on a Vert.x worker thread because it performs
 * blocking network I/O against the API server.
 *
 * @param vertx     Vert.x instance used to run the blocking discovery call off the event loop
 * @param client    Kubernetes client used for API group discovery
 * @param group     name of the API group to look for
 * @param version   version which should be served within the group
 * @return          Future completing with true when the group serves the version,
 *                  false when the group is missing or does not serve it;
 *                  failed when the discovery call itself throws
 */
private static Future<Boolean> checkApiAvailability(Vertx vertx, KubernetesClient client, String group, String version) {
    Promise<Boolean> promise = Promise.promise();
    vertx.executeBlocking(request -> {
        try {
            APIGroup apiGroup = client.getApiGroup(group);
            boolean supported;
            if (apiGroup != null) {
                // Group exists => check whether the requested version is among the served ones
                supported = apiGroup.getVersions().stream().anyMatch(v -> version.equals(v.getVersion()));
            } else {
                // Group is not served by this cluster at all
                supported = false;
            }
            // A supported API is normal operation => INFO; only a missing API deserves a WARN
            if (supported) {
                LOGGER.info("API Group {} is supported", group);
            } else {
                LOGGER.warn("API Group {} is not supported", group);
            }
            request.complete(supported);
        } catch (Exception e) {
            LOGGER.error("Detection of API availability failed.", e);
            request.fail(e);
        }
    }, promise);
    return promise.future();
}
Use of io.fabric8.kubernetes.model.annotation.Group in project strimzi by strimzi: class CrdGenerator, method buildSpec.
/**
 * Builds the {@code spec} section of the generated CustomResourceDefinition for the given
 * custom resource class: group, names, scope, versions, conversion and — depending on
 * whether they differ between versions — either top-level or per-version subresources,
 * schemas and additionalPrinterColumns.
 *
 * @param crdApiVersion the CRD API version being generated (pre-v1 vs v1 changes the layout)
 * @param crd           the Crd.Spec annotation data driving the generation
 * @param crdClass      the custom resource class the schemas are derived from
 * @return              the populated spec object node
 */
@SuppressWarnings("NPathComplexity")
private ObjectNode buildSpec(ApiVersion crdApiVersion, Crd.Spec crd, Class<? extends CustomResource> crdClass) {
// Fail fast if the targeted Kubernetes versions cannot serve this CRD API version
checkKubeVersionsSupportCrdVersion(crdApiVersion);
ObjectNode result = nf.objectNode();
result.put("group", crd.group());
ArrayNode versions = nf.arrayNode();
// Kube apiserver with CRD v1beta1 is picky about only using per-version subresources, schemas and printercolumns
// if they actually differ across the versions. If they're the same, it insists these things are
// declared top level
// With CRD v1 they have to be per-version :face-with-rolling-eyes:
Map<ApiVersion, ObjectNode> subresources = buildSubresources(crd);
boolean perVersionSubResources = needsPerVersion("subresources", subresources);
Map<ApiVersion, ObjectNode> schemas = buildSchemas(crd, crdClass);
boolean perVersionSchemas = needsPerVersion("schemas", schemas);
Map<ApiVersion, ArrayNode> printerColumns = buildPrinterColumns(crd);
boolean perVersionPrinterColumns = needsPerVersion("additionalPrinterColumns", printerColumns);
result.set("names", buildNames(crd.names()));
result.put("scope", crd.scope());
// Identical across all versions => emit once at the top level; any map value works since they are equal
if (!perVersionPrinterColumns) {
ArrayNode cols = printerColumns.values().iterator().next();
if (!cols.isEmpty()) {
result.set("additionalPrinterColumns", cols);
}
}
if (!perVersionSubResources) {
ObjectNode subresource = subresources.values().iterator().next();
if (!subresource.isEmpty()) {
result.set("subresources", subresource);
}
}
if (conversionStrategy instanceof WebhookConversionStrategy) {
// "Webhook": must be None if spec.preserveUnknownFields is true
result.put("preserveUnknownFields", false);
}
result.set("conversion", buildConversion(crdApiVersion));
// One entry in spec.versions per CR version that passes the include filter
for (Crd.Spec.Version version : crd.versions()) {
ApiVersion crApiVersion = ApiVersion.parse(version.name());
if (!shouldIncludeVersion(crApiVersion)) {
continue;
}
ObjectNode versionNode = versions.addObject();
versionNode.put("name", crApiVersion.toString());
// Generator-level overrides (servedVersion/storageVersion fields) take precedence over the annotation values
versionNode.put("served", servedVersion != null ? servedVersion.contains(crApiVersion) : version.served());
versionNode.put("storage", storageVersion != null ? crApiVersion.equals(storageVersion) : version.storage());
if (perVersionSubResources) {
ObjectNode subresourcesForVersion = subresources.get(crApiVersion);
if (!subresourcesForVersion.isEmpty()) {
versionNode.set("subresources", subresourcesForVersion);
}
}
if (perVersionPrinterColumns) {
ArrayNode cols = printerColumns.get(crApiVersion);
if (!cols.isEmpty()) {
versionNode.set("additionalPrinterColumns", cols);
}
}
if (perVersionSchemas) {
versionNode.set("schema", schemas.get(crApiVersion));
}
}
result.set("versions", versions);
// Legacy singular "version" field: only for pre-v1 CRDs targeting Kube 1.11-1.15;
// uses the first included version (orElseThrow: at least one version must be included)
if (crdApiVersion.compareTo(V1) < 0 && targetKubeVersions.intersects(KubeVersion.parseRange("1.11-1.15"))) {
result.put("version", Arrays.stream(crd.versions()).map(v -> ApiVersion.parse(v.name())).filter(this::shouldIncludeVersion).findFirst().map(ApiVersion::toString).orElseThrow());
}
if (!perVersionSchemas) {
// Shared schema goes into the top-level "validation" field (pre-v1 layout)
result.set("validation", schemas.values().iterator().next());
}
return result;
}
Use of io.fabric8.kubernetes.model.annotation.Group in project strimzi by strimzi: class OperatorMetricsTest, method resourceOperatorWithExistingPausedResource.
/**
 * Creates a stub resource operator whose {@code get} and {@code getAsync} both return an
 * existing custom resource annotated with {@code strimzi.io/pause-reconciliation=true},
 * so tests can exercise the paused-reconciliation code path.
 *
 * The paused resource is built by a single private helper; the original code duplicated
 * the whole local class in both overrides.
 *
 * @return operator stub returning an existing paused TestResource
 */
private AbstractWatchableStatusedResourceOperator resourceOperatorWithExistingPausedResource() {
    return new AbstractWatchableStatusedResourceOperator(vertx, null, "TestResource") {
        @Override
        public Future updateStatusAsync(Reconciliation reconciliation, HasMetadata resource) {
            return Future.succeededFuture();
        }

        @Override
        protected MixedOperation operation() {
            return null;
        }

        /**
         * Builds the paused custom resource returned by both {@code get} and
         * {@code getAsync}. Defined once here instead of duplicating the local
         * class in each override.
         */
        private CustomResource pausedResource() {
            @Group("strimzi")
            @Version("v1")
            class Foo extends MyResource {
                @Override
                public ObjectMeta getMetadata() {
                    // The pause annotation is the whole point of this stub
                    ObjectMeta md = new ObjectMeta();
                    md.setAnnotations(singletonMap("strimzi.io/pause-reconciliation", "true"));
                    return md;
                }

                @Override
                public void setMetadata(ObjectMeta objectMeta) {
                }

                @Override
                public String getKind() {
                    return "TestResource";
                }

                @Override
                public String getApiVersion() {
                    return "v1";
                }

                @Override
                public void setApiVersion(String s) {
                }

                @Override
                public Spec getSpec() {
                    return new Spec() {
                    };
                }

                @Override
                public void setSpec(Object spec) {
                }

                @Override
                public Status getStatus() {
                    return null;
                }

                @Override
                public void setStatus(Object status) {
                }
            }
            return new Foo();
        }

        @Override
        public CustomResource get(String namespace, String name) {
            return pausedResource();
        }

        @Override
        public Future getAsync(String namespace, String name) {
            return Future.succeededFuture(pausedResource());
        }
    };
}
Use of io.fabric8.kubernetes.model.annotation.Group in project strimzi by strimzi: class PlatformFeaturesAvailabilityTest, method buildAPIGroup.
/**
 * Builds a fabric8 APIGroup discovery object for use in tests.
 *
 * @param group    name of the API group
 * @param versions versions the group should advertise
 * @return         APIGroup containing one GroupVersionForDiscovery per given version
 */
private APIGroup buildAPIGroup(String group, String... versions) {
    List<GroupVersionForDiscovery> groupVersions = new ArrayList<>();
    for (String version : versions) {
        // groupVersion is the conventional "<group>/<version>" discovery string
        groupVersions.add(new GroupVersionForDiscoveryBuilder().withGroupVersion(group + "/" + version).withVersion(version).build());
    }
    // Assemble the whole object through the builder instead of mutating the
    // already-built APIGroup with a setter afterwards
    return new APIGroupBuilder().withName(group).withVersions(groupVersions).build();
}
Use of io.fabric8.kubernetes.model.annotation.Group in project strimzi by strimzi: class DrainCleanerIsolatedST, method testDrainCleanerWithComponentsDuringNodeDraining.
/**
 * Verifies that the Drain Cleaner keeps Kafka and ZooKeeper rolling safely while worker
 * nodes are drained one by one: deploys a rack-aware Kafka cluster with PDB maxUnavailable=0
 * and pod anti-affinity, runs continuous producers/consumers, drains and cordons each node
 * hosting Kafka/ZooKeeper pods, waits for the affected pods to roll, and finally asserts
 * all clients finished successfully.
 */
@IsolatedTest
// We refer to 6 worker nodes to have always 2 nodes with same labels to properly evacuate pods from one node to another
@MultiNodeClusterOnly(workerNodeCount = 6)
void testDrainCleanerWithComponentsDuringNodeDraining(ExtensionContext extensionContext) {
TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
String rackKey = "rack-key";
final int replicas = 3;
// Number of topics and producer/consumer pairs run concurrently during the draining
int size = 5;
List<String> topicNames = IntStream.range(0, size).boxed().map(i -> testStorage.getTopicName() + "-" + i).collect(Collectors.toList());
List<String> producerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getProducerName() + "-" + i).collect(Collectors.toList());
List<String> consumerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getConsumerName() + "-" + i).collect(Collectors.toList());
List<String> continuousConsumerGroups = IntStream.range(0, size).boxed().map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
// Kafka and ZooKeeper both get: rack awareness on rackKey, PDB maxUnavailable=0 (so only the
// Drain Cleaner / operator can move pods), and pod anti-affinity spreading pods across nodes
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas).editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata().editSpec().editKafka().withNewRack().withTopologyKey(rackKey).endRack().editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().withNewPod().withAffinity(new AffinityBuilder().withNewPodAntiAffinity().addNewRequiredDuringSchedulingIgnoredDuringExecution().editOrNewLabelSelector().addNewMatchExpression().withKey(rackKey).withOperator("In").withValues("zone").endMatchExpression().endLabelSelector().withTopologyKey(rackKey).endRequiredDuringSchedulingIgnoredDuringExecution().endPodAntiAffinity().build()).endPod().endTemplate().endKafka().editZookeeper().editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().withNewPod().withAffinity(new AffinityBuilder().withNewPodAntiAffinity().addNewRequiredDuringSchedulingIgnoredDuringExecution().editOrNewLabelSelector().addNewMatchExpression().withKey(rackKey).withOperator("In").withValues("zone").endMatchExpression().endLabelSelector().withTopologyKey(rackKey).endRequiredDuringSchedulingIgnoredDuringExecution().endPodAntiAffinity().build()).endPod().endTemplate().endZookeeper().endSpec().build());
// Topics with 3 partitions / RF 3 / min.insync.replicas 2 so they survive one broker rolling at a time
topicNames.forEach(topic -> resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), topic, 3, 3, 2).editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata().build()));
drainCleaner.createDrainCleaner(extensionContext);
String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
Map<String, List<String>> nodesWithPods = NodeUtils.getPodsForEachNodeInNamespace(Constants.DRAIN_CLEANER_NAMESPACE);
// remove all pods from map, which doesn't contain "kafka" or "zookeeper" in its name
nodesWithPods.forEach((node, podlist) -> podlist.retainAll(podlist.stream().filter(podName -> (podName.contains("kafka") || podName.contains("zookeeper"))).collect(Collectors.toList())));
// Short timeouts so producers fail fast instead of hanging if availability is actually lost
String producerAdditionConfiguration = "delivery.timeout.ms=30000\nrequest.timeout.ms=30000";
KafkaClients kafkaBasicExampleClients;
// Start one continuous producer/consumer pair per topic; they keep running through the drains
for (int i = 0; i < size; i++) {
kafkaBasicExampleClients = new KafkaClientsBuilder().withProducerName(producerNames.get(i)).withConsumerName(consumerNames.get(i)).withTopicName(topicNames.get(i)).withConsumerGroup(continuousConsumerGroups.get(i)).withMessageCount(300).withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE).withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())).withDelayMs(1000).withAdditionalConfig(producerAdditionConfiguration).build();
resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
}
LOGGER.info("Starting Node drain");
// Drain each node hosting Kafka/ZooKeeper pods; after each drain, wait for the evicted
// pods to be rolled elsewhere before moving on to the next node.
// NOTE(review): assumes every node in the map hosts at least one "zookeeper" and one "kafka"
// pod — .findFirst().get() would throw otherwise; verify against the earlier filtering
nodesWithPods.forEach((nodeName, podList) -> {
String zkPodName = podList.stream().filter(podName -> podName.contains("zookeeper")).findFirst().get();
String kafkaPodName = podList.stream().filter(podName -> podName.contains("kafka")).findFirst().get();
// Snapshot only the pods on this node so the roll-wait tracks exactly those pods
Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(kafkaPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(zkPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
NodeUtils.drainNode(nodeName);
NodeUtils.cordonNode(nodeName, true);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
});
// All client pairs must have completed their 300 messages despite the node drains
producerNames.forEach(producer -> ClientUtils.waitForClientsSuccess(producer, consumerNames.get(producerNames.indexOf(producer)), Constants.DRAIN_CLEANER_NAMESPACE, 300));
}
Aggregations