Example 21 with Resources

use of org.bf2.cos.fleetshard.support.resources.Resources in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class ManagedKafkaProvisioner method removeClusters.

/**
 * Removes Kafka clusters. When {@code all} is set, every ManagedKafka in the
 * namespace is removed, not just the clusters tracked by this provisioner.
 *
 * @throws IOException
 */
public void removeClusters(boolean all) throws IOException {
    var client = cluster.kubeClient().client().resources(ManagedKafka.class).inNamespace(Constants.KAFKA_NAMESPACE);
    List<ManagedKafka> kafkas = clusters;
    if (all) {
        kafkas = client.list().getItems();
    }
    // delete the same collection we later wait on - iterating `clusters` here
    // would skip pre-existing instances when `all` is set
    for (ManagedKafka k : kafkas) {
        LOGGER.info("Removing cluster {}", k.getMetadata().getName());
        client.withName(k.getMetadata().getName()).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
    }
    for (ManagedKafka k : kafkas) {
        org.bf2.test.TestUtils.waitFor("await delete deployment", 1_000, 600_000, () -> client.withName(k.getMetadata().getName()).get() == null);
    }
    clusters.clear();
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)
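A minimal usage sketch, assuming a JUnit 5 system-test class that owns a ManagedKafkaProvisioner field (the class name, field name, and lifecycle wiring below are hypothetical, not part of the example above; the provisioner import depends on which test module it lives in):

import org.junit.jupiter.api.AfterAll;

public class ExampleProvisionerST {

    static ManagedKafkaProvisioner provisioner;

    @AfterAll
    static void teardown() throws Exception {
        // remove every ManagedKafka in the namespace, not just those this
        // test created, and block until each deletion completes
        provisioner.removeClusters(true);
    }
}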

Example 22 with Resources

use of org.bf2.cos.fleetshard.support.resources.Resources in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class ManagedKafkaProvisioner method setup.

/**
 * One-time setup of provisioner. This should be called only once per test class.
 */
public void setup() throws Exception {
    this.domain = determineDomain(cluster);
    File tls = new File("target", domain + "-tls.json");
    if (tls.exists()) {
        try (FileInputStream fis = new FileInputStream(tls)) {
            this.tlsConfig = Serialization.unmarshal(fis, SecurityUtils.TlsConfig.class);
        }
    } else {
        this.tlsConfig = SecurityUtils.getTLSConfig(domain);
        try (FileOutputStream fos = new FileOutputStream(tls)) {
            fos.write(Serialization.asYaml(this.tlsConfig).getBytes(StandardCharsets.UTF_8));
        }
    }
    try {
        this.clusters.addAll(cluster.kubeClient().client().resources(ManagedKafka.class).inNamespace(Constants.KAFKA_NAMESPACE).list().getItems());
    } catch (KubernetesClientException e) {
        // ignored - the namespace may not exist yet, in which case there are no existing clusters to track
    }
    agentResource = this.cluster.kubeClient().client().resources(ManagedKafkaAgent.class).inNamespace(FleetShardOperatorManager.OPERATOR_NS).withName(ManagedKafkaAgentResourceClient.RESOURCE_NAME);
}
Also used : FileOutputStream(java.io.FileOutputStream) TlsConfig(org.bf2.systemtest.framework.SecurityUtils.TlsConfig) File(java.io.File) FileInputStream(java.io.FileInputStream) ManagedKafkaAgent(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgent) KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException)
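The method caches the generated TLS config by serializing it to target/&lt;domain&gt;-tls.json as YAML and reading it back on later runs. A minimal standalone sketch of that Serialization round trip, with a hypothetical Config POJO standing in for SecurityUtils.TlsConfig:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import io.fabric8.kubernetes.client.utils.Serialization;

public class SerializationRoundTrip {

    // hypothetical POJO standing in for SecurityUtils.TlsConfig
    public static class Config {
        public String domain;
    }

    public static void main(String[] args) {
        Config written = new Config();
        written.domain = "apps.example.com";
        // serialize to YAML, as setup() does before writing the cache file
        String yaml = Serialization.asYaml(written);
        // deserialize from a stream, as setup() does when the cache file exists
        Config read = Serialization.unmarshal(
                new ByteArrayInputStream(yaml.getBytes(StandardCharsets.UTF_8)), Config.class);
        System.out.println(read.domain); // apps.example.com
    }
}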

Example 23 with Resources

use of org.bf2.cos.fleetshard.support.resources.Resources in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class InstanceProfiler method sizeInstance.

protected void sizeInstance() throws Exception {
    Stream<Node> workerNodes = kafkaCluster.getWorkerNodes().stream();
    if (!collocateBrokerWithZookeeper) {
        kafkaProvisioner.validateClusterForBrokers(numberOfBrokers, false, workerNodes);
        workerNodes = kafkaCluster.getWorkerNodes().stream().filter(n -> n.getSpec().getTaints().stream().anyMatch(t -> t.getKey().equals(ManagedKafkaProvisioner.KAFKA_BROKER_TAINT_KEY)));
    }
    // note these numbers seem to change per release - 4.9 reports a different allocatable than 4.8
    AvailableResources resources = getMinAvailableResources(workerNodes);
    long cpuMillis = resources.cpuMillis;
    long memoryBytes = resources.memoryBytes;
    Properties p = new Properties();
    try (InputStream is = InstanceProfiler.class.getResourceAsStream("/application.properties")) {
        p.load(is);
    }
    KafkaInstanceConfiguration defaults = Serialization.jsonMapper().convertValue(p, KafkaInstanceConfiguration.class);
    // when collocating with ZK, reduce the available resources accordingly
    if (collocateBrokerWithZookeeper) {
        // earlier code making a guess at the page cache size has been removed - until we can more reliably detect its effect
        // there's no point in making a trade-off between extra container memory and JVM memory
        // TODO: could choose a memory size where we can fit even multiples of zookeepers
        long zookeeperBytes = Quantity.getAmountInBytes(Quantity.parse(defaults.getZookeeper().getContainerMemory())).longValue();
        long zookeeperCpu = Quantity.getAmountInBytes(Quantity.parse(defaults.getZookeeper().getContainerCpu())).movePointRight(3).longValue();
        List<Long> additionalPodCpu = new ArrayList<>();
        List<Long> additionalPodMemory = new ArrayList<>();
        additionalPodCpu.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getCanary().getContainerCpu())).movePointRight(3).longValue());
        additionalPodMemory.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getCanary().getContainerMemory())).longValue());
        additionalPodCpu.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getAdminserver().getContainerCpu())).movePointRight(3).longValue());
        additionalPodMemory.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getAdminserver().getContainerMemory())).longValue());
        additionalPodCpu.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getExporter().getContainerCpu())).movePointRight(3).longValue());
        additionalPodMemory.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getExporter().getContainerMemory())).longValue());
        LOGGER.info("Total overhead of additional pods {} memory, {} cpu", additionalPodMemory.stream().collect(Collectors.summingLong(Long::valueOf)), additionalPodCpu.stream().collect(Collectors.summingLong(Long::valueOf)));
        // actual needs ~ 800Mi and 1075m/1575m cpu over 3 nodes, but worst case is over two. amountNeeded will
        // estimate that in a more targeted way - but still simplified
        memoryBytes = resources.memoryBytes - density * (zookeeperBytes + amountNeeded(additionalPodMemory));
        cpuMillis = resources.cpuMillis - density * (zookeeperCpu + amountNeeded(additionalPodCpu));
    // TODO account for possible ingress replica collocation
    }
    // reserve headroom for system overhead and for any pods that may eventually need to be collocated, so we don't have to adjust the resources downward later
    if (density == 1) {
        memoryBytes -= 2 * ONE_GB;
        cpuMillis -= 500;
    } else {
        // we can assume a much tighter resource utilization for density 2 - it can fluctuate between releases
        // or may require adjustments as other pods are added or pod resource adjustments are made
        memoryBytes -= 1 * ONE_GB;
        cpuMillis -= 200;
    }
    memoryBytes = memoryBytes / density;
    cpuMillis = cpuMillis / density;
    long maxVmBytes = Math.min(memoryBytes - getVMOverheadForContainer(memoryBytes), MAX_KAFKA_VM_SIZE);
    if (density > 1) {
        maxVmBytes -= 1 * ONE_GB;
    }
    if (!autoSize) {
        long defaultMemory = Quantity.getAmountInBytes(Quantity.parse(defaults.getKafka().getContainerMemory())).longValue();
        long defaultCpu = Quantity.getAmountInBytes(Quantity.parse(defaults.getKafka().getContainerCpu())).movePointRight(3).longValue();
        long defaultMaxVmBytes = Quantity.getAmountInBytes(Quantity.parse(defaults.getKafka().getJvmXms())).longValue();
        // log what auto-sizing would have chosen, then fall back to the configured defaults
        LOGGER.info("Calculated kafka sizing {} container memory, {} container cpu, and {} vm memory", memoryBytes, cpuMillis, maxVmBytes);
        memoryBytes = defaultMemory;
        cpuMillis = defaultCpu;
        maxVmBytes = defaultMaxVmBytes;
    }
    KafkaInstanceConfiguration toUse = new KafkaInstanceConfiguration();
    toUse.getKafka().setEnableQuota(false);
    AdopterProfile.openListenersAndAccess(toUse);
    toUse.getKafka().setContainerCpu(cpuMillis + "m");
    toUse.getKafka().setJvmXms(String.valueOf(maxVmBytes));
    toUse.getKafka().setContainerMemory(String.valueOf(memoryBytes));
    profilingResult.config = toUse;
    profilingResult.config.getKafka().setColocateWithZookeeper(collocateBrokerWithZookeeper);
    profilingResult.config.getKafka().setMaxConnections(Integer.MAX_VALUE);
    profilingResult.config.getKafka().setConnectionAttemptsPerSec(Integer.MAX_VALUE);
    profilingResult.config.getKafka().setMessageMaxBytes(11534336);
    profilingResult.config.getKafka().setStorageClass(storage.name().toLowerCase());
    profilingResult.config.getZookeeper().setVolumeSize(storage.zookeeperSize);
    // once we make the determination, create the instance
    // not used as quota is turned off
    profilingResult.capacity = kafkaProvisioner.defaultCapacity(40_000_000);
    profilingResult.capacity.setMaxDataRetentionSize(Quantity.parse((GIGS * numberOfBrokers / 3) + "Gi"));
    profilingResult.capacity.setMaxPartitions(defaults.getKafka().getPartitionCapacity() * numberOfBrokers / 3);
    Kafka kafka = profilingResult.config.getKafka();
    LOGGER.info("Running with kafka sizing {} container memory, {} container cpu, and {} vm memory", kafka.getContainerMemory(), kafka.getContainerCpu(), kafka.getJvmXms());
// if running on m5.4xlarge or greater and want to constrain resources like m5.2xlarge (fully dedicated)
// profilingResult.config.getKafka().setContainerMemory("29013426176");
// profilingResult.config.getKafka().setContainerCpu("6500m");
// to constrain resources like m5.xlarge (fully dedicated)
// profilingResult.config.getKafka().setContainerMemory("12453740544");
// profilingResult.config.getKafka().setContainerCpu("2500m");
}
Also used : Quantity(io.fabric8.kubernetes.api.model.Quantity) KubeClusterResource(org.bf2.performance.framework.KubeClusterResource) Arrays(java.util.Arrays) LocalDateTime(java.time.LocalDateTime) AvailableResources(org.bf2.performance.TestUtils.AvailableResources) HashMap(java.util.HashMap) KeyDistributorType(io.openmessaging.benchmark.utils.distributor.KeyDistributorType) ArrayList(java.util.ArrayList) Resource(io.fabric8.kubernetes.client.dsl.Resource) Workload(io.openmessaging.benchmark.Workload) Serialization(io.fabric8.kubernetes.client.utils.Serialization) Kafka(org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka) Map(java.util.Map) BiConsumer(java.util.function.BiConsumer) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) Node(io.fabric8.kubernetes.api.model.Node) KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException) Properties(java.util.Properties) Iterator(java.util.Iterator) Files(java.nio.file.Files) KafkaInstanceConfiguration(org.bf2.operator.operands.KafkaInstanceConfiguration) Environment(org.bf2.test.Environment) ManagedKafkaCapacity(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) File(java.io.File) StandardCharsets(java.nio.charset.StandardCharsets) Consumer(java.util.function.Consumer) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Stream(java.util.stream.Stream) TreeMap(java.util.TreeMap) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) TestResult(io.openmessaging.benchmark.TestResult) JsonInclude(com.fasterxml.jackson.annotation.JsonInclude) Comparator(java.util.Comparator) LogManager(org.apache.logging.log4j.LogManager) SuppressFBWarnings(org.bf2.common.SuppressFBWarnings) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) InputStream(java.io.InputStream)
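The sizing arithmetic above repeats one idiom: Quantity.parse reads a Kubernetes resource string, Quantity.getAmountInBytes normalizes it to a BigDecimal, and movePointRight(3) converts a fractional CPU quantity into millicores. A standalone sketch of just that conversion:

import io.fabric8.kubernetes.api.model.Quantity;

public class QuantityConversionSketch {

    public static void main(String[] args) {
        // memory: "1Gi" normalizes to bytes
        long memoryBytes = Quantity.getAmountInBytes(Quantity.parse("1Gi")).longValue();
        System.out.println(memoryBytes); // 1073741824

        // cpu: "500m" parses as 0.5 cores; shifting the decimal point three
        // places right yields millicores, matching the calls in sizeInstance
        long cpuMillis = Quantity.getAmountInBytes(Quantity.parse("500m"))
                .movePointRight(3).longValue();
        System.out.println(cpuMillis); // 500
    }
}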

Example 24 with Resources

use of org.bf2.cos.fleetshard.support.resources.Resources in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class OperatorST method testUpgradeStrimziVersion.

@SequentialTest
void testUpgradeStrimziVersion(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-upgrade";
    LOGGER.info("Create namespace");
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    String startVersion = strimziVersions.get(strimziVersions.size() - 2);
    LOGGER.info("Create managedkafka with version {}", startVersion);
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, null, startVersion, latestKafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);
    Resource<ManagedKafka> mkResource = kube.client().resources(ManagedKafka.class).inNamespace(mk.getMetadata().getNamespace()).withName(mk.getMetadata().getName());
    LOGGER.info("Upgrading managedkafka to version {}", latestStrimziVersion);
    mkResource.edit(r -> {
        r.getSpec().getVersions().setStrimzi(latestStrimziVersion);
        return r;
    });
    mkResource.waitUntilCondition(m -> {
        String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
        return ManagedKafkaCondition.Reason.StrimziUpdating.name().equals(reason);
    }, 5, TimeUnit.MINUTES);
    mkResource.waitUntilCondition(m -> ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason() == null && latestStrimziVersion.equals(m.getStatus().getVersions().getStrimzi()), 10, TimeUnit.MINUTES);
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) SequentialTest(org.bf2.systemtest.framework.SequentialTest)
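Both waitUntilCondition predicates call .get() on the Optional returned by ManagedKafkaResourceType.getCondition, which throws if the status or condition is not yet populated when the poll fires. A slightly more defensive variant of the first wait (a sketch using the same assumed helper API, not the test's actual code):

mkResource.waitUntilCondition(m -> m.getStatus() != null
        && ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready)
                // map the condition's reason to a boolean instead of calling get()
                .map(c -> ManagedKafkaCondition.Reason.StrimziUpdating.name().equals(c.getReason()))
                .orElse(false), 5, TimeUnit.MINUTES);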

Example 25 with Resources

use of org.bf2.cos.fleetshard.support.resources.Resources in project srs-fleet-manager by bf2fc6cc711aee1a0c2a.

the class AccountManagementSystemRestClientTest method clusterAuthorization.

@Test
public void clusterAuthorization() {
    final ClusterAuthorization clusterAuthorization = ClusterAuthorization.builder()
            .accountUsername("testUser.openshift")
            .productId("rhosr")
            .managed(true)
            .byoc(false)
            .clusterId("foobar")
            .cloudProviderId("aws")
            .reserve(true)
            .availabilityZone("single")
            .resources(Collections.singletonList(ReservedResource.builder()
                    .resourceType("cluster.aws")
                    .resourceName("rhosr")
                    .count(1)
                    .build()))
            .build();
    final ClusterAuthorizationResponse clusterAuthorizationResponse = accountManagementSystemRestClient.clusterAuthorization(clusterAuthorization);
    Assertions.assertNotNull(clusterAuthorizationResponse);
    Assertions.assertTrue(clusterAuthorizationResponse.getAllowed());
    Assertions.assertNotNull(clusterAuthorizationResponse.getSubscription().getId());
}
Also used : ClusterAuthorization(org.bf2.srs.fleetmanager.spi.ams.impl.model.request.ClusterAuthorization) ClusterAuthorizationResponse(org.bf2.srs.fleetmanager.spi.ams.impl.model.response.ClusterAuthorizationResponse) Test(org.junit.jupiter.api.Test)

Aggregations

Test (org.junit.jupiter.api.Test): 10
Secret (io.fabric8.kubernetes.api.model.Secret): 6
File (java.io.File): 6
ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka): 6
ObjectNode (com.fasterxml.jackson.databind.node.ObjectNode): 4
ObjectMetaBuilder (io.fabric8.kubernetes.api.model.ObjectMetaBuilder): 4
MeterRegistry (io.micrometer.core.instrument.MeterRegistry): 4
List (java.util.List): 4
NamespaceBuilder (io.fabric8.kubernetes.api.model.NamespaceBuilder): 3
Quantity (io.fabric8.kubernetes.api.model.Quantity): 3
SecretBuilder (io.fabric8.kubernetes.api.model.SecretBuilder): 3
KubernetesClientException (io.fabric8.kubernetes.client.KubernetesClientException): 3
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
ConnectorDeployment (org.bf2.cos.fleet.manager.model.ConnectorDeployment): 3
ManagedConnector (org.bf2.cos.fleetshard.api.ManagedConnector): 3
ManagedConnectorBuilder (org.bf2.cos.fleetshard.api.ManagedConnectorBuilder): 3
FleetShardSyncConfig (org.bf2.cos.fleetshard.sync.FleetShardSyncConfig): 3
FleetManagerClient (org.bf2.cos.fleetshard.sync.client.FleetManagerClient): 3
FleetShardClient (org.bf2.cos.fleetshard.sync.client.FleetShardClient): 3