use of org.bf2.cos.fleetshard.api.Operator in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class DevelopmentKafkaCluster method kafkaFrom.
/* test */
@Override
public Kafka kafkaFrom(ManagedKafka managedKafka, Kafka current) {
KafkaBuilder builder = current != null ? new KafkaBuilder(current) : new KafkaBuilder();
int replicas = getReplicas(managedKafka);
Kafka kafka = builder
        .editOrNewMetadata()
            .withName(kafkaClusterName(managedKafka))
            .withNamespace(kafkaClusterNamespace(managedKafka))
            .withLabels(buildLabels(managedKafka))
        .endMetadata()
        .editOrNewSpec()
            .editOrNewKafka()
                .withVersion(managedKafka.getSpec().getVersions().getKafka())
                .withReplicas(replicas)
                .withListeners(buildListeners(managedKafka, replicas))
                .withStorage(buildStorage())
                .withConfig(buildKafkaConfig(managedKafka))
                .withTemplate(getKafkaTemplate(managedKafka))
                .withImage(kafkaImage.orElse(null))
            .endKafka()
            .editOrNewZookeeper()
                .withReplicas(this.config.getZookeeper().getReplicas())
                .withStorage((SingleVolumeStorage) buildStorage())
                .withTemplate(getZookeeperTemplate(managedKafka))
                .withImage(zookeeperImage.orElse(null))
            .endZookeeper()
        .endSpec()
        .build();
// setting the ManagedKafka as owner of the Kafka resource is needed
// by the operator sdk to handle events on the Kafka resource properly
OperandUtils.setAsOwner(managedKafka, kafka);
return kafka;
}
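For context, a minimal sketch of what an owner-reference helper such as OperandUtils.setAsOwner typically does with the fabric8 Kubernetes client is shown below; the exact kas-fleetshard implementation may differ, for example in how it populates the controller flag.
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.OwnerReferenceBuilder;
import java.util.Collections;

final class OwnerReferenceSketch {
    // Make the owner the sole owner reference of the resource, so that
    // garbage collection and operator event routing follow the owner.
    static void setAsOwner(HasMetadata owner, HasMetadata resource) {
        resource.getMetadata().setOwnerReferences(Collections.singletonList(
                new OwnerReferenceBuilder()
                        .withApiVersion(owner.getApiVersion())
                        .withKind(owner.getKind())
                        .withName(owner.getMetadata().getName())
                        .withUid(owner.getMetadata().getUid())
                        .build()));
    }
}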
use of org.bf2.cos.fleetshard.api.Operator in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaResourceType method getDefault.
/**
 * Get a common default ManagedKafka instance.
 *
 * @throws Exception
 */
public static ManagedKafka getDefault(String namespace, String appName, KeycloakInstance keycloak, String strimziVersion, String kafkaVersion) throws Exception {
final String tlsCert;
final String tlsKey;
String hostDomain = SystemTestEnvironment.BOOTSTRAP_HOST_DOMAIN;
if (!KubeClient.getInstance().isGenericKubernetes()) {
OpenShiftClient cli = KubeClient.getInstance().client().adapt(OpenShiftClient.class);
hostDomain = Optional.ofNullable(
        cli.operator().ingressControllers()
                .inNamespace("openshift-ingress-operator").withName("sharded").get())
        .orElse(cli.operator().ingressControllers()
                .inNamespace("openshift-ingress-operator").withName("default").get())
        .getStatus().getDomain();
}
if (SystemTestEnvironment.DUMMY_CERT.equals(SystemTestEnvironment.ENDPOINT_TLS_CERT)) {
SecurityUtils.TlsConfig tlsConfig = SecurityUtils.getTLSConfig(hostDomain);
tlsCert = tlsConfig.getCert();
tlsKey = tlsConfig.getKey();
} else {
tlsCert = SystemTestEnvironment.ENDPOINT_TLS_CERT;
tlsKey = SystemTestEnvironment.ENDPOINT_TLS_KEY;
}
final String oauthClientId;
final String oauthTlsCert;
final String oauthClientSecret;
final String oauthUserClaim;
final String oauthFallbackUserClaim;
final String oauthJwksEndpoint;
final String oauthTokenEndpoint;
final String oauthIssuerEndpoint;
if (keycloak != null) {
oauthClientId = "kafka";
oauthTlsCert = keycloak.getKeycloakCert();
oauthClientSecret = "kafka";
oauthUserClaim = keycloak.getUserNameClaim();
oauthFallbackUserClaim = keycloak.getFallbackUserNameClaim();
oauthJwksEndpoint = keycloak.getJwksEndpointUri();
oauthTokenEndpoint = keycloak.getOauthTokenEndpointUri();
oauthIssuerEndpoint = keycloak.getValidIssuerUri();
} else if (SystemTestEnvironment.DUMMY_OAUTH_JWKS_URI.equals(SystemTestEnvironment.OAUTH_JWKS_ENDPOINT)) {
oauthClientId = null;
oauthTlsCert = null;
oauthClientSecret = null;
oauthUserClaim = null;
oauthFallbackUserClaim = null;
oauthJwksEndpoint = null;
oauthTokenEndpoint = null;
oauthIssuerEndpoint = null;
} else {
// use the OAuth values defined by env vars
oauthClientId = SystemTestEnvironment.OAUTH_CLIENT_ID;
oauthTlsCert = SystemTestEnvironment.DUMMY_CERT.equals(SystemTestEnvironment.OAUTH_TLS_CERT) ? null : SystemTestEnvironment.OAUTH_TLS_CERT;
oauthClientSecret = SystemTestEnvironment.OAUTH_CLIENT_SECRET;
oauthUserClaim = SystemTestEnvironment.OAUTH_USER_CLAIM;
oauthFallbackUserClaim = SystemTestEnvironment.OAUTH_FALLBACK_USER_CLAIM;
oauthJwksEndpoint = SystemTestEnvironment.OAUTH_JWKS_ENDPOINT;
oauthTokenEndpoint = SystemTestEnvironment.OAUTH_TOKEN_ENDPOINT;
oauthIssuerEndpoint = SystemTestEnvironment.OAUTH_ISSUER_ENDPOINT;
}
return ManagedKafka.getDefault(appName, namespace, hostDomain, tlsCert, tlsKey,
        oauthClientId, oauthTlsCert, oauthClientSecret, oauthUserClaim, oauthFallbackUserClaim,
        oauthJwksEndpoint, oauthTokenEndpoint, oauthIssuerEndpoint, strimziVersion, kafkaVersion);
}
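A hypothetical call site for this helper is sketched below; the namespace, application name, and version strings are illustrative placeholders, and passing null for the KeycloakInstance exercises the env-var/dummy OAuth branches above.
class ManagedKafkaDefaultsExample {
    static ManagedKafka example() throws Exception {
        return ManagedKafkaResourceType.getDefault(
                "mk-test-namespace",                 // namespace (placeholder)
                "mk-test",                           // appName (placeholder)
                null,                                // keycloak: fall back to env vars or dummy values
                "strimzi-cluster-operator.v0.23.0",  // strimziVersion (placeholder)
                "2.7.0");                            // kafkaVersion (placeholder)
    }
}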
use of org.bf2.cos.fleetshard.api.Operator in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaAgentController method buildStatus.
/**
 * TODO: the hard-coded capacity values below need to be replaced with actual metrics
 *
 * @return the built ManagedKafkaAgentStatus
 */
private ManagedKafkaAgentStatus buildStatus(ManagedKafkaAgent resource) {
ManagedKafkaAgentStatus status = resource.getStatus();
ManagedKafkaCondition readyCondition = null;
if (status != null) {
readyCondition = ConditionUtils.findManagedKafkaCondition(status.getConditions(), Type.Ready).orElse(null);
}
List<StrimziVersionStatus> strimziVersions = this.strimziManager.getStrimziVersions();
log.debugf("Strimzi versions %s", strimziVersions);
// consider the fleetshard operator ready when observability is running and a Strimzi
// bundle is installed (i.e. at least one version is available)
Status statusValue = this.observabilityManager.isObservabilityRunning() && !strimziVersions.isEmpty()
        ? ManagedKafkaCondition.Status.True
        : ManagedKafkaCondition.Status.False;
if (readyCondition == null) {
readyCondition = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, statusValue);
} else {
ConditionUtils.updateConditionStatus(readyCondition, statusValue, null, null);
}
ClusterCapacity total = new ClusterCapacityBuilder()
        .withConnections(10000)
        .withDataRetentionSize(Quantity.parse("40Gi"))
        .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
        .withPartitions(10000)
        .build();
ClusterCapacity remaining = new ClusterCapacityBuilder()
        .withConnections(10000)
        .withDataRetentionSize(Quantity.parse("40Gi"))
        .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
        .withPartitions(10000)
        .build();
ClusterCapacity delta = new ClusterCapacityBuilder()
        .withConnections(10000)
        .withDataRetentionSize(Quantity.parse("40Gi"))
        .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
        .withPartitions(10000)
        .build();
NodeCounts nodeInfo = new NodeCountsBuilder()
        .withCeiling(0)
        .withCurrent(0)
        .withCurrentWorkLoadMinimum(0)
        .withFloor(0)
        .build();
ClusterResizeInfo resize = new ClusterResizeInfoBuilder()
        .withDelta(delta)
        .withNodeDelta(3)
        .build();
return new ManagedKafkaAgentStatusBuilder()
        .withConditions(status == null ? Arrays.asList(readyCondition) : status.getConditions())
        .withTotal(total)
        .withRemaining(remaining)
        .withNodeInfo(nodeInfo)
        .withResizeInfo(resize)
        .withUpdatedTimestamp(ConditionUtils.iso8601Now())
        .withStrimzi(strimziVersions)
        .build();
}
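The condition handling above leans on small helpers; a minimal sketch of a findManagedKafkaCondition-style lookup using the streams API, assuming the condition type is stored as a string on the condition object, might look like this:
import java.util.List;
import java.util.Optional;

final class ConditionLookupSketch {
    // Returns the first condition whose type matches, if any.
    static Optional<ManagedKafkaCondition> findCondition(
            List<ManagedKafkaCondition> conditions, ManagedKafkaCondition.Type type) {
        return conditions.stream()
                .filter(c -> type.name().equals(c.getType()))
                .findFirst();
    }
}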
use of org.bf2.cos.fleetshard.api.Operator in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaProvisioner method install.
/**
* Install this Kafka provisioner. This can be called once per test class or per test method.
*/
public void install() throws Exception {
// delete/create the namespaces to be used
Map<String, String> nsAnnotations = new HashMap<>();
if (PerformanceEnvironment.KAFKA_COLLECT_LOG) {
nsAnnotations.put(Constants.ORG_BF2_KAFKA_PERFORMANCE_COLLECTPODLOG, "true");
}
cluster.waitForDeleteNamespace(StrimziOperatorManager.OPERATOR_NS);
FleetShardOperatorManager.deleteFleetShard(cluster.kubeClient()).get(2, TimeUnit.MINUTES);
cluster.createNamespace(Constants.KAFKA_NAMESPACE, nsAnnotations, Map.of());
List<Node> workers = cluster.getWorkerNodes();
boolean smallNodes = workers.stream().anyMatch(n -> TestUtils.getMaxAvailableResources(n).cpuMillis < 3000);
if (smallNodes) {
MixedOperation<Deployment, DeploymentList, RollableScalableResource<Deployment>> deployments = cluster.kubeClient().client().apps().deployments();
this.informer = deployments.inAnyNamespace().inform(new ResourceEventHandler<Deployment>() {
@Override
public void onUpdate(Deployment oldObj, Deployment newObj) {
onAdd(newObj);
}
@Override
public void onDelete(Deployment obj, boolean deletedFinalStateUnknown) {
}
@Override
public void onAdd(Deployment obj) {
if (!obj.getMetadata().getNamespace().equals(StrimziOperatorManager.OPERATOR_NS) && !obj.getMetadata().getNamespace().equals(FleetShardOperatorManager.OPERATOR_NS)) {
return;
}
// patch any deployment that requests a lot of cpu, and make sure it's on the perf infra
deployments.inNamespace(obj.getMetadata().getNamespace()).withName(obj.getMetadata().getName()).edit(new TypedVisitor<ResourceRequirementsBuilder>() {
@Override
public void visit(ResourceRequirementsBuilder element) {
Quantity cpu = null;
if (element.getRequests() != null) {
cpu = element.getRequests().get("cpu");
}
if (cpu == null && element.getLimits() != null) {
cpu = element.getLimits().get("cpu");
}
if (cpu != null && Quantity.getAmountInBytes(cpu).compareTo(BigDecimal.valueOf(1)) > 0) {
element.addToRequests("cpu", Quantity.parse("1"));
}
}
});
}
});
}
// installs the Strimzi Operator using the OLM bundle
CompletableFuture<Void> strimziFuture = strimziManager.deployStrimziOperator();
cluster.connectNamespaceToMonitoringStack(StrimziOperatorManager.OPERATOR_NS);
// installs a cluster wide fleetshard operator
// both operator deployment futures are awaited together below, before the agent resource is created
CompletableFuture<Void> future = FleetShardOperatorManager.deployFleetShardOperator(cluster.kubeClient());
CompletableFuture.allOf(future, strimziFuture).get(2, TimeUnit.MINUTES);
var agentResource = this.cluster.kubeClient().client().resource(
        new ManagedKafkaAgentBuilder()
                .withNewMetadata()
                    .withName(ManagedKafkaAgentResourceClient.RESOURCE_NAME)
                    .withNamespace(FleetShardOperatorManager.OPERATOR_NS)
                .endMetadata()
                .withSpec(new ManagedKafkaAgentSpecBuilder()
                        .withNewObservability()
                            .withAccessToken("")
                            .withChannel("")
                            .withRepository("")
                            .withTag("")
                        .endObservability()
                        .build())
                .build());
agentResource.createOrReplace();
// FleetShardOperatorManager.deployFleetShardSync(cluster.kubeClient());
cluster.connectNamespaceToMonitoringStack(FleetShardOperatorManager.OPERATOR_NS);
strimziVersions = SyncApiClient.getSortedAvailableStrimziVersions(() -> agentResource.fromServer().get().getStatus()).collect(Collectors.toList());
}
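One design note: the informer created for small-node clusters keeps a deployment watch open for the lifetime of the provisioner. A hedged sketch of the matching teardown, assuming a hypothetical uninstall hook, would stop it:
// Hypothetical teardown counterpart; the project's actual cleanup may differ.
public void uninstall() {
    if (this.informer != null) {
        this.informer.stop();  // stop the deployment watch started in install()
        this.informer = null;
    }
}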
use of org.bf2.cos.fleetshard.api.Operator in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaSync method reconcile.
/**
* Final sync processing of the remote vs. local state
*/
void reconcile(String remoteId, String localMetaNamespaceKey) {
ManagedKafka local = null;
if (localMetaNamespaceKey != null) {
// refresh the local
local = lookup.getLocalManagedKafka(localMetaNamespaceKey);
}
ManagedKafka remote = null;
if (remoteId != null) {
// refresh the remote
remote = controlPlane.getDesiredState(remoteId);
}
if (local == null && remote == null) {
// nothing to do
return;
}
String id = null;
if (local != null) {
id = local.getId();
} else {
id = remote.getId();
}
if (id != null) {
NDC.push(ManagedKafkaResourceClient.ID_LOG_KEY + "=" + id);
}
try {
if (local == null) {
if (!remote.getSpec().isDeleted()) {
create(remote);
}
} else if (remote == null) {
if (deleteAllowed(local)) {
delete(local);
}
} else {
if (!Objects.equals(local.getPlacementId(), remote.getPlacementId())) {
log.debugf("Waiting for existing ManagedKafka %s to disappear before attempting next placement", local.getPlacementId());
return;
}
if (specChanged(remote.getSpec(), local)) {
log.debugf("Updating ManagedKafka Spec for %s", Cache.metaNamespaceKeyFunc(local));
ManagedKafkaSpec spec = remote.getSpec();
client.edit(local.getMetadata().getNamespace(), local.getMetadata().getName(), mk -> {
mk.setSpec(spec);
return mk;
});
// the operator will handle it from here
}
}
} finally {
if (id != null) {
NDC.pop();
}
}
}
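For illustration, a minimal specChanged along the lines used above could simply compare the desired and local specs for equality; this assumes ManagedKafkaSpec implements a value-based equals, and the real kas-fleetshard helper may normalize defaults before comparing, so treat this as a sketch:
import java.util.Objects;

final class SpecCompareSketch {
    // True when the remote (desired) spec differs from the local resource's spec.
    static boolean specChanged(ManagedKafkaSpec remoteSpec, ManagedKafka local) {
        return !Objects.equals(remoteSpec, local.getSpec());
    }
}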