Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a) — class KafkaCluster, method addKafkaAuthorizerConfig:
/**
 * Populates {@code config} with the authorizer/ACL related entries derived from the
 * operator-level ACL configuration plus the owners and service accounts declared on
 * the {@link ManagedKafka} custom resource.
 */
private void addKafkaAuthorizerConfig(ManagedKafka managedKafka, Map<String, Object> config) {
    AccessControl aclConfig = getAclConfig(managedKafka);

    final String prefix = aclConfig.getConfigPrefix();
    final String aclKeyPrefix = prefix + "acl";
    final String aclLoggingKeyPrefix = aclKeyPrefix + ".logging";
    // numbered keys, e.g. "...acl.000", "...acl.logging.000"
    final String aclKeyTemplate = aclKeyPrefix + ".%03d";
    final String aclLoggingKeyTemplate = aclLoggingKeyPrefix + ".%03d";

    // counters shared across all addAcl calls so the numbered keys stay unique
    AtomicInteger nextAclIndex = new AtomicInteger(0);
    AtomicInteger nextAclLoggingIndex = new AtomicInteger(0);

    // Deprecated option: Remove when canary, must-gather, and SRE are configured via ManagedKafka CR
    if (aclConfig.allowedListeners != null) {
        config.put(prefix + "allowed-listeners", aclConfig.allowedListeners);
    }

    if (aclConfig.getLoggingSuppressionWindow() != null) {
        String windowKey = aclLoggingKeyPrefix + ".suppressionWindow";
        if (aclConfig.getLoggingSuppressionWindow().getDuration() != null) {
            config.put(windowKey + ".duration", aclConfig.getLoggingSuppressionWindow().getDuration());
        }
        if (aclConfig.getLoggingSuppressionWindow().getApis() != null) {
            config.put(windowKey + ".apis", aclConfig.getLoggingSuppressionWindow().getApis());
        }
        if (aclConfig.getLoggingSuppressionWindow().getEventCount() != null) {
            config.put(windowKey + ".eventCount", aclConfig.getLoggingSuppressionWindow().getEventCount());
        }
    }

    addAcl(aclConfig.getGlobal(), "", aclKeyTemplate, nextAclIndex, config);
    addAcl(aclConfig.getLogging(), "", aclLoggingKeyTemplate, nextAclLoggingIndex, config);

    config.put(prefix + "resource-operations", aclConfig.getResourceOperations());

    // one owner-template ACL entry per declared owner
    for (String owner : managedKafka.getSpec().getOwners()) {
        addAcl(aclConfig.getOwner(), owner, aclKeyTemplate, nextAclIndex, config);
    }

    // per-service-account ACLs are looked up from the application configuration by name
    List<ServiceAccount> accounts = managedKafka.getSpec().getServiceAccounts();
    for (ServiceAccount account : (accounts != null ? accounts : Collections.<ServiceAccount>emptyList())) {
        String aclKey = String.format(SERVICE_ACCOUNT_KEY, account.getName());
        applicationConfig.getOptionalValue(aclKey, String.class)
                .ifPresent(acl -> addAcl(acl, account.getPrincipal(), aclKeyTemplate, nextAclIndex, config));
    }
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a) — class KafkaCluster, method calculateRetentionSize:
/**
 * Get the current sum of storage as reported by the pvcs.
 * This may not match the requested amount ephemerally, or due to rounding
 */
@Override
public Quantity calculateRetentionSize(ManagedKafka managedKafka) {
    Kafka current = cachedKafka(managedKafka);

    // Sum the per-PVC capacities, each rounded down to whole GiB after removing broker padding.
    long totalGi = informerManager.getPvcsInNamespace(managedKafka.getMetadata().getNamespace())
            .stream()
            .mapToLong(pvc -> {
                PersistentVolumeClaimStatus status = pvc.getStatus();
                if (status == null) {
                    return 0L;
                }
                Quantity reported = OperandUtils.getOrDefault(status.getCapacity(), "storage", (Quantity) null);
                if (reported == null) {
                    return 0L;
                }
                long bytes = Quantity.getAmountInBytes(reported).longValue();
                // round down to the nearest GB - the PVC request is automatically rounded up
                return (long) Math.floor(((double) unpadBrokerStorage(managedKafka, current, bytes)) / (1L << 30));
            })
            .sum();

    Quantity requested = managedKafka.getSpec().getCapacity().getMaxDataRetentionSize();
    // try to correct for the overall rounding
    if (totalGi > 0
            && (requested == null
                    || ("Gi".equals(requested.getFormat())
                            && (Quantity.getAmountInBytes(requested).longValue() / (1L << 30))
                                    % getBrokerReplicas(managedKafka, current) != 0))) {
        totalGi++;
    }
    return Quantity.parse(String.format("%sGi", totalGi));
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a) — class KafkaInstance, method getReadiness:
@Override
public OperandReadiness getReadiness(ManagedKafka managedKafka) {
    if (managedKafka.getSpec().isDeleted()) {
        // TODO: it may be a good idea to offer a message here as well
        return new OperandReadiness(isDeleted(managedKafka) ? Status.False : Status.Unknown, Reason.Deleted, null);
    }

    List<OperandReadiness> all = operands.stream()
            .map(operand -> operand.getReadiness(managedKafka))
            .filter(Objects::nonNull)
            .collect(Collectors.toList());

    // default to the first non-null reason, which can come from the kafka by the order of the operands
    Reason reason = all.stream()
            .map(OperandReadiness::getReason)
            .filter(Objects::nonNull)
            .findFirst()
            .orElse(null);

    // default the status to false, or unknown if any operand reports unknown
    boolean anyUnknown = all.stream().anyMatch(r -> Status.Unknown.equals(r.getStatus()));
    Status status = anyUnknown ? Status.Unknown : Status.False;

    // combine all the messages
    String message = all.stream()
            .map(OperandReadiness::getMessage)
            .filter(Objects::nonNull)
            .collect(Collectors.joining("; "));

    // override in particular scenarios
    if (all.stream().allMatch(r -> Status.True.equals(r.getStatus()))) {
        status = Status.True;
    } else if (all.stream().anyMatch(r -> Reason.Installing.equals(r.getReason()))) {
        // may mask other error states
        reason = Reason.Installing;
    } else if (all.stream().anyMatch(r -> Reason.Error.equals(r.getReason()))) {
        reason = Reason.Error;
    }

    return new OperandReadiness(status, reason, message);
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a) — class IngressControllerManagerTest, method testGetManagedKafkaRoutesFor:
@Test
public void testGetManagedKafkaRoutesFor() {
    final String mkName = "my-managedkafka";

    ManagedKafka mk = new ManagedKafkaBuilder()
            .withNewMetadata()
                .withName(mkName)
                .withNamespace(mkName)
            .endMetadata()
            .withSpec(new ManagedKafkaSpecBuilder()
                    .withNewEndpoint()
                        .withBootstrapServerHost("bs.bf2.example.tld")
                    .endEndpoint()
                    .build())
            .build();

    // Per-broker Route owned by the Kafka CR, mirroring what would exist in a real namespace.
    final Function<? super String, ? extends Route> brokerToRoute = broker -> new RouteBuilder()
            .editOrNewMetadata()
                .withName(mkName + "-" + broker.replace("broker", "kafka"))
                .withNamespace(mkName)
                .addNewOwnerReference()
                    .withApiVersion(Kafka.V1BETA2)
                    .withKind(Kafka.RESOURCE_KIND)
                    .withName(AbstractKafkaCluster.kafkaClusterName(mk))
                .endOwnerReference()
            .endMetadata()
            .editOrNewSpec()
                .withHost(broker + "-bs.bf2.example.tld")
                .withTo(new RouteTargetReferenceBuilder()
                        .withKind("Service")
                        .withName(mkName + "-" + broker)
                        .withWeight(100)
                        .build())
            .endSpec()
            .build();

    // Backing Service for each broker route.
    final Function<? super String, ? extends Service> suffixToService = suffix -> new ServiceBuilder()
            .editOrNewMetadata()
                .withName(mkName + "-" + suffix)
                .withNamespace(mkName)
            .endMetadata()
            .editOrNewSpec()
                .withSelector(Map.of("dummy-label", mkName + "-" + suffix))
            .endSpec()
            .build();

    // Broker Pod pinned to a per-suffix node so each broker lands in its own zone.
    final Function<? super String, ? extends Pod> suffixToPod = suffix -> new PodBuilder()
            .editOrNewMetadata()
                .withName(mkName + "-" + suffix)
                .withNamespace(mkName)
                .addToLabels(Map.of(
                        "dummy-label", mkName + "-" + suffix,
                        "app.kubernetes.io/name", "kafka",
                        OperandUtils.MANAGED_BY_LABEL, OperandUtils.STRIMZI_OPERATOR_NAME))
            .endMetadata()
            .editOrNewSpec()
                .withNodeName("zone" + "-" + suffix)
            .endSpec()
            .build();

    // Worker Node labeled with its topology zone.
    final Function<? super String, ? extends Node> suffixToNode = suffix -> new NodeBuilder()
            .editOrNewMetadata()
                .withName("zone" + "-" + suffix)
                .withLabels(Map.of(
                        IngressControllerManager.TOPOLOGY_KEY, "zone" + "-" + suffix,
                        IngressControllerManager.WORKER_NODE_LABEL, "true"))
            .endMetadata()
            .build();

    List<String> suffixes = List.of("broker-0", "broker-1", "broker-2");
    suffixes.stream().map(brokerToRoute).forEach(route -> openShiftClient.routes().inNamespace(mkName).createOrReplace(route));
    suffixes.stream().map(suffixToService).forEach(svc -> openShiftClient.services().inNamespace(mkName).createOrReplace(svc));
    suffixes.stream().map(suffixToPod).forEach(pod -> openShiftClient.pods().inNamespace(mkName).createOrReplace(pod));
    suffixes.stream().map(suffixToNode).forEach(node -> openShiftClient.nodes().createOrReplace(node));

    ingressControllerManager.reconcileIngressControllers();

    List<ManagedKafkaRoute> managedKafkaRoutes = ingressControllerManager.getManagedKafkaRoutesFor(mk);

    assertEquals(5, managedKafkaRoutes.size());
    assertEquals(
            managedKafkaRoutes.stream().sorted(Comparator.comparing(ManagedKafkaRoute::getName)).collect(Collectors.toList()),
            managedKafkaRoutes,
            "Expected list of ManagedKafkaRoutes to be sorted by name");

    assertEquals("admin-server", managedKafkaRoutes.get(0).getName());
    assertEquals("admin-server", managedKafkaRoutes.get(0).getPrefix());
    assertEquals("ingresscontroller.kas.testing.domain.tld", managedKafkaRoutes.get(0).getRouter());

    assertEquals("bootstrap", managedKafkaRoutes.get(1).getName());
    assertEquals("", managedKafkaRoutes.get(1).getPrefix());
    assertEquals("ingresscontroller.kas.testing.domain.tld", managedKafkaRoutes.get(1).getRouter());

    assertEquals("broker-0", managedKafkaRoutes.get(2).getName());
    assertEquals("broker-0", managedKafkaRoutes.get(2).getPrefix());
    assertEquals("ingresscontroller.kas-zone-broker-0.testing.domain.tld", managedKafkaRoutes.get(2).getRouter());

    assertEquals("broker-1", managedKafkaRoutes.get(3).getName());
    assertEquals("broker-1", managedKafkaRoutes.get(3).getPrefix());
    assertEquals("ingresscontroller.kas-zone-broker-1.testing.domain.tld", managedKafkaRoutes.get(3).getRouter());

    assertEquals("broker-2", managedKafkaRoutes.get(4).getName());
    assertEquals("broker-2", managedKafkaRoutes.get(4).getPrefix());
    assertEquals("ingresscontroller.kas-zone-broker-2.testing.domain.tld", managedKafkaRoutes.get(4).getRouter());
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a) — class ConditionUtilsTest, method testFindManagedKafkaCondition:
@Test
void testFindManagedKafkaCondition() {
    // Build a ManagedKafka whose status carries a single Ready=True condition.
    ManagedKafka mk = new ManagedKafkaBuilder()
            .withMetadata(new ObjectMetaBuilder().withNamespace("test").withName("my-managed-kafka").build())
            .withSpec(new ManagedKafkaSpecBuilder()
                    .withNewVersions()
                        .withKafka("2.6.0")
                        .withStrimzi("0.21.1")
                    .endVersions()
                    .build())
            .withStatus(new ManagedKafkaStatusBuilder()
                    .addNewCondition()
                        .withStatus(Status.True.name())
                        .withType(ManagedKafkaCondition.Type.Ready.name())
                    .endCondition()
                    .build())
            .build();

    // orElse(null) makes an absent condition FAIL the assertion with a clear message,
    // whereas the previous Optional.get() would instead error with NoSuchElementException
    // (and assertNotNull on get() was vacuous, since get() never returns null).
    assertNotNull(ConditionUtils.findManagedKafkaCondition(mk.getStatus().getConditions(), ManagedKafkaCondition.Type.Ready).orElse(null));
}
Aggregations