Use of org.bf2.cos.fleet.manager.model.ServiceAccount in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
From the class AbstractOperandController, method reify.
@Override
public List<HasMetadata> reify(ManagedConnector connector, Secret secret) {
    LOGGER.debug("Reifying connector: {} and secret.metadata: {}", connector, secret.getMetadata());
    final ServiceAccount serviceAccountSettings = extract(secret, SECRET_ENTRY_SERVICE_ACCOUNT, ServiceAccount.class);
    LOGGER.debug("Extracted serviceAccount {}",
        serviceAccountSettings == null ? "is null" : "with clientId: " + serviceAccountSettings.getClientId());
    ServiceAccountSpec sas = serviceAccountSettings == null
        ? new ServiceAccountSpecBuilder().build()
        : new ServiceAccountSpecBuilder()
            .withClientId(serviceAccountSettings.getClientId())
            .withClientSecret(serviceAccountSettings.getClientSecret())
            .build();
    ConnectorConfiguration<S> connectorConfig;
    try {
        connectorConfig = new ConnectorConfiguration<>(extract(secret, SECRET_ENTRY_CONNECTOR, ObjectNode.class), connectorSpecType);
    } catch (IncompleteConnectorSpecException e) {
        throw new RuntimeException("Incomplete connectorSpec for connector \""
            + connector.getSpec().getConnectorId() + "@" + connector.getSpec().getDeploymentId()
            + "#" + connector.getSpec().getDeployment().getDeploymentResourceVersion()
            + "\": " + e.getLocalizedMessage(), e);
    }
    return doReify(connector, extract(secret, SECRET_ENTRY_META, metadataType), connectorConfig, sas);
}
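reify pulls three entries out of the deployment Secret: the service account settings, the connector spec, and the shard metadata. Below is a minimal sketch of how such a Secret could be assembled for a test; the entry names and JSON payloads are assumptions made for illustration and may not match the project's actual SECRET_ENTRY_* constants or schemas.

import java.util.Base64;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;

// Sketch only: builds a Secret shaped like the one reify() extracts from.
// The entry names below are assumed stand-ins for SECRET_ENTRY_SERVICE_ACCOUNT,
// SECRET_ENTRY_CONNECTOR and SECRET_ENTRY_META; check the controller's constants.
class ReifySecretSketch {
    static String toBase64(String value) {
        return Base64.getEncoder().encodeToString(value.getBytes());
    }

    static Secret deploymentSecret() {
        return new SecretBuilder()
            .withNewMetadata()
                .withName("example-connector-deployment")
            .endMetadata()
            // Illustrative payloads; field names are not taken from the project.
            .addToData("serviceAccount", toBase64("{\"client_id\":\"sa-id\",\"client_secret\":\"sa-secret\"}"))
            .addToData("connector", toBase64("{\"topic\":\"orders\"}"))
            .addToData("meta", toBase64("{\"connector_image\":\"quay.io/example/connector:latest\"}"))
            .build();
    }
}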
Use of org.bf2.cos.fleet.manager.model.ServiceAccount in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
From the class ConnectorTestSupport, method createDeployment.
public static ConnectorDeployment createDeployment(
        long deploymentRevision, Supplier<JsonNode> connectorSpec, Supplier<JsonNode> connectorMeta) {
    final String deploymentId = "did";
    final String connectorId = "cid";
    final String connectorTypeId = "ctid";
    return new ConnectorDeployment()
        .kind("ConnectorDeployment")
        .id(deploymentId)
        .metadata(new ConnectorDeploymentAllOfMetadata().resourceVersion(deploymentRevision))
        .spec(new ConnectorDeploymentSpec()
            .connectorId(connectorId)
            .connectorTypeId(connectorTypeId)
            .connectorResourceVersion(1L)
            .kafka(new KafkaConnectionSettings().url("kafka.acme.com:2181"))
            .schemaRegistry(new SchemaRegistryConnectionSettings().url("schemaregistry.acme.com:2282"))
            .serviceAccount(new ServiceAccount()
                .clientId(UUID.randomUUID().toString())
                .clientSecret(toBase64(UUID.randomUUID().toString())))
            .connectorSpec(connectorSpec.get())
            .shardMetadata(connectorMeta.get())
            .desiredState(DESIRED_STATE_READY));
}
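A possible call site for this helper, sketched with Jackson ObjectNode suppliers; the field names placed in the spec and shard metadata are illustrative and not taken from the project.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.bf2.cos.fleet.manager.model.ConnectorDeployment;

// Sketch only: feeds createDeployment() with ad-hoc connector spec and shard metadata.
class CreateDeploymentSketch {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();

        // Illustrative properties; real connectors define their own schemas.
        ObjectNode spec = mapper.createObjectNode().put("kafka_topic", "orders");
        ObjectNode meta = mapper.createObjectNode().put("connector_image", "quay.io/example/connector:latest");

        ConnectorDeployment deployment = ConnectorTestSupport.createDeployment(1L, () -> spec, () -> meta);
        System.out.println(deployment);
    }
}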
Use of org.bf2.cos.fleet.manager.model.ServiceAccount in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class KafkaCluster, method addKafkaAuthorizerConfig.
private void addKafkaAuthorizerConfig(ManagedKafka managedKafka, Map<String, Object> config) {
    List<String> owners = managedKafka.getSpec().getOwners();
    AtomicInteger aclCount = new AtomicInteger(0);
    AtomicInteger aclLoggingCount = new AtomicInteger(0);
    AccessControl aclConfig = getAclConfig(managedKafka);

    final String configPrefix = aclConfig.getConfigPrefix();
    final String allowedListenersKey = configPrefix + "allowed-listeners";
    final String resourceOperationsKey = configPrefix + "resource-operations";
    final String aclKeyPrefix = configPrefix + "acl";
    final String aclLoggingKeyPrefix = aclKeyPrefix + ".logging";
    final String aclKeyTemplate = aclKeyPrefix + ".%03d";
    final String aclLoggingKeyTemplate = aclLoggingKeyPrefix + ".%03d";

    // Deprecated option: remove when canary, must-gather, and SRE are configured via the ManagedKafka CR
    if (aclConfig.allowedListeners != null) {
        config.put(allowedListenersKey, aclConfig.allowedListeners);
    }

    if (aclConfig.getLoggingSuppressionWindow() != null) {
        String key = aclLoggingKeyPrefix + ".suppressionWindow";
        if (aclConfig.getLoggingSuppressionWindow().getDuration() != null) {
            config.put(key + ".duration", aclConfig.getLoggingSuppressionWindow().getDuration());
        }
        if (aclConfig.getLoggingSuppressionWindow().getApis() != null) {
            config.put(key + ".apis", aclConfig.getLoggingSuppressionWindow().getApis());
        }
        if (aclConfig.getLoggingSuppressionWindow().getEventCount() != null) {
            config.put(key + ".eventCount", aclConfig.getLoggingSuppressionWindow().getEventCount());
        }
    }

    addAcl(aclConfig.getGlobal(), "", aclKeyTemplate, aclCount, config);
    addAcl(aclConfig.getLogging(), "", aclLoggingKeyTemplate, aclLoggingCount, config);

    config.put(resourceOperationsKey, aclConfig.getResourceOperations());

    for (String owner : owners) {
        addAcl(aclConfig.getOwner(), owner, aclKeyTemplate, aclCount, config);
    }

    Objects.requireNonNullElse(managedKafka.getSpec().getServiceAccounts(), Collections.<ServiceAccount>emptyList())
        .stream()
        .forEach(account -> {
            String aclKey = String.format(SERVICE_ACCOUNT_KEY, account.getName());
            applicationConfig.getOptionalValue(aclKey, String.class)
                .ifPresent(acl -> addAcl(acl, account.getPrincipal(), aclKeyTemplate, aclCount, config));
        });
}
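Because the keys are built by string concatenation and %03d templates, the final layout can be hard to read off the code. The sketch below shows the approximate shape of the map after the method runs; "acl-prefix." stands in for whatever getConfigPrefix() returns, and the values are placeholders rather than real ACL rules.

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch only: illustrates how the %03d templates number the ACL entries.
// "acl-prefix." is an assumed stand-in for getConfigPrefix(); values are placeholders.
class AclKeyLayoutSketch {
    public static void main(String[] args) {
        Map<String, Object> config = new LinkedHashMap<>();
        config.put("acl-prefix.allowed-listeners", "<listener list, deprecated option>");
        config.put("acl-prefix.resource-operations", "<resource/operation map>");
        config.put("acl-prefix.acl.001", "<global ACL rule>");
        config.put("acl-prefix.acl.002", "<ACL rule for owner #1>");
        config.put("acl-prefix.acl.003", "<ACL rule for a ManagedKafka service account>");
        config.put("acl-prefix.acl.logging.001", "<ACL logging rule>");
        config.put("acl-prefix.acl.logging.suppressionWindow.duration", "<window duration>");
        config.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}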
Use of org.bf2.cos.fleet.manager.model.ServiceAccount in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class KafkaCluster, method addQuotaConfig.
private void addQuotaConfig(ManagedKafka managedKafka, Kafka current, Map<String, Object> config) {
    config.put("client.quota.callback.class", IO_STRIMZI_KAFKA_QUOTA_STATIC_QUOTA_CALLBACK);

    // Throttle at ingress/egress MB/sec per broker
    config.put(QUOTA_PRODUCE, String.valueOf(getIngressBytes(managedKafka, current)));
    config.put(QUOTA_FETCH, String.valueOf(getEgressBytes(managedKafka, current)));

    // Start throttling when disk is above the requested size. Full stop when only storageMinMargin is free.
    Quantity maxDataRetentionSize = getAdjustedMaxDataRetentionSize(managedKafka, current);
    long hardStorageLimit = Quantity.getAmountInBytes(maxDataRetentionSize).longValue()
        - Quantity.getAmountInBytes(storageMinMargin).longValue();
    long softStorageLimit = Quantity.getAmountInBytes(maxDataRetentionSize).longValue()
        - getStoragePadding(managedKafka, current);
    config.put("client.quota.callback.static.storage.soft", String.valueOf(softStorageLimit));
    config.put("client.quota.callback.static.storage.hard", String.valueOf(hardStorageLimit));

    // Check storage every storageCheckInterval seconds
    config.put("client.quota.callback.static.storage.check-interval", String.valueOf(storageCheckInterval));

    // Configure the quota plugin so that the canary is not subjected to the quota checks.
    Optional<ServiceAccount> canaryServiceAccount = managedKafka.getServiceAccount(ServiceAccount.ServiceAccountName.Canary);
    canaryServiceAccount.ifPresent(serviceAccount ->
        config.put("client.quota.callback.static.excluded.principal.name.list", serviceAccount.getPrincipal()));

    config.put("quota.window.num", "30");
    config.put("quota.window.size.seconds", "2");
}
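The soft and hard storage limits are plain subtractions on byte values, which is easier to see with numbers plugged in. A quick sketch with made-up sizes (the real values come from the ManagedKafka resource and operator properties):

import io.fabric8.kubernetes.api.model.Quantity;

// Sketch only: the sizes below are illustrative, not the operator's defaults.
class QuotaLimitSketch {
    public static void main(String[] args) {
        Quantity maxDataRetentionSize = new Quantity("300Gi"); // adjusted per-broker retention size
        Quantity storageMinMargin = new Quantity("10Gi");      // assumed margin kept free before a hard stop
        long storagePadding = Quantity.getAmountInBytes(new Quantity("20Gi")).longValue(); // assumed padding

        long hard = Quantity.getAmountInBytes(maxDataRetentionSize).longValue()
            - Quantity.getAmountInBytes(storageMinMargin).longValue();   // 300Gi - 10Gi
        long soft = Quantity.getAmountInBytes(maxDataRetentionSize).longValue()
            - storagePadding;                                            // 300Gi - 20Gi

        // Throttling begins once usage passes the soft limit; writes stop at the hard limit.
        System.out.println("soft=" + soft + " hard=" + hard);
    }
}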