Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
From the class RecordEndpointTestIT, method setup:
@BeforeEach
// Resets cluster state before each test: rebuilds the topic helper and deletes
// every existing topic so each test starts from a clean slate, then creates a
// fresh record helper.
void setup() {
// Second constructor argument appears to be an optional token/context — null
// here; TODO confirm against TopicUtils/RecordUtils definitions.
topicUtils = new TopicUtils(config, null);
topicUtils.deleteAllTopics();
recordUtils = new RecordUtils(config, null);
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
From the class RestEndpointTestIT, method setup:
@BeforeEach
// Resets cluster state before each test: rebuilds the topic helper and deletes
// every existing topic so each test starts from a clean slate.
void setup() {
// Second constructor argument appears to be an optional token/context — null
// here; TODO confirm against the TopicUtils definition.
topicUtils = new TopicUtils(config, null);
topicUtils.deleteAllTopics();
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class KafkaCluster, method buildKafkaConfig:
/**
 * Assembles the broker-level configuration map for the managed Kafka cluster:
 * replication/ISR settings, protocol versions, TLS, OAuth re-authentication,
 * topic-creation policy, quota plugin (when enabled) and custom-authorizer
 * settings.
 *
 * @param managedKafka the ManagedKafka resource being reconciled
 * @param current the currently deployed Kafka resource (may carry prior values)
 * @return the broker configuration entries to apply
 */
private Map<String, Object> buildKafkaConfig(ManagedKafka managedKafka, Kafka current) {
    Map<String, Object> props = new HashMap<>();

    // Replication / durability settings derived from a single configured factor.
    int replicationFactor = this.config.getKafka().getScalingAndReplicationFactor();
    props.put("offsets.topic.replication.factor", replicationFactor);
    props.put("transaction.state.log.min.isr", Math.min(replicationFactor, 2));
    props.put("transaction.state.log.replication.factor", replicationFactor);
    props.put("auto.create.topics.enable", "false");
    props.put("min.insync.replicas", Math.min(replicationFactor, 2));
    props.put("default.replication.factor", replicationFactor);

    // Protocol/format versions are owned by the KafkaManager (upgrade handling).
    props.put("log.message.format.version", this.kafkaManager.currentKafkaLogMessageFormatVersion(managedKafka));
    props.put("inter.broker.protocol.version", this.kafkaManager.currentKafkaIbpVersion(managedKafka));

    props.put("ssl.enabled.protocols", "TLSv1.3,TLSv1.2");
    props.put("ssl.protocol", "TLS");

    // OAuth session lifetime bounds connection re-authentication; a negative
    // spec value is clamped to 0, and absence falls back to the operator default.
    ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
    var sessionLifetime = oauth != null ? oauth.getMaximumSessionLifetime() : null;
    long reauthMs = sessionLifetime != null
            ? Math.max(sessionLifetime, 0)
            : this.config.getKafka().getMaximumSessionLifetimeDefault();
    props.put("connections.max.reauth.ms", reauthMs);

    if (managedKafka.getSpec().getVersions().compareStrimziVersionTo(Versions.STRIMZI_CLUSTER_OPERATOR_V0_23_0_4) >= 0) {
        // extension to manage the create topic to ensure valid Replication Factor and ISR
        props.put("create.topic.policy.class.name", "io.bf2.kafka.topic.ManagedKafkaCreateTopicPolicy");
    }

    // Force preferred-leader election as soon as possible. Mostly useful for the
    // canary: when brokers roll, partitions move but a preferred leader is not
    // elected. Could be removed once Sarama supports the Elect Leader API.
    props.put("leader.imbalance.per.broker.percentage", 0);

    props.put(MESSAGE_MAX_BYTES, this.config.getKafka().getMessageMaxBytes());

    // Quota plugin is only wired in when enabled in the operator configuration.
    if (this.config.getKafka().isEnableQuota()) {
        addQuotaConfig(managedKafka, current, props);
    }

    // Custom authorizer configuration (delegated; mutates props in place).
    addKafkaAuthorizerConfig(managedKafka, props);

    if (managedKafka.getSpec().getCapacity().getMaxPartitions() != null) {
        props.put(MAX_PARTITIONS, managedKafka.getSpec().getCapacity().getMaxPartitions());
    }

    props.put("strimzi.authorization.custom-authorizer.partition-counter.timeout-seconds", 10);
    props.put("strimzi.authorization.custom-authorizer.partition-counter.schedule-interval-seconds", 15);
    // NOTE(review): direct field access (config.kafka.acl.privatePrefix) is
    // inconsistent with the getter style used above — confirm whether an
    // accessor exists before changing it.
    props.put("strimzi.authorization.custom-authorizer.partition-counter.private-topic-prefix", this.config.kafka.acl.privatePrefix);
    props.put("strimzi.authorization.custom-authorizer.adminclient-listener.name", "controlplane-9090");
    props.put("strimzi.authorization.custom-authorizer.adminclient-listener.port", 9090);
    props.put("strimzi.authorization.custom-authorizer.adminclient-listener.protocol", "SSL");

    return props;
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class KafkaCluster, method addQuotaConfig:
/**
 * Adds Strimzi static-quota plugin settings to the broker configuration:
 * per-broker throughput throttles, soft/hard storage limits, the storage
 * check interval, and a canary exemption.
 *
 * @param managedKafka the ManagedKafka resource being reconciled
 * @param current the currently deployed Kafka resource
 * @param config the broker configuration map to mutate in place
 */
private void addQuotaConfig(ManagedKafka managedKafka, Kafka current, Map<String, Object> config) {
    config.put("client.quota.callback.class", IO_STRIMZI_KAFKA_QUOTA_STATIC_QUOTA_CALLBACK);

    // Throttle at Ingress/Egress MB/sec per broker
    config.put(QUOTA_PRODUCE, String.valueOf(getIngressBytes(managedKafka, current)));
    config.put(QUOTA_FETCH, String.valueOf(getEgressBytes(managedKafka, current)));

    // Soft limit: start throttling once usage exceeds the requested size minus
    // padding. Hard limit: full stop when only storageMinMargin remains free.
    Quantity retentionSize = getAdjustedMaxDataRetentionSize(managedKafka, current);
    long hardLimit = Quantity.getAmountInBytes(retentionSize).longValue() - Quantity.getAmountInBytes(storageMinMargin).longValue();
    long softLimit = Quantity.getAmountInBytes(retentionSize).longValue() - getStoragePadding(managedKafka, current);
    config.put("client.quota.callback.static.storage.soft", String.valueOf(softLimit));
    config.put("client.quota.callback.static.storage.hard", String.valueOf(hardLimit));

    // Re-check disk usage every storageCheckInterval seconds.
    config.put("client.quota.callback.static.storage.check-interval", String.valueOf(storageCheckInterval));

    // Exempt the canary principal (when present) from quota enforcement.
    managedKafka.getServiceAccount(ServiceAccount.ServiceAccountName.Canary)
            .ifPresent(canary -> config.put("client.quota.callback.static.excluded.principal.name.list", canary.getPrincipal()));

    config.put("quota.window.num", "30");
    config.put("quota.window.size.seconds", "2");
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class ManagedKafkaAgentController, method statusUpdateLoop:
@Timed(value = "controller.status.update", extraTags = { "resource", "ManagedKafkaAgent" }, description = "Time spent processing status updates")
@Counted(value = "controller.status.update", extraTags = { "resource", "ManagedKafkaAgent" }, description = "The number of status updates")
@Scheduled(every = "{agent.status.interval}", concurrentExecution = ConcurrentExecution.SKIP)
// Periodic loop (interval from "agent.status.interval", overlapping runs
// skipped) that refreshes the ManagedKafkaAgent status subresource and keeps
// the observability secret in sync.
void statusUpdateLoop() {
    ManagedKafkaAgent agent = this.agentClient.getByName(this.agentClient.getNamespace(), ManagedKafkaAgentResourceClient.RESOURCE_NAME);
    if (agent == null) {
        // Nothing to do until the agent resource exists in the namespace.
        return;
    }
    // Check and reinstate the secret if the observability config changed.
    this.observabilityManager.createOrUpdateObservabilitySecret(agent.getSpec().getObservability(), agent);
    log.debugf("Tick to update Kafka agent Status in namespace %s", this.agentClient.getNamespace());
    agent.setStatus(buildStatus(agent));
    this.agentClient.replaceStatus(agent);
}
Aggregations