Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kafka-admin-api (by bf2fc6cc711aee1a0c2a):
class PartitionsOffsetOauthIT, method setup().
// Per-test initialization: build the token helper from the shared test
// configuration, then create a consumer helper authenticated as the OWNER user.
@BeforeEach
void setup() {
    tokenUtils = new TokenUtils(config);
    String ownerToken = tokenUtils.getToken(UserType.OWNER.getUsername());
    consumerUtils = new ConsumerUtils(config, ownerToken);
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard (by bf2fc6cc711aee1a0c2a):
class KafkaCluster, method addKafkaAuthorizerConfig().
/**
 * Populates the broker {@code config} map with authorizer/ACL entries derived from
 * the {@link ManagedKafka} spec and the profile's {@code AccessControl} settings.
 *
 * <p>Entries written (all under {@code aclConfig.getConfigPrefix()}):
 * allowed listeners (deprecated), optional ACL-logging suppression window,
 * global and logging ACLs, resource operations, one owner ACL per spec owner,
 * and one ACL per declared service account (when an override property exists).
 *
 * @param managedKafka the custom resource supplying owners and service accounts
 * @param config       mutable broker configuration map to add entries to
 */
private void addKafkaAuthorizerConfig(ManagedKafka managedKafka, Map<String, Object> config) {
    List<String> owners = managedKafka.getSpec().getOwners();
    // Counters shared across addAcl calls so generated keys (.000, .001, ...) stay unique.
    AtomicInteger aclCount = new AtomicInteger(0);
    AtomicInteger aclLoggingCount = new AtomicInteger(0);
    AccessControl aclConfig = getAclConfig(managedKafka);

    final String configPrefix = aclConfig.getConfigPrefix();
    final String allowedListenersKey = configPrefix + "allowed-listeners";
    final String resourceOperationsKey = configPrefix + "resource-operations";
    final String aclKeyPrefix = configPrefix + "acl";
    final String aclLoggingKeyPrefix = aclKeyPrefix + ".logging";
    // %03d gives zero-padded, lexically sortable ACL indexes (acl.000, acl.001, ...).
    final String aclKeyTemplate = aclKeyPrefix + ".%03d";
    final String aclLoggingKeyTemplate = aclLoggingKeyPrefix + ".%03d";

    // Deprecated option: Remove when canary, must-gather, and SRE are configured via ManagedKafka CR
    if (aclConfig.allowedListeners != null) {
        config.put(allowedListenersKey, aclConfig.allowedListeners);
    }

    if (aclConfig.getLoggingSuppressionWindow() != null) {
        addSuppressionWindowConfig(aclConfig, aclLoggingKeyPrefix, config);
    }

    addAcl(aclConfig.getGlobal(), "", aclKeyTemplate, aclCount, config);
    addAcl(aclConfig.getLogging(), "", aclLoggingKeyTemplate, aclLoggingCount, config);

    config.put(resourceOperationsKey, aclConfig.getResourceOperations());

    for (String owner : owners) {
        addAcl(aclConfig.getOwner(), owner, aclKeyTemplate, aclCount, config);
    }

    // Service accounts are optional in the spec; treat a null list as empty.
    // forEach directly on the list — the previous .stream() was redundant.
    Objects.requireNonNullElse(managedKafka.getSpec().getServiceAccounts(), Collections.<ServiceAccount>emptyList())
        .forEach(account -> {
            String aclKey = String.format(SERVICE_ACCOUNT_KEY, account.getName());
            applicationConfig.getOptionalValue(aclKey, String.class)
                .ifPresent(acl -> addAcl(acl, account.getPrincipal(), aclKeyTemplate, aclCount, config));
        });
}

/**
 * Adds the optional ACL-logging suppression-window entries (duration, apis,
 * eventCount) under {@code <aclLoggingKeyPrefix>.suppressionWindow.*}.
 * Only non-null window attributes are written.
 */
private void addSuppressionWindowConfig(AccessControl aclConfig, String aclLoggingKeyPrefix, Map<String, Object> config) {
    String key = aclLoggingKeyPrefix + ".suppressionWindow";
    if (aclConfig.getLoggingSuppressionWindow().getDuration() != null) {
        config.put(key + ".duration", aclConfig.getLoggingSuppressionWindow().getDuration());
    }
    if (aclConfig.getLoggingSuppressionWindow().getApis() != null) {
        config.put(key + ".apis", aclConfig.getLoggingSuppressionWindow().getApis());
    }
    if (aclConfig.getLoggingSuppressionWindow().getEventCount() != null) {
        config.put(key + ".eventCount", aclConfig.getLoggingSuppressionWindow().getEventCount());
    }
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard (by bf2fc6cc711aee1a0c2a):
class ManagedKafkaValueProdMinimumTest, method doTestValueProdMinimum().
// Provisions a Kafka cluster with the given adopter sizing profile, then drives an
// OpenMessaging Benchmark (OMB) workload against it and logs where the results were written.
// Parameters size both the cluster (zk/kafka memory, CPU) and the workload
// (producer rate, client count, topics, partitions); `key` doubles as the workload name
// and the log tag. Side effects: deploys OMB workers and a Kafka cluster; throws on
// provisioning or workload failure.
private void doTestValueProdMinimum(ManagedKafkaCapacity capacity, int workerProducerRate, int numClients, String zkContainerMemory, String zkJavaMemory, String kafkaContainerMemory, String kafkaJavaMemory, String kfCpu, int topics, int partitionsPerTopic, String key, String testName) throws Exception {
// One OMB worker per 10 clients — NOTE(review): ratio appears to be a sizing heuristic; confirm.
int numWorkers = numClients / 10;
int messageSize = 1024;
// Ensure the client cluster can host the workers before deploying them.
ensureClientClusterCapacityForWorkers(omb.getOmbCluster(), numWorkers, WORKER_SIZE, CPU_SIZE);
workers = omb.deployWorkers(numWorkers);
LOGGER.info("Test config: {}", key);
// ZooKeeper CPU is fixed at 1000m; everything else comes from the parameters.
KafkaInstanceConfiguration profile = AdopterProfile.buildProfile(zkContainerMemory, zkJavaMemory, "1000m", kafkaContainerMemory, kafkaJavaMemory, kfCpu);
String bootstrapHosts = kafkaProvisioner.deployCluster("cluster1", capacity, profile).waitUntilReady();
// Durable-produce settings: RF=3, min ISR 2, acks=all; consumers start from earliest with manual commits.
OMBDriver driver = new OMBDriver().setReplicationFactor(3).setTopicConfig("min.insync.replicas=2\n").setCommonConfigWithBootstrapUrl(bootstrapHosts).setProducerConfig("acks=all\n").setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
// Split clients evenly: half producers, half consumers, spread across topics.
int producerConsumer = numClients / topics / 2;
OMBWorkloadResult result = omb.runWorkload(instanceDir, driver, workers, new OMBWorkload().setName(key).setTopics(topics).setPartitionsPerTopic(partitionsPerTopic).setMessageSize(messageSize).setPayloadFile("src/test/resources/payload/payload-1Kb.data").setSubscriptionsPerTopic(1).setConsumerPerSubscription(producerConsumer).setProducersPerTopic(producerConsumer).setProducerRate(workerProducerRate).setConsumerBacklogSizeGB(0));
LOGGER.info("{} : results {}", key, result.getResultFile());
// Rate-threshold assertions below are intentionally disabled; the test currently only
// records results. Re-enable (or delete) once acceptable rate thresholds are settled.
// double threshold = 0.9 * targetRate;
// List<Double> lowProduceRates = result.getTestResult().publishRate.stream().filter(rate -> rate < threshold).collect(Collectors.toList());
// List<Double> lowConsumeRates = result.getTestResult().consumeRate.stream().filter(rate -> rate < threshold).collect(Collectors.toList());
// LOGGER.info("{}: low produce : {} low consume: {}", key, lowProduceRates, lowConsumeRates);
// assertTrue(lowProduceRates.isEmpty(), "Unexpectedly low produce rate(s): " + lowProduceRates);
// assertTrue(lowConsumeRates.isEmpty(), "Unexpectedly low consume rate(s): " + lowConsumeRates);
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard (by bf2fc6cc711aee1a0c2a):
class KafkaClusterTest, method testManagedKafkaToKafkaWithSizeChanges().
// Verifies storage-resize semantics of KafkaCluster.kafkaFrom when reconciling against an
// existing Kafka CR: a request for a smaller size must NOT shrink the cluster (reduced ==
// baseline expectation), while a larger request must grow storage and the static quota values.
@Test
void testManagedKafkaToKafkaWithSizeChanges() throws IOException {
// Snapshot the shared configuration so it can be restored in finally — the
// kafkaCluster instance is shared across tests and must not leak this test's settings.
KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
try {
ObjectMapper objectMapper = new ObjectMapper();
// Deep-copy via JSON round-trip so mutations below don't touch the original config.
KafkaInstanceConfiguration clone = objectMapper.readValue(objectMapper.writeValueAsString(config), KafkaInstanceConfiguration.class);
clone.getKafka().setOneInstancePerNode(false);
clone.getKafka().setColocateWithZookeeper(false);
clone.getExporter().setColocateWithZookeeper(false);
kafkaCluster.setKafkaConfiguration(clone);
Kafka kafka = kafkaCluster.kafkaFrom(exampleManagedKafka("60Gi"), null);
Kafka reduced = kafkaCluster.kafkaFrom(exampleManagedKafka("40Gi"), kafka);
// should not change to a smaller size
diffToExpected(reduced, "/expected/strimzi.yml");
Kafka larger = kafkaCluster.kafkaFrom(exampleManagedKafka("80Gi"), kafka);
// should change to a larger size
// The JSON-patch string pins the exact recomputed quota (soft/hard) and volume-size values.
diffToExpected(larger, "/expected/strimzi.yml", "[{\"op\":\"replace\",\"path\":\"/spec/kafka/config/client.quota.callback.static.storage.soft\",\"value\":\"28633115306\"},{\"op\":\"replace\",\"path\":\"/spec/kafka/config/client.quota.callback.static.storage.hard\",\"value\":\"28675058306\"},{\"op\":\"replace\",\"path\":\"/spec/kafka/storage/volumes/0/size\",\"value\":\"39412476546\"}]");
} finally {
// Always restore the original configuration for subsequent tests.
kafkaCluster.setKafkaConfiguration(config);
}
}
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard (by bf2fc6cc711aee1a0c2a):
class AdopterProfile, method buildProfile().
/**
 * Builds a {@code KafkaInstanceConfiguration} for performance/adopter tests:
 * connection and quota limits are effectively disabled, brokers run one per node
 * (optionally collocated with ZooKeeper), and container/JVM sizing for Kafka and
 * ZooKeeper comes from the given parameters.
 *
 * @param zookeeperContainerMemory ZooKeeper container memory request/limit
 * @param zookeeperJavaMemory      ZooKeeper JVM -Xms value
 * @param zookeeperCpu             ZooKeeper container CPU
 * @param kafkaContainerMemory     Kafka container memory request/limit
 * @param kafkaJavaMemory          Kafka JVM -Xms value
 * @param kafkaCpu                 Kafka container CPU
 * @return the fully configured profile, with listeners and access opened
 */
public static KafkaInstanceConfiguration buildProfile(String zookeeperContainerMemory, String zookeeperJavaMemory, String zookeeperCpu, String kafkaContainerMemory, String kafkaJavaMemory, String kafkaCpu) {
    KafkaInstanceConfiguration profile = new KafkaInstanceConfiguration();

    // Remove throttling so the benchmark load, not broker limits, is the bottleneck.
    profile.getKafka().setMaxConnections(Integer.MAX_VALUE);
    profile.getKafka().setConnectionAttemptsPerSec(Integer.MAX_VALUE);
    profile.getKafka().setEnableQuota(false);

    // Placement: one broker per node; collocation with ZooKeeper is set on both the
    // kafka sub-config and the top-level config, mirroring the original behavior.
    profile.getKafka().setOneInstancePerNode(true);
    profile.getKafka().setColocateWithZookeeper(BROKER_COLLOCATED_WITH_ZOOKEEPER);
    profile.setColocateWithZookeeper(BROKER_COLLOCATED_WITH_ZOOKEEPER);

    // Kafka container and JVM sizing.
    profile.getKafka().setContainerMemory(kafkaContainerMemory);
    profile.getKafka().setContainerCpu(kafkaCpu);
    profile.getKafka().setJvmXms(kafkaJavaMemory);

    // ZooKeeper container and JVM sizing.
    profile.getZookeeper().setContainerMemory(zookeeperContainerMemory);
    profile.getZookeeper().setContainerCpu(zookeeperCpu);
    profile.getZookeeper().setJvmXms(zookeeperJavaMemory);

    openListenersAndAccess(profile);
    return profile;
}
Aggregations