Use of io.strimzi.kafka.config.model.ConfigModel in project strimzi-kafka-operator by strimzi.
From the class ConfigModelTest, method testShortValidation.
@Test
public void testShortValidation() {
    ConfigModel cm = new ConfigModel();
    cm.setType(Type.SHORT);
    // Values within the short range are valid and produce no error messages
    assertThat(cm.validate("test", "1"), is(emptyList()));
    assertThat(cm.validate("test", Short.toString(Short.MAX_VALUE)), is(emptyList()));
    assertThat(cm.validate("test", Short.toString(Short.MIN_VALUE)), is(emptyList()));
    // Values just outside the short range are rejected (expected messages mirror ConfigModel's actual wording)
    assertThat(cm.validate("test", Integer.toString(Short.MAX_VALUE + 1)), is(singletonList("test has value '32768' which is not a short")));
    assertThat(cm.validate("test", Integer.toString(Short.MIN_VALUE - 1)), is(singletonList("test has value '-32769' which is not a short")));
    // Minimum and maximum bounds are enforced on top of the type check
    cm.setMinimum(0);
    assertThat(cm.validate("test", "-1"), is(singletonList("test has value -1 which less than the minimum value 0")));
    cm.setMaximum(1);
    assertThat(cm.validate("test", "2"), is(singletonList("test has value 2 which greater than the maximum value 1")));
}
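The same validate() call generalizes naturally to a whole configuration map, since the operator keeps its config models keyed by property name (as in the Map<String, ConfigModel> passed to the diff method below). A minimal sketch of that pattern; validateAll is a hypothetical helper, not part of Strimzi's API:

import io.strimzi.kafka.config.model.ConfigModel;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ConfigValidationSketch {
    static List<String> validateAll(Map<String, String> config, Map<String, ConfigModel> models) {
        List<String> errors = new ArrayList<>();
        config.forEach((key, value) -> {
            ConfigModel model = models.get(key);
            if (model != null) {
                // validate() returns an empty list when the value is acceptable,
                // otherwise one or more human-readable error messages
                errors.addAll(model.validate(key, value));
            }
        });
        return errors;
    }
}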
Use of io.strimzi.kafka.config.model.ConfigModel in project strimzi-kafka-operator by strimzi.
From the class KafkaBrokerConfigurationDiff, method diff.
/**
 * Computes the diff between the current and the desired configuration maps. Entries in IGNORABLE_PROPERTIES are skipped.
 * @param brokerId id of the compared broker
 * @param desired desired configuration, may be null if the related ConfigMap does not exist yet or no changes are required
 * @param brokerConfigs current configuration of the broker
 * @param configModel config models for the broker's Kafka version, keyed by property name
 * @return collection of AlterConfigOp containing all entries which changed from the current to the desired configuration
 */
private Collection<AlterConfigOp> diff(int brokerId, String desired, Config brokerConfigs, Map<String, ConfigModel> configModel) {
    if (brokerConfigs == null || desired == null) {
        return Collections.emptyList();
    }
    Collection<AlterConfigOp> updatedCE = new ArrayList<>();
    Map<String, String> currentMap = brokerConfigs.entries().stream()
            .collect(Collectors.toMap(ConfigEntry::name, configEntry -> configEntry.value() == null ? "null" : configEntry.value()));

    OrderedProperties orderedProperties = new OrderedProperties();
    orderedProperties.addStringPairs(desired);
    Map<String, String> desiredMap = orderedProperties.asMap();
    fillPlaceholderValue(desiredMap, Integer.toString(brokerId));

    JsonNode source = patchMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true).valueToTree(currentMap);
    JsonNode target = patchMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true).valueToTree(desiredMap);
    JsonNode jsonDiff = JsonDiff.asJson(source, target);

    for (JsonNode d : jsonDiff) {
        String pathValue = d.get("path").asText();
        String pathValueWithoutSlash = pathValue.substring(1);
        Optional<ConfigEntry> optEntry = brokerConfigs.entries().stream()
                .filter(configEntry -> configEntry.name().equals(pathValueWithoutSlash))
                .findFirst();
        String op = d.get("op").asText();
        if (optEntry.isPresent()) {
            ConfigEntry entry = optEntry.get();
            if ("remove".equals(op)) {
                removeProperty(configModel, updatedCE, pathValueWithoutSlash, entry);
            } else if ("replace".equals(op)) {
                // entry is in the current configuration; desired holds the updated value
                updateOrAdd(entry.name(), configModel, desiredMap, updatedCE);
            }
        } else {
            if ("add".equals(op)) {
                // entry is not in the current configuration, so it is added
                updateOrAdd(pathValueWithoutSlash, configModel, desiredMap, updatedCE);
            }
        }
        if ("remove".equals(op)) {
            // many properties are set by default - logging their absence from desired above trace level would be very noisy
            LOGGER.traceCr(reconciliation, "Kafka Broker {} Config Differs : {}", brokerId, d);
            LOGGER.traceCr(reconciliation, "Current Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(source, pathValue));
            LOGGER.traceCr(reconciliation, "Desired Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(target, pathValue));
        } else {
            LOGGER.debugCr(reconciliation, "Kafka Broker {} Config Differs : {}", brokerId, d);
            LOGGER.debugCr(reconciliation, "Current Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(source, pathValue));
            LOGGER.debugCr(reconciliation, "Desired Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(target, pathValue));
        }
    }
    return updatedCE;
}
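The loop above consumes RFC 6902 JSON Patch operations. A minimal, self-contained sketch of what JsonDiff.asJson emits for two small config maps, using the com.flipkart.zjsonpatch artifact and a plain Jackson ObjectMapper as stand-ins for the patchMapper() and JsonDiff wiring Strimzi uses:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.flipkart.zjsonpatch.JsonDiff;
import java.util.Map;

public class JsonDiffSketch {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode current = mapper.valueToTree(Map.of("num.io.threads", "8", "log.retention.ms", "604800000"));
        JsonNode desired = mapper.valueToTree(Map.of("num.io.threads", "16", "compression.type", "zstd"));
        // Each diff element carries an "op" (add/remove/replace) and a "/"-prefixed "path",
        // which is why diff() strips the leading slash to recover the property name
        for (JsonNode d : JsonDiff.asJson(current, desired)) {
            System.out.println(d.get("op").asText() + " " + d.get("path").asText().substring(1));
        }
        // Prints (order may vary): remove log.retention.ms, replace num.io.threads, add compression.type
    }
}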
Use of io.strimzi.kafka.config.model.ConfigModel in project strimzi-kafka-operator by strimzi.
From the class DynamicConfSharedST, method generateTestCases.
/**
 * Dynamically generates test cases based on the Kafka version.
 * @param kafkaVersion specific Kafka version
 * @return map of dynamic configuration property names to generated test values
 */
@SuppressWarnings({ "checkstyle:CyclomaticComplexity" })
private static Map<String, Object> generateTestCases(String kafkaVersion) {
    Map<String, ConfigModel> dynamicProperties = KafkaUtils.getDynamicConfigurationProperties(kafkaVersion);
    Map<String, Object> testCases = new HashMap<>();

    dynamicProperties.forEach((key, value) -> {
        Type type = value.getType();
        Object stochasticChosenValue;

        switch (type) {
            case STRING:
                switch (key) {
                    case "compression.type":
                        List<String> compressionTypes = Arrays.asList("snappy", "gzip", "lz4", "zstd");
                        // nextInt's upper bound is exclusive, so size() keeps "zstd" selectable
                        stochasticChosenValue = compressionTypes.get(ThreadLocalRandom.current().nextInt(0, compressionTypes.size()));
                        break;
                    case "log.message.timestamp.type":
                        stochasticChosenValue = "LogAppendTime";
                        break;
                    case "ssl.protocol":
                        stochasticChosenValue = "TLSv1.1";
                        break;
                    default:
                        stochasticChosenValue = " ";
                }
                testCases.put(key, stochasticChosenValue);
                break;
            case INT:
            case LONG:
                switch (key) {
                    case "num.recovery.threads.per.data.dir":
                    case "log.cleaner.threads":
                    case "num.network.threads":
                    case "min.insync.replicas":
                    case "num.replica.fetchers":
                    case "num.partitions":
                        // picks 2 or 3 (the upper bound is exclusive)
                        stochasticChosenValue = ThreadLocalRandom.current().nextInt(2, 4);
                        break;
                    case "log.cleaner.io.buffer.load.factor":
                    case "log.retention.ms":
                    case "max.connections":
                    case "max.connections.per.ip":
                    case "background.threads":
                        stochasticChosenValue = ThreadLocalRandom.current().nextInt(4, 20);
                        break;
                    default:
                        stochasticChosenValue = ThreadLocalRandom.current().nextInt(100, 50_000);
                }
                testCases.put(key, stochasticChosenValue);
                break;
            case DOUBLE:
                switch (key) {
                    case "log.cleaner.min.cleanable.dirty.ratio":
                    case "log.cleaner.min.cleanable.ratio":
                        stochasticChosenValue = ThreadLocalRandom.current().nextDouble(0, 1);
                        break;
                    default:
                        stochasticChosenValue = ThreadLocalRandom.current().nextDouble(1, 20);
                }
                testCases.put(key, stochasticChosenValue);
                break;
            case BOOLEAN:
                switch (key) {
                    case "unclean.leader.election.enable":
                    case "log.preallocate":
                        stochasticChosenValue = true;
                        break;
                    case "log.message.downconversion.enable":
                        stochasticChosenValue = false;
                        break;
                    default:
                        stochasticChosenValue = ThreadLocalRandom.current().nextInt(2) == 0;
                }
                testCases.put(key, stochasticChosenValue);
                break;
            case LIST:
                // log.cleanup.policy = [delete, compact] -> default delete
                switch (key) {
                    case "log.cleanup.policy":
                        stochasticChosenValue = "compact";
                        break;
                    case "ssl.enabled.protocols":
                        stochasticChosenValue = "TLSv1.1";
                        break;
                    default:
                        stochasticChosenValue = " ";
                }
                testCases.put(key, stochasticChosenValue);
        }

        // skipping these configurations, which don't appear in the kafka pod (TODO: investigate why!)
        testCases.remove("num.recovery.threads.per.data.dir");
        testCases.remove("num.io.threads");
        testCases.remove("log.cleaner.dedupe.buffer.size");
        testCases.remove("num.partitions");

        // skipping these configurations because they cause exceptions
        testCases.remove("ssl.cipher.suites");
        testCases.remove("zookeeper.connection.timeout.ms");
        testCases.remove("zookeeper.connect");
    });

    return testCases;
}
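Several branches above lean on ThreadLocalRandom's half-open ranges, so the bound semantics matter: the origin is inclusive and the bound is exclusive. A quick illustrative sketch of that standard-library behavior:

import java.util.concurrent.ThreadLocalRandom;

public class RandomBoundsSketch {
    public static void main(String[] args) {
        // nextInt(origin, bound) returns origin <= n < bound: the bound itself is never produced
        for (int i = 0; i < 5; i++) {
            System.out.println(ThreadLocalRandom.current().nextInt(2, 4)); // prints only 2 or 3
        }
        // nextDouble(0, 1) likewise yields values in [0.0, 1.0), matching the dirty-ratio cases above
        System.out.println(ThreadLocalRandom.current().nextDouble(0, 1));
    }
}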