Example usage of io.strimzi.operator.common.model.OrderedProperties in the strimzi/strimzi-kafka-operator project:
class Util, method expandVars.
/**
 * Load the properties and expand any variables of format ${NAME} inside values with resolved values.
 * Variables are resolved by looking up the property names only within the loaded map.
 *
 * @param env Multiline properties file as String
 * @return Multiline properties file as String with variables resolved
 */
public static String expandVars(String env) {
    OrderedProperties ops = new OrderedProperties();
    ops.addStringPairs(env);
    // Snapshot the parsed pairs once; the same map is both iterated and used
    // for variable lookups (the original re-called ops.asMap() on every iteration).
    Map<String, String> map = ops.asMap();
    StringBuilder resultBuilder = new StringBuilder();
    for (Map.Entry<String, String> entry : map.entrySet()) {
        // Chained append() avoids building an intermediate concatenated String per entry
        resultBuilder.append(entry.getKey())
                .append('=')
                .append(expandVar(entry.getValue(), map))
                .append('\n');
    }
    return resultBuilder.toString();
}
Example usage of io.strimzi.operator.common.model.OrderedProperties in the strimzi/strimzi-kafka-operator project:
class Util, method getLoggingDynamicallyUnmodifiableEntries.
/**
 * Method parses all dynamically unchangeable entries from the logging configuration.
 * @param loggingConfiguration logging configuration to be parsed
 * @return String containing all unmodifiable entries.
 */
public static String getLoggingDynamicallyUnmodifiableEntries(String loggingConfiguration) {
    OrderedProperties parsed = new OrderedProperties();
    parsed.addStringPairs(loggingConfiguration);
    // TreeMap yields the entries in sorted key order, making the result deterministic
    // regardless of the order the pairs appear in the input.
    StringBuilder unmodifiable = new StringBuilder();
    new TreeMap<>(parsed.asMap()).forEach((key, value) -> {
        // Only appender settings are dynamically unmodifiable; entries are concatenated
        // without a separator (the result is used for comparison, not re-parsing).
        if (key.startsWith("log4j.appender.") && !key.equals("monitorInterval")) {
            unmodifiable.append(key).append("=").append(value);
        }
    });
    return unmodifiable.toString();
}
Example usage of io.strimzi.operator.common.model.OrderedProperties in the strimzi/strimzi-kafka-operator project:
class AbstractModel, method getOrderedProperties.
/**
 * Read a config file and returns the properties in a deterministic order.
 *
 * @param reconciliation The reconciliation
 * @param configFileName The filename.
 * @return The OrderedProperties of the inputted file. Empty when the resource
 *         is missing or cannot be read (a warning is logged in both cases).
 * @throws IllegalArgumentException When {@code configFileName} is null or empty.
 */
public static OrderedProperties getOrderedProperties(Reconciliation reconciliation, String configFileName) {
    if (configFileName == null || configFileName.isEmpty()) {
        throw new IllegalArgumentException("configFileName must be non-empty string");
    }
    OrderedProperties properties = new OrderedProperties();
    // try-with-resources closes the stream on every path, replacing the manual
    // finally/close block (whose close failure was logged with string concatenation).
    try (InputStream is = AbstractModel.class.getResourceAsStream("/" + configFileName)) {
        if (is == null) {
            LOGGER.warnCr(reconciliation, "Cannot find resource '{}'", configFileName);
        } else {
            properties.addStringPairs(is);
        }
    } catch (IOException e) {
        // Missing/unreadable default log config is non-fatal: fall back to empty properties
        LOGGER.warnCr(reconciliation, "Unable to read default log config from '{}'", configFileName);
    }
    return properties;
}
Example usage of io.strimzi.operator.common.model.OrderedProperties in the strimzi/strimzi-kafka-operator project:
class AbstractModel, method loggingConfiguration.
/**
 * Generates the logging configuration as a String. The configuration is generated based on the default logging
 * configuration files from resources, the (optional) inline logging configuration from the custom resource
 * and the (optional) external logging configuration in a user-provided ConfigMap.
 *
 * @param logging The logging configuration from the custom resource
 * @param externalCm The user-provided ConfigMap with custom Log4j / Log4j2 file
 *
 * @return String with the Log4j / Log4j2 properties used for configuration
 */
public String loggingConfiguration(Logging logging, ConfigMap externalCm) {
    if (logging instanceof InlineLogging) {
        InlineLogging inlineLogging = (InlineLogging) logging;
        // Start from the component's default logging config and overlay the user's loggers on top
        OrderedProperties newSettings = getDefaultLogConfig();
        if (inlineLogging.getLoggers() != null) {
            // Inline logging as specified and some loggers are configured
            if (shouldPatchLoggerAppender()) {
                String rootAppenderName = getRootAppenderNamesFromDefaultLoggingConfig(newSettings);
                String newRootLogger = inlineLogging.getLoggers().get("log4j.rootLogger");
                newSettings.addMapPairs(inlineLogging.getLoggers());
                // A rootLogger value without a comma carries a level but no appender name,
                // so re-attach the appender taken from the default configuration
                if (newRootLogger != null && !rootAppenderName.isEmpty() && !newRootLogger.contains(",")) {
                    // this should never happen as appender name is added in default configuration
                    LOGGER.debugCr(reconciliation, "Newly set rootLogger does not contain appender. Setting appender to {}.", rootAppenderName);
                    String level = newSettings.asMap().get("log4j.rootLogger");
                    newSettings.addPair("log4j.rootLogger", level + ", " + rootAppenderName);
                }
            } else {
                newSettings.addMapPairs(inlineLogging.getLoggers());
            }
        }
        return createLog4jProperties(newSettings);
    } else if (logging instanceof ExternalLogging) {
        ExternalLogging externalLogging = (ExternalLogging) logging;
        // External logging requires valueFrom.configMapKeyRef.key to locate the config data
        if (externalLogging.getValueFrom() != null && externalLogging.getValueFrom().getConfigMapKeyRef() != null && externalLogging.getValueFrom().getConfigMapKeyRef().getKey() != null) {
            if (externalCm != null && externalCm.getData() != null && externalCm.getData().containsKey(externalLogging.getValueFrom().getConfigMapKeyRef().getKey())) {
                // Pass the user's file through unchanged except for (possibly) adding a monitorInterval
                return maybeAddMonitorIntervalToExternalLogging(externalCm.getData().get(externalLogging.getValueFrom().getConfigMapKeyRef().getKey()));
            } else {
                // Referenced ConfigMap or key is missing — fail reconciliation with a clear message
                throw new InvalidResourceException(String.format("ConfigMap %s with external logging configuration does not exist or doesn't contain the configuration under the %s key.", externalLogging.getValueFrom().getConfigMapKeyRef().getName(), externalLogging.getValueFrom().getConfigMapKeyRef().getKey()));
            }
        } else {
            throw new InvalidResourceException("Property logging.valueFrom has to be specified when using external logging.");
        }
    } else {
        // No logging configured in the custom resource — use the defaults from the resource files
        LOGGER.debugCr(reconciliation, "logging is not set, using default loggers");
        return createLog4jProperties(getDefaultLogConfig());
    }
}
Example usage of io.strimzi.operator.common.model.OrderedProperties in the strimzi/strimzi-kafka-operator project:
class ZookeeperPodSetTest, method testCustomizedPodSet.
// Verifies that all Pod/PodSet template customizations from the Kafka CR (labels,
// annotations, scheduling, security contexts, probes, env vars, volume mounts and
// ZooKeeper configuration) are propagated into the generated StrimziPodSet and its Pods.
@SuppressWarnings({ "checkstyle:MethodLength" })
@ParallelTest
public void testCustomizedPodSet() {
    // Prepare various template values
    Map<String, String> spsLabels = TestUtils.map("l1", "v1", "l2", "v2");
    Map<String, String> spsAnnos = TestUtils.map("a1", "v1", "a2", "v2");
    Map<String, String> podLabels = TestUtils.map("l3", "v3", "l4", "v4");
    Map<String, String> podAnnos = TestUtils.map("a3", "v3", "a4", "v4");
    HostAlias hostAlias1 = new HostAliasBuilder().withHostnames("my-host-1", "my-host-2").withIp("192.168.1.86").build();
    HostAlias hostAlias2 = new HostAliasBuilder().withHostnames("my-host-3").withIp("192.168.1.87").build();
    TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder().withTopologyKey("kubernetes.io/zone").withMaxSkew(1).withWhenUnsatisfiable("DoNotSchedule").withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()).build();
    TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder().withTopologyKey("kubernetes.io/hostname").withMaxSkew(2).withWhenUnsatisfiable("ScheduleAnyway").withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()).build();
    LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");
    Affinity affinity = new AffinityBuilder().withNewNodeAffinity().withNewRequiredDuringSchedulingIgnoredDuringExecution().withNodeSelectorTerms(new NodeSelectorTermBuilder().addNewMatchExpression().withKey("key1").withOperator("In").withValues("value1", "value2").endMatchExpression().build()).endRequiredDuringSchedulingIgnoredDuringExecution().endNodeAffinity().build();
    List<Toleration> toleration = singletonList(new TolerationBuilder().withEffect("NoExecute").withKey("key1").withOperator("Equal").withValue("value1").build());
    // Two custom container environment variables to inject into the ZooKeeper container
    ContainerEnvVar envVar1 = new ContainerEnvVar();
    String testEnvOneKey = "TEST_ENV_1";
    String testEnvOneValue = "test.env.one";
    envVar1.setName(testEnvOneKey);
    envVar1.setValue(testEnvOneValue);
    ContainerEnvVar envVar2 = new ContainerEnvVar();
    String testEnvTwoKey = "TEST_ENV_2";
    String testEnvTwoValue = "test.env.two";
    envVar2.setName(testEnvTwoKey);
    envVar2.setValue(testEnvTwoValue);
    SecurityContext securityContext = new SecurityContextBuilder().withPrivileged(false).withReadOnlyRootFilesystem(false).withAllowPrivilegeEscalation(false).withRunAsNonRoot(true).withNewCapabilities().addNewDrop("ALL").endCapabilities().build();
    String image = "my-custom-image:latest";
    // Distinct values (1..5 and 6..10) so liveness and readiness probes can't be confused below
    Probe livenessProbe = new Probe();
    livenessProbe.setInitialDelaySeconds(1);
    livenessProbe.setTimeoutSeconds(2);
    livenessProbe.setSuccessThreshold(3);
    livenessProbe.setFailureThreshold(4);
    livenessProbe.setPeriodSeconds(5);
    Probe readinessProbe = new Probe();
    readinessProbe.setInitialDelaySeconds(6);
    readinessProbe.setTimeoutSeconds(7);
    readinessProbe.setSuccessThreshold(8);
    readinessProbe.setFailureThreshold(9);
    readinessProbe.setPeriodSeconds(10);
    // Use the template values in Kafka CR
    Kafka kafka = new KafkaBuilder(KAFKA).editSpec().editZookeeper().withImage(image).withNewJvmOptions().withGcLoggingEnabled(true).endJvmOptions().withReadinessProbe(readinessProbe).withLivenessProbe(livenessProbe).withConfig(Map.of("foo", "bar")).withNewTemplate().withNewPodSet().withNewMetadata().withLabels(spsLabels).withAnnotations(spsAnnos).endMetadata().endPodSet().withNewPod().withNewMetadata().withLabels(podLabels).withAnnotations(podAnnos).endMetadata().withPriorityClassName("top-priority").withSchedulerName("my-scheduler").withHostAliases(hostAlias1, hostAlias2).withTopologySpreadConstraints(tsc1, tsc2).withAffinity(affinity).withTolerations(toleration).withEnableServiceLinks(false).withTmpDirSizeLimit("10Mi").withTerminationGracePeriodSeconds(123).withImagePullSecrets(secret1, secret2).withSecurityContext(new PodSecurityContextBuilder().withFsGroup(123L).withRunAsGroup(456L).withRunAsUser(789L).build()).endPod().withNewZookeeperContainer().withEnv(envVar1, envVar2).withSecurityContext(securityContext).endZookeeperContainer().endTemplate().endZookeeper().endSpec().build();
    // Test the resources
    ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    StrimziPodSet ps = zc.generatePodSet(3, true, null, null, Map.of("special", "annotation"));
    // PodSet-level metadata: template labels/annotations merged into the generated resource
    assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(CLUSTER)));
    assertThat(ps.getMetadata().getLabels().entrySet().containsAll(spsLabels.entrySet()), is(true));
    assertThat(ps.getMetadata().getAnnotations().entrySet().containsAll(spsAnnos.entrySet()), is(true));
    assertThat(ps.getSpec().getSelector().getMatchLabels(), is(zc.getSelectorLabels().toMap()));
    assertThat(ps.getSpec().getPods().size(), is(3));
    // We need to loop through the pods to make sure they have the right values
    List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
    for (Pod pod : pods) {
        // Pod metadata and scheduling-related template values
        assertThat(pod.getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true));
        assertThat(pod.getMetadata().getAnnotations().entrySet().containsAll(podAnnos.entrySet()), is(true));
        assertThat(pod.getMetadata().getAnnotations().get("special"), is("annotation"));
        assertThat(pod.getSpec().getPriorityClassName(), is("top-priority"));
        assertThat(pod.getSpec().getSchedulerName(), is("my-scheduler"));
        assertThat(pod.getSpec().getHostAliases(), containsInAnyOrder(hostAlias1, hostAlias2));
        assertThat(pod.getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, tsc2));
        assertThat(pod.getSpec().getEnableServiceLinks(), is(false));
        assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(123L));
        // The strimzi-tmp emptyDir volume must carry the custom size limit from the template
        assertThat(pod.getSpec().getVolumes().stream().filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")).findFirst().orElse(null).getEmptyDir().getSizeLimit(), is(new Quantity("10Mi")));
        assertThat(pod.getSpec().getImagePullSecrets().size(), is(2));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
        // Pod-level security context from the template
        assertThat(pod.getSpec().getSecurityContext(), is(notNullValue()));
        assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(123L));
        assertThat(pod.getSpec().getSecurityContext().getRunAsGroup(), is(456L));
        assertThat(pod.getSpec().getSecurityContext().getRunAsUser(), is(789L));
        assertThat(pod.getSpec().getAffinity(), is(affinity));
        assertThat(pod.getSpec().getTolerations(), is(toleration));
        // Custom env vars and container-level security context on the single ZooKeeper container
        assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, pod.getSpec().getContainers().get(0).getEnv().stream().filter(env -> testEnvOneKey.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").equals(testEnvOneValue), is(true));
        assertThat("Failed to correctly set container environment variable: " + testEnvTwoKey, pod.getSpec().getContainers().get(0).getEnv().stream().filter(env -> testEnvTwoKey.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").equals(testEnvTwoValue), is(true));
        assertThat(pod.getSpec().getContainers(), hasItem(allOf(hasProperty("name", equalTo(ZookeeperCluster.ZOOKEEPER_NAME)), hasProperty("securityContext", equalTo(securityContext)))));
        assertThat(pod.getSpec().getContainers().size(), is(1));
        assertThat(pod.getSpec().getContainers().get(0).getImage(), is(image));
        // Probe values must match the CR's custom liveness/readiness settings
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(livenessProbe.getTimeoutSeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(livenessProbe.getInitialDelaySeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getFailureThreshold(), is(livenessProbe.getFailureThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getSuccessThreshold(), is(livenessProbe.getSuccessThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getPeriodSeconds(), is(livenessProbe.getPeriodSeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(readinessProbe.getTimeoutSeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(readinessProbe.getInitialDelaySeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getFailureThreshold(), is(readinessProbe.getFailureThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getSuccessThreshold(), is(readinessProbe.getSuccessThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getPeriodSeconds(), is(readinessProbe.getPeriodSeconds()));
        assertThat(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is("true"));
        // Volume mounts in fixed order: tmp dir, data, metrics-and-logging, node certs, cluster CA
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.VOLUME_NAME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is("/var/lib/zookeeper"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is("zookeeper-metrics-and-logging"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is("/opt/kafka/custom-config/"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT));
        // The ZooKeeper configuration env var must contain the defaults plus the custom "foo=bar" pair
        OrderedProperties expectedConfig = new OrderedProperties().addMapPairs(ZookeeperConfiguration.DEFAULTS).addPair("foo", "bar");
        OrderedProperties actual = new OrderedProperties().addStringPairs(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION));
        assertThat(actual, is(expectedConfig));
    }
}
Aggregations