Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
From the class ZookeeperPodSetTest, method testCustomizedPodSet.
@SuppressWarnings({ "checkstyle:MethodLength" })
@ParallelTest
public void testCustomizedPodSet() {
// Prepare various template values
Map<String, String> spsLabels = TestUtils.map("l1", "v1", "l2", "v2");
Map<String, String> spsAnnos = TestUtils.map("a1", "v1", "a2", "v2");
Map<String, String> podLabels = TestUtils.map("l3", "v3", "l4", "v4");
Map<String, String> podAnnos = TestUtils.map("a3", "v3", "a4", "v4");
HostAlias hostAlias1 = new HostAliasBuilder().withHostnames("my-host-1", "my-host-2").withIp("192.168.1.86").build();
HostAlias hostAlias2 = new HostAliasBuilder().withHostnames("my-host-3").withIp("192.168.1.87").build();
TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder()
        .withTopologyKey("kubernetes.io/zone")
        .withMaxSkew(1)
        .withWhenUnsatisfiable("DoNotSchedule")
        .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build())
        .build();
TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder()
        .withTopologyKey("kubernetes.io/hostname")
        .withMaxSkew(2)
        .withWhenUnsatisfiable("ScheduleAnyway")
        .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build())
        .build();
LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");
Affinity affinity = new AffinityBuilder()
        .withNewNodeAffinity()
            .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                .withNodeSelectorTerms(new NodeSelectorTermBuilder()
                        .addNewMatchExpression()
                            .withKey("key1")
                            .withOperator("In")
                            .withValues("value1", "value2")
                        .endMatchExpression()
                        .build())
            .endRequiredDuringSchedulingIgnoredDuringExecution()
        .endNodeAffinity()
        .build();
List<Toleration> toleration = singletonList(new TolerationBuilder().withEffect("NoExecute").withKey("key1").withOperator("Equal").withValue("value1").build());
ContainerEnvVar envVar1 = new ContainerEnvVar();
String testEnvOneKey = "TEST_ENV_1";
String testEnvOneValue = "test.env.one";
envVar1.setName(testEnvOneKey);
envVar1.setValue(testEnvOneValue);
ContainerEnvVar envVar2 = new ContainerEnvVar();
String testEnvTwoKey = "TEST_ENV_2";
String testEnvTwoValue = "test.env.two";
envVar2.setName(testEnvTwoKey);
envVar2.setValue(testEnvTwoValue);
SecurityContext securityContext = new SecurityContextBuilder()
        .withPrivileged(false)
        .withReadOnlyRootFilesystem(false)
        .withAllowPrivilegeEscalation(false)
        .withRunAsNonRoot(true)
        .withNewCapabilities()
            .addNewDrop("ALL")
        .endCapabilities()
        .build();
String image = "my-custom-image:latest";
Probe livenessProbe = new Probe();
livenessProbe.setInitialDelaySeconds(1);
livenessProbe.setTimeoutSeconds(2);
livenessProbe.setSuccessThreshold(3);
livenessProbe.setFailureThreshold(4);
livenessProbe.setPeriodSeconds(5);
Probe readinessProbe = new Probe();
readinessProbe.setInitialDelaySeconds(6);
readinessProbe.setTimeoutSeconds(7);
readinessProbe.setSuccessThreshold(8);
readinessProbe.setFailureThreshold(9);
readinessProbe.setPeriodSeconds(10);
// Use the template values in Kafka CR
Kafka kafka = new KafkaBuilder(KAFKA)
        .editSpec()
            .editZookeeper()
                .withImage(image)
                .withNewJvmOptions()
                    .withGcLoggingEnabled(true)
                .endJvmOptions()
                .withReadinessProbe(readinessProbe)
                .withLivenessProbe(livenessProbe)
                .withConfig(Map.of("foo", "bar"))
                .withNewTemplate()
                    .withNewPodSet()
                        .withNewMetadata()
                            .withLabels(spsLabels)
                            .withAnnotations(spsAnnos)
                        .endMetadata()
                    .endPodSet()
                    .withNewPod()
                        .withNewMetadata()
                            .withLabels(podLabels)
                            .withAnnotations(podAnnos)
                        .endMetadata()
                        .withPriorityClassName("top-priority")
                        .withSchedulerName("my-scheduler")
                        .withHostAliases(hostAlias1, hostAlias2)
                        .withTopologySpreadConstraints(tsc1, tsc2)
                        .withAffinity(affinity)
                        .withTolerations(toleration)
                        .withEnableServiceLinks(false)
                        .withTmpDirSizeLimit("10Mi")
                        .withTerminationGracePeriodSeconds(123)
                        .withImagePullSecrets(secret1, secret2)
                        .withSecurityContext(new PodSecurityContextBuilder().withFsGroup(123L).withRunAsGroup(456L).withRunAsUser(789L).build())
                    .endPod()
                    .withNewZookeeperContainer()
                        .withEnv(envVar1, envVar2)
                        .withSecurityContext(securityContext)
                    .endZookeeperContainer()
                .endTemplate()
            .endZookeeper()
        .endSpec()
        .build();
// Test the resources
ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
StrimziPodSet ps = zc.generatePodSet(3, true, null, null, Map.of("special", "annotation"));
assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(CLUSTER)));
assertThat(ps.getMetadata().getLabels().entrySet().containsAll(spsLabels.entrySet()), is(true));
assertThat(ps.getMetadata().getAnnotations().entrySet().containsAll(spsAnnos.entrySet()), is(true));
assertThat(ps.getSpec().getSelector().getMatchLabels(), is(zc.getSelectorLabels().toMap()));
assertThat(ps.getSpec().getPods().size(), is(3));
// We need to loop through the pods to make sure they have the right values
List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
for (Pod pod : pods) {
assertThat(pod.getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true));
assertThat(pod.getMetadata().getAnnotations().entrySet().containsAll(podAnnos.entrySet()), is(true));
assertThat(pod.getMetadata().getAnnotations().get("special"), is("annotation"));
assertThat(pod.getSpec().getPriorityClassName(), is("top-priority"));
assertThat(pod.getSpec().getSchedulerName(), is("my-scheduler"));
assertThat(pod.getSpec().getHostAliases(), containsInAnyOrder(hostAlias1, hostAlias2));
assertThat(pod.getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, tsc2));
assertThat(pod.getSpec().getEnableServiceLinks(), is(false));
assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(123L));
assertThat(pod.getSpec().getVolumes().stream().filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")).findFirst().orElse(null).getEmptyDir().getSizeLimit(), is(new Quantity("10Mi")));
assertThat(pod.getSpec().getImagePullSecrets().size(), is(2));
assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true));
assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
assertThat(pod.getSpec().getSecurityContext(), is(notNullValue()));
assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(123L));
assertThat(pod.getSpec().getSecurityContext().getRunAsGroup(), is(456L));
assertThat(pod.getSpec().getSecurityContext().getRunAsUser(), is(789L));
assertThat(pod.getSpec().getAffinity(), is(affinity));
assertThat(pod.getSpec().getTolerations(), is(toleration));
assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, pod.getSpec().getContainers().get(0).getEnv().stream().filter(env -> testEnvOneKey.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").equals(testEnvOneValue), is(true));
assertThat("Failed to correctly set container environment variable: " + testEnvTwoKey, pod.getSpec().getContainers().get(0).getEnv().stream().filter(env -> testEnvTwoKey.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").equals(testEnvTwoValue), is(true));
assertThat(pod.getSpec().getContainers(), hasItem(allOf(hasProperty("name", equalTo(ZookeeperCluster.ZOOKEEPER_NAME)), hasProperty("securityContext", equalTo(securityContext)))));
assertThat(pod.getSpec().getContainers().size(), is(1));
assertThat(pod.getSpec().getContainers().get(0).getImage(), is(image));
assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(livenessProbe.getTimeoutSeconds()));
assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(livenessProbe.getInitialDelaySeconds()));
assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getFailureThreshold(), is(livenessProbe.getFailureThreshold()));
assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getSuccessThreshold(), is(livenessProbe.getSuccessThreshold()));
assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getPeriodSeconds(), is(livenessProbe.getPeriodSeconds()));
assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(readinessProbe.getTimeoutSeconds()));
assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(readinessProbe.getInitialDelaySeconds()));
assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getFailureThreshold(), is(readinessProbe.getFailureThreshold()));
assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getSuccessThreshold(), is(readinessProbe.getSuccessThreshold()));
assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getPeriodSeconds(), is(readinessProbe.getPeriodSeconds()));
assertThat(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is("true"));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is("/var/lib/zookeeper"));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is("zookeeper-metrics-and-logging"));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is("/opt/kafka/custom-config/"));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME));
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT));
OrderedProperties expectedConfig = new OrderedProperties().addMapPairs(ZookeeperConfiguration.DEFAULTS).addPair("foo", "bar");
OrderedProperties actual = new OrderedProperties().addStringPairs(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION));
assertThat(actual, is(expectedConfig));
}
}
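The final assertion in the loop relies on OrderedProperties treating a multi-line key=value string and a map of pairs as equivalent. Below is a minimal, self-contained sketch of that round-trip, using only the OrderedProperties calls that appear in the test above; the tickTime pair is illustrative, not the real ZooKeeper default set.

import io.strimzi.operator.common.model.OrderedProperties;

import java.util.Map;

public class OrderedPropertiesRoundTripSketch {
    public static void main(String[] args) {
        // Expected configuration built from a map plus one extra pair,
        // mirroring the ZookeeperConfiguration.DEFAULTS plus "foo=bar" pattern in the test
        OrderedProperties expected = new OrderedProperties()
                .addMapPairs(Map.of("tickTime", "2000"))
                .addPair("foo", "bar");

        // The same configuration parsed back from a multi-line key=value string,
        // which is the form it takes in the container environment variable
        OrderedProperties actual = new OrderedProperties()
                .addStringPairs("tickTime=2000\nfoo=bar");

        // Both representations hold the same key/value pairs
        System.out.println(expected.asMap().equals(actual.asMap()));   // true
    }
}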
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
From the class Util, method getLoggingDynamicallyUnmodifiableEntries.
/**
* Method parses all dynamically unchangeable entries from the logging configuration.
* @param loggingConfiguration logging configuration to be parsed
* @return String containing all unmodifiable entries.
*/
public static String getLoggingDynamicallyUnmodifiableEntries(String loggingConfiguration) {
OrderedProperties ops = new OrderedProperties();
ops.addStringPairs(loggingConfiguration);
StringBuilder result = new StringBuilder();
for (Map.Entry<String, String> entry : new TreeMap<>(ops.asMap()).entrySet()) {
if (entry.getKey().startsWith("log4j.appender.") && !entry.getKey().equals("monitorInterval")) {
result.append(entry.getKey()).append("=").append(entry.getValue());
}
}
return result.toString();
}
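Going by the loop above, only keys starting with log4j.appender. survive the filter, they come back in TreeMap (alphabetical) key order, and the StringBuilder appends them without a separator. A small illustrative call, assuming Util here is io.strimzi.operator.common.Util; the appender values are made up.

import io.strimzi.operator.common.Util;

public class UnmodifiableEntriesSketch {
    public static void main(String[] args) {
        String config =
                "monitorInterval=30\n"
                + "log4j.rootLogger=INFO, CONSOLE\n"
                + "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n"
                + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout";

        // Only the two log4j.appender.* entries pass the filter; they are emitted
        // back to back in alphabetical key order:
        // log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppenderlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
        System.out.println(Util.getLoggingDynamicallyUnmodifiableEntries(config));
    }
}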
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
From the class Util, method expandVars.
/**
* Load the properties and expand any variables of the form ${NAME} inside values, replacing them with their resolved values.
* Variables are resolved by looking up the property names only within the loaded map.
*
* @param env Multiline properties file as String
* @return Multiline properties file as String with variables resolved
*/
public static String expandVars(String env) {
OrderedProperties ops = new OrderedProperties();
ops.addStringPairs(env);
Map<String, String> map = ops.asMap();
StringBuilder resultBuilder = new StringBuilder();
for (Map.Entry<String, String> entry : map.entrySet()) {
resultBuilder.append(entry.getKey() + "=" + expandVar(entry.getValue(), ops.asMap()) + "\n");
}
return resultBuilder.toString();
}
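A sketch of how this might be called, assuming expandVar (not shown here) substitutes ${NAME} references from the same property set, as the Javadoc describes; the property names and paths are illustrative.

import io.strimzi.operator.common.Util;

public class ExpandVarsSketch {
    public static void main(String[] args) {
        String env =
                "KAFKA_HOME=/opt/kafka\n"
                + "LOG_DIR=${KAFKA_HOME}/logs";

        // Expected output, given the Javadoc contract: each line comes back as
        // key=value with ${...} references replaced by values from the same map:
        //   KAFKA_HOME=/opt/kafka
        //   LOG_DIR=/opt/kafka/logs
        System.out.println(Util.expandVars(env));
    }
}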
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi-kafka-operator by strimzi.
From the class AbstractConnectOperator, method createConnectorWatch.
/**
* Create a watch on {@code KafkaConnector} in the given {@code namespace}.
* The watcher will:
* <ul>
* <li>Call {@linkplain #reconcileConnectors(Reconciliation, CustomResource, KafkaConnectStatus, boolean, String, OrderedProperties)} on the KafkaConnect
* identified by {@code KafkaConnector.metadata.labels[strimzi.io/cluster]}.</li>
* <li>The {@code KafkaConnector} status is updated with the result.</li>
* </ul>
* @param connectOperator The operator for {@code KafkaConnect}.
* @param watchNamespaceOrWildcard The namespace to watch.
* @param selectorLabels Selector labels for filtering the custom resources
*
* @return A future which completes when the watch has been set up.
*/
public static Future<Watch> createConnectorWatch(AbstractConnectOperator<KubernetesClient, KafkaConnect, KafkaConnectList, Resource<KafkaConnect>, KafkaConnectSpec, KafkaConnectStatus> connectOperator, String watchNamespaceOrWildcard, Labels selectorLabels) {
Optional<LabelSelector> selector = (selectorLabels == null || selectorLabels.toMap().isEmpty()) ? Optional.empty() : Optional.of(new LabelSelector(null, selectorLabels.toMap()));
return Util.async(connectOperator.vertx, () -> {
Watch watch = connectOperator.connectorOperator.watch(watchNamespaceOrWildcard, new Watcher<KafkaConnector>() {
@Override
public void eventReceived(Action action, KafkaConnector kafkaConnector) {
String connectorName = kafkaConnector.getMetadata().getName();
String connectorNamespace = kafkaConnector.getMetadata().getNamespace();
String connectorKind = kafkaConnector.getKind();
String connectName = kafkaConnector.getMetadata().getLabels() == null ? null : kafkaConnector.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
String connectNamespace = connectorNamespace;
switch(action) {
case ADDED:
case DELETED:
case MODIFIED:
if (connectName != null) {
// Check whether a KafkaConnect exists
connectOperator.resourceOperator.getAsync(connectNamespace, connectName).compose(connect -> {
KafkaConnectApi apiClient = connectOperator.connectClientProvider.apply(connectOperator.vertx);
if (connect == null) {
Reconciliation r = new Reconciliation("connector-watch", connectOperator.kind(), kafkaConnector.getMetadata().getNamespace(), connectName);
updateStatus(r, noConnectCluster(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
LOGGER.infoCr(r, "{} {} in namespace {} was {}, but Connect cluster {} does not exist", connectorKind, connectorName, connectorNamespace, action, connectName);
return Future.succeededFuture();
} else {
// grab the lock and call reconcileConnectors()
// (i.e. short circuit doing a whole KafkaConnect reconciliation).
Reconciliation reconciliation = new Reconciliation("connector-watch", connectOperator.kind(), kafkaConnector.getMetadata().getNamespace(), connectName);
if (!Util.matchesSelector(selector, connect)) {
LOGGER.debugCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels);
return Future.succeededFuture();
} else if (connect.getSpec() != null && connect.getSpec().getReplicas() == 0) {
LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", connectorKind, connectorName, connectorNamespace, action, connectName);
updateStatus(reconciliation, zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
return Future.succeededFuture();
} else {
LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}", connectorKind, connectorName, connectorNamespace, action);
return connectOperator.withLock(reconciliation, LOCK_TIMEOUT_MS,
        () -> connectOperator.reconcileConnectorAndHandleResult(
                        reconciliation,
                        KafkaConnectResources.qualifiedServiceName(connectName, connectNamespace),
                        apiClient,
                        isUseResources(connect),
                        kafkaConnector.getMetadata().getName(),
                        action == Action.DELETED ? null : kafkaConnector)
                .compose(reconcileResult -> {
                    LOGGER.infoCr(reconciliation, "reconciled");
                    return Future.succeededFuture(reconcileResult);
                }));
}
}
});
} else {
updateStatus(new Reconciliation("connector-watch", connectOperator.kind(), kafkaConnector.getMetadata().getNamespace(), null),
        new InvalidResourceException("Resource lacks label '" + Labels.STRIMZI_CLUSTER_LABEL + "': No connect cluster in which to create this connector."),
        kafkaConnector, connectOperator.connectorOperator);
}
break;
case ERROR:
LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectName, connectorNamespace), "Failed {} {} in namespace {} ", connectorKind, connectorName, connectorNamespace);
break;
default:
LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectName, connectorNamespace), "Unknown action: {} {} in namespace {}", connectorKind, connectorName, connectorNamespace);
}
}
@Override
public void onClose(WatcherException e) {
if (e != null) {
throw new KubernetesClientException(e.getMessage());
}
}
});
return watch;
});
}
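The watcher only resolves a Connect cluster when the connector carries the strimzi.io/cluster label (the connectName lookup above); without it, the event falls straight through to the InvalidResourceException branch. A hedged sketch of a connector that would take the happy path, assuming the generated KafkaConnectorBuilder from the Strimzi api module; the resource names, namespace and connector class are illustrative.

import io.strimzi.api.kafka.model.KafkaConnector;
import io.strimzi.api.kafka.model.KafkaConnectorBuilder;
import io.strimzi.operator.common.model.Labels;

public class ConnectorWithClusterLabelSketch {
    public static KafkaConnector exampleConnector() {
        // Labels.STRIMZI_CLUSTER_LABEL ("strimzi.io/cluster") is what the watcher
        // reads to find the owning KafkaConnect cluster, here "my-connect"
        return new KafkaConnectorBuilder()
                .withNewMetadata()
                    .withName("my-source-connector")
                    .withNamespace("my-namespace")
                    .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, "my-connect")
                .endMetadata()
                .withNewSpec()
                    .withClassName("org.apache.kafka.connect.file.FileStreamSourceConnector")
                    .withTasksMax(1)
                .endSpec()
                .build();
    }
}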
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi-kafka-operator by strimzi.
From the class KafkaConnectApiTest, method testHierarchy.
@IsolatedTest
public void testHierarchy() {
String rootLevel = "TRACE";
String desired = "log4j.rootLogger=" + rootLevel + ", CONSOLE\n"
        + "log4j.logger.oorg.apache.zookeeper=WARN\n"
        + "log4j.logger.oorg.I0Itec.zkclient=INFO\n"
        + "log4j.logger.oorg.reflections.Reflection=INFO\n"
        + "log4j.logger.oorg.reflections=FATAL\n"
        + "log4j.logger.foo=WARN\n"
        + "log4j.logger.foo.bar=TRACE\n"
        + "log4j.logger.oorg.eclipse.jetty.util=DEBUG\n"
        + "log4j.logger.foo.bar.quux=DEBUG";
KafkaConnectApiImpl client = new KafkaConnectApiImpl(vertx);
OrderedProperties ops = new OrderedProperties();
ops.addStringPairs(desired);
assertEquals("TRACE", client.getEffectiveLevel("foo.bar", ops.asMap()));
assertEquals("WARN", client.getEffectiveLevel("foo.lala", ops.asMap()));
assertEquals(rootLevel, client.getEffectiveLevel("bar.faa", ops.asMap()));
assertEquals("TRACE", client.getEffectiveLevel("org", ops.asMap()));
assertEquals("DEBUG", client.getEffectiveLevel("oorg.eclipse.jetty.util.thread.strategy.EatWhatYouKill", ops.asMap()));
assertEquals(rootLevel, client.getEffectiveLevel("oorg.eclipse.group.art", ops.asMap()));
}
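getEffectiveLevel itself is not shown here, but the assertions imply the usual log4j hierarchy semantics: walk the logger name up the dotted hierarchy until a log4j.logger.* entry matches, otherwise fall back to the level part of log4j.rootLogger. An illustrative re-implementation of that lookup (not the operator's actual code) that is consistent with the assertions above:

import java.util.Map;

public class EffectiveLevelSketch {
    static String effectiveLevel(String logger, Map<String, String> entries) {
        // Walk up the dotted hierarchy: foo.bar.quux -> foo.bar -> foo
        for (String name = logger; !name.isEmpty(); ) {
            String level = entries.get("log4j.logger." + name);
            if (level != null) {
                return level.split(",")[0].trim();
            }
            int dot = name.lastIndexOf('.');
            name = dot < 0 ? "" : name.substring(0, dot);
        }
        // No matching logger entry: inherit from the root logger, whose value
        // also carries the appender list ("TRACE, CONSOLE"), so strip it
        return entries.getOrDefault("log4j.rootLogger", "").split(",")[0].trim();
    }

    public static void main(String[] args) {
        Map<String, String> entries = Map.of(
                "log4j.rootLogger", "TRACE, CONSOLE",
                "log4j.logger.foo", "WARN",
                "log4j.logger.foo.bar", "TRACE",
                "log4j.logger.oorg.eclipse.jetty.util", "DEBUG");

        System.out.println(effectiveLevel("foo.bar", entries));   // TRACE (exact match)
        System.out.println(effectiveLevel("foo.lala", entries));  // WARN (inherited from foo)
        System.out.println(effectiveLevel("bar.faa", entries));   // TRACE (falls back to root)
    }
}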