use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
the class TestConfigurationWithoutDefaults method testConfigurationStringWithForbiddenKeysInUpperCase.
@ParallelTest
public void testConfigurationStringWithForbiddenKeysInUpperCase() {
    String configuration = "var1=aaa" + LINE_SEPARATOR
            + "var2=bbb" + LINE_SEPARATOR
            + "var3=ccc" + LINE_SEPARATOR
            + "FORBIDDEN.OPTION=ddd" + LINE_SEPARATOR;
    OrderedProperties expectedConfiguration = createWithDefaults("var3", "ccc", "var2", "bbb", "var1", "aaa");
    AbstractConfiguration config = new TestConfiguration(configuration);
    assertThat(config.asOrderedProperties(), is(expectedConfiguration));
}
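The expected values above are built through OrderedProperties, which is essentially an insertion-order-preserving variant of java.util.Properties used throughout Strimzi's configuration handling. A minimal sketch of the calls it provides (values are illustrative; the filtering of FORBIDDEN.OPTION is done by the configuration class, not by OrderedProperties itself):
// Minimal sketch of the OrderedProperties calls used in these tests (illustrative values).
OrderedProperties props = new OrderedProperties();
props.addStringPairs("var1=aaa\nvar2=bbb");   // parse newline-separated key=value pairs
props.addPair("var3", "ccc");                 // append a single pair
assertThat(props.asMap().get("var2"), is("bbb"));
assertThat(props.asMap().size(), is(3));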
use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
the class AbstractConnectOperator method reconcileConnectors.
/**
 * Reconciles all the connectors selected by the given Connect instance, updating each connector's status with the result.
 * @param reconciliation The reconciliation
 * @param connect The KafkaConnect resource
 * @param connectStatus Status of the KafkaConnect resource (will be used to set the available
 *                      connector plugins)
 * @param scaledToZero Indicates whether the related Connect cluster is currently scaled to 0 replicas
 * @param desiredLogging The desired logging configuration
 * @param defaultLogging The default logging configuration
 * @return A future, failed if any of the connectors' statuses could not be updated.
 */
protected Future<Void> reconcileConnectors(Reconciliation reconciliation, T connect, S connectStatus, boolean scaledToZero, String desiredLogging, OrderedProperties defaultLogging) {
    String connectName = connect.getMetadata().getName();
    String namespace = connect.getMetadata().getNamespace();
    String host = KafkaConnectResources.qualifiedServiceName(connectName, namespace);

    if (!isUseResources(connect)) {
        return Future.succeededFuture();
    }

    if (scaledToZero) {
        return connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                .compose(connectors -> CompositeFuture.join(connectors.stream()
                        .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, zeroReplicas(namespace, connectName)))
                        .collect(Collectors.toList())))
                .map((Void) null);
    }

    KafkaConnectApi apiClient = connectClientProvider.apply(vertx);

    return CompositeFuture.join(
            apiClient.list(host, port),
            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())),
            apiClient.listConnectorPlugins(reconciliation, host, port),
            apiClient.updateConnectLoggers(reconciliation, host, port, desiredLogging, defaultLogging))
        .compose(cf -> {
            List<String> runningConnectorNames = cf.resultAt(0);
            List<KafkaConnector> desiredConnectors = cf.resultAt(1);
            List<ConnectorPlugin> connectorPlugins = cf.resultAt(2);

            LOGGER.debugCr(reconciliation, "Setting list of connector plugins in Kafka Connect status");
            connectStatus.setConnectorPlugins(connectorPlugins);

            Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
            deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet()));
            LOGGER.debugCr(reconciliation, "{} cluster: delete connectors: {}", kind(), deleteConnectorNames);
            Stream<Future<Void>> deletionFutures = deleteConnectorNames.stream()
                    .map(connectorName -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null));

            LOGGER.debugCr(reconciliation, "{} cluster: required connectors: {}", kind(), desiredConnectors);
            Stream<Future<Void>> createUpdateFutures = desiredConnectors.stream()
                    .map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector));

            return CompositeFuture.join(Stream.concat(deletionFutures, createUpdateFutures).collect(Collectors.toList())).map((Void) null);
        }).recover(error -> {
            if (error instanceof ConnectTimeoutException) {
                Promise<Void> connectorStatuses = Promise.promise();
                LOGGER.warnCr(reconciliation, "Failed to connect to the REST API => trying to update the connector status");

                connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                        .compose(connectors -> CompositeFuture.join(connectors.stream()
                                .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, error))
                                .collect(Collectors.toList())))
                        .onComplete(ignore -> connectorStatuses.fail(error));

                return connectorStatuses.future();
            } else {
                return Future.failedFuture(error);
            }
        });
}
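At its core, the method above computes a set difference: connector names reported by the Connect REST API minus the names of the desired KafkaConnector resources become the deletion set, while every desired connector is reconciled as a create or update. A standalone sketch of that diff, with hypothetical connector names:
// Sketch of the delete-set computation (hypothetical names): anything running in
// Connect without a matching KafkaConnector resource gets scheduled for deletion.
List<String> runningConnectorNames = List.of("sink-a", "sink-b", "source-c");
Set<String> desiredConnectorNames = Set.of("sink-a", "source-c");
Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
deleteConnectorNames.removeAll(desiredConnectorNames);
// deleteConnectorNames now contains only "sink-b"; the remaining names are reconciled as create/update.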
use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
the class KafkaConnectApiTest method testChangeLoggers.
@IsolatedTest
public void testChangeLoggers(VertxTestContext context) throws InterruptedException {
    String desired = "log4j.rootLogger=TRACE, CONSOLE\n"
            + "log4j.logger.org.apache.zookeeper=WARN\n" + "log4j.logger.org.I0Itec.zkclient=INFO\n"
            + "log4j.logger.org.reflections.Reflection=INFO\n" + "log4j.logger.org.reflections=FATAL\n"
            + "log4j.logger.foo=WARN\n" + "log4j.logger.foo.bar=TRACE\n" + "log4j.logger.foo.bar.quux=DEBUG";
    KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
    Checkpoint async = context.checkpoint();
    OrderedProperties ops = new OrderedProperties();
    ops.addStringPairs(desired);

    client.updateConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, desired, ops)
            .onComplete(context.succeeding(wasChanged -> context.verify(() -> assertEquals(true, wasChanged))))
            .compose(a -> client.listConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT)
                    .onComplete(context.succeeding(map -> context.verify(() -> {
                        assertThat(map.get("root"), is("TRACE"));
                        assertThat(map.get("org.apache.zookeeper"), is("WARN"));
                        assertThat(map.get("org.I0Itec.zkclient"), is("INFO"));
                        assertThat(map.get("org.reflections"), is("FATAL"));
                        assertThat(map.get("org.reflections.Reflection"), is("INFO"));
assertThat(map.get("org.reflections.Reflection"), is("INFO"));
assertThat(map.get("foo"), is("WARN"));
assertThat(map.get("foo.bar"), is("TRACE"));
assertThat(map.get("foo.bar.quux"), is("DEBUG"));
})))).compose(a -> client.updateConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, desired, ops).onComplete(context.succeeding(wasChanged -> context.verify(() -> {
assertEquals(false, wasChanged);
async.flag();
}))));
}
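Note that the second updateConnectLoggers call passes the same desired configuration and is expected to report no change. The sketch below only illustrates that parsing the same logging string twice yields identical pairs; it does not claim to reproduce how the client itself detects changes:
// Sketch: re-parsing the same log4j-style string produces the same pairs,
// which is consistent with a repeated update having nothing left to change.
String logging = "log4j.rootLogger=TRACE, CONSOLE\nlog4j.logger.foo=WARN";
OrderedProperties first = new OrderedProperties();
first.addStringPairs(logging);
OrderedProperties second = new OrderedProperties();
second.addStringPairs(logging);
assertThat(first.asMap(), is(second.asMap()));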
use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
the class KafkaConnectApiTest method testHierarchy.
@IsolatedTest
public void testHierarchy() {
    String rootLevel = "TRACE";
    String desired = "log4j.rootLogger=" + rootLevel + ", CONSOLE\n"
            + "log4j.logger.oorg.apache.zookeeper=WARN\n" + "log4j.logger.oorg.I0Itec.zkclient=INFO\n"
            + "log4j.logger.oorg.reflections.Reflection=INFO\n" + "log4j.logger.oorg.reflections=FATAL\n"
            + "log4j.logger.foo=WARN\n" + "log4j.logger.foo.bar=TRACE\n"
            + "log4j.logger.oorg.eclipse.jetty.util=DEBUG\n" + "log4j.logger.foo.bar.quux=DEBUG";
    KafkaConnectApiImpl client = new KafkaConnectApiImpl(vertx);
    OrderedProperties ops = new OrderedProperties();
    ops.addStringPairs(desired);

    assertEquals("TRACE", client.getEffectiveLevel("foo.bar", ops.asMap()));
    assertEquals("WARN", client.getEffectiveLevel("foo.lala", ops.asMap()));
    assertEquals(rootLevel, client.getEffectiveLevel("bar.faa", ops.asMap()));
    assertEquals("TRACE", client.getEffectiveLevel("org", ops.asMap()));
    assertEquals("DEBUG", client.getEffectiveLevel("oorg.eclipse.jetty.util.thread.strategy.EatWhatYouKill", ops.asMap()));
    assertEquals(rootLevel, client.getEffectiveLevel("oorg.eclipse.group.art", ops.asMap()));
}
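The assertions above imply that getEffectiveLevel resolves a logger by walking up its dot-separated ancestors and falling back to the root level when no ancestor is configured. The helper below is a hypothetical re-implementation of that lookup for illustration only (it is not the KafkaConnectApiImpl code) and assumes map keys of the form log4j.logger.<name> plus log4j.rootLogger, as produced by addStringPairs above:
// Hypothetical sketch of hierarchical level resolution (not the Strimzi implementation).
static String effectiveLevel(String logger, Map<String, String> log4jProps) {
    for (String name = logger; !name.isEmpty(); ) {
        String level = log4jProps.get("log4j.logger." + name);
        if (level != null) {
            return level;                                  // most specific configured ancestor wins
        }
        int dot = name.lastIndexOf('.');
        name = dot < 0 ? "" : name.substring(0, dot);      // strip the last name segment
    }
    // Fall back to the root logger; its value may also carry appenders ("TRACE, CONSOLE").
    return log4jProps.get("log4j.rootLogger").split(",")[0].trim();
}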
use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
the class ZookeeperClusterTest method checkStatefulSet.
private void checkStatefulSet(StatefulSet sts) {
    assertThat(sts.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(cluster)));
    // ... in the same namespace ...
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    // ... with these labels
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));
    assertThat(sts.getSpec().getTemplate().getSpec().getSchedulerName(), is("default-scheduler"));

    List<Container> containers = sts.getSpec().getTemplate().getSpec().getContainers();
    assertThat(containers.size(), is(1));

    // checks on the main Zookeeper container
    assertThat(sts.getSpec().getReplicas(), is(replicas));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));
    assertThat(containers.get(0).getImage(), is(image + "-zk"));
    assertThat(containers.get(0).getLivenessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(containers.get(0).getLivenessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(containers.get(0).getLivenessProbe().getFailureThreshold(), is(10));
    assertThat(containers.get(0).getLivenessProbe().getSuccessThreshold(), is(4));
    assertThat(containers.get(0).getLivenessProbe().getPeriodSeconds(), is(33));
    assertThat(containers.get(0).getReadinessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(containers.get(0).getReadinessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(containers.get(0).getReadinessProbe().getFailureThreshold(), is(10));
    assertThat(containers.get(0).getReadinessProbe().getSuccessThreshold(), is(4));
    assertThat(containers.get(0).getReadinessProbe().getPeriodSeconds(), is(33));

    OrderedProperties expectedConfig = new OrderedProperties()
            .addMapPairs(ZookeeperConfiguration.DEFAULTS)
            .addPair("foo", "bar");
    OrderedProperties actual = new OrderedProperties()
            .addStringPairs(AbstractModel.containerEnvVars(containers.get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION));
    assertThat(actual, is(expectedConfig));
    assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED),
            is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));
    assertThat(containers.get(0).getVolumeMounts().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(0).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(containers.get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT));
    assertThat(containers.get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT));
    assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().stream()
            .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
            .findFirst()
            .orElseThrow()
            .getEmptyDir().getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));
}
Aggregations