Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi-kafka-operator by strimzi.
Class TestConfigurationWithoutDefaults, method testJsonWithDuplicates.
@ParallelTest
public void testJsonWithDuplicates() {
    JsonObject configuration = new JsonObject().put("var1", "aaa").put("var2", "bbb").put("var3", "ccc").put("var2", "ddd");
    OrderedProperties expectedConfiguration = createWithDefaults("var3", "ccc", "var2", "ddd", "var1", "aaa");
    AbstractConfiguration config = new TestConfiguration(configuration);
    assertThat(config.asOrderedProperties(), is(expectedConfiguration));
}
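The expected value is built by createWithDefaults, a helper of the test class that is not shown in this snippet. A minimal sketch of what such a helper could look like, assuming it simply packs alternating key/value arguments into an OrderedProperties via addPair (the varargs signature and any baked-in defaults are assumptions, not taken from the source):
// Hypothetical helper sketch; the real createWithDefaults(...) is defined in the test class.
static OrderedProperties createWithDefaults(String... keyValuePairs) {
    OrderedProperties properties = new OrderedProperties();
    for (int i = 0; i < keyValuePairs.length; i += 2) {
        // addPair is the same method the ZookeeperClusterTest snippet below uses
        properties.addPair(keyValuePairs[i], keyValuePairs[i + 1]);
    }
    return properties;
}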
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi-kafka-operator by strimzi.
Class TestConfigurationWithoutDefaults, method testWithHostPort.
@ParallelTest
public void testWithHostPort() {
    JsonObject configuration = new JsonObject().put("option.with.port", "my-server:9092");
    OrderedProperties expectedConfiguration = createWithDefaults("option.with.port", "my-server:9092");
    AbstractConfiguration config = new TestConfiguration(configuration);
    assertThat(config.asOrderedProperties(), is(expectedConfiguration));
}
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi-kafka-operator by strimzi.
Class TestConfigurationWithoutDefaults, method testConfigurationStringWithDuplicates.
@ParallelTest
public void testConfigurationStringWithDuplicates() {
    String configuration = "var1=aaa" + LINE_SEPARATOR + "var2=bbb" + LINE_SEPARATOR + "var3=ccc" + LINE_SEPARATOR + "var2=ddd" + LINE_SEPARATOR;
    OrderedProperties expectedConfiguration = createWithDefaults("var3", "ccc", "var2", "ddd", "var1", "aaa");
    AbstractConfiguration config = new TestConfiguration(configuration);
    assertThat(config.asOrderedProperties(), is(expectedConfiguration));
}
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi-kafka-operator by strimzi.
Class ZookeeperClusterTest, method checkStatefulSet.
private void checkStatefulSet(StatefulSet sts) {
    assertThat(sts.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(cluster)));
    // ... in the same namespace ...
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    // ... with these labels
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));
    assertThat(sts.getSpec().getTemplate().getSpec().getSchedulerName(), is("default-scheduler"));

    List<Container> containers = sts.getSpec().getTemplate().getSpec().getContainers();
    assertThat(containers.size(), is(1));

    // checks on the main Zookeeper container
    assertThat(sts.getSpec().getReplicas(), is(replicas));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));
    assertThat(containers.get(0).getImage(), is(image + "-zk"));
    assertThat(containers.get(0).getLivenessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(containers.get(0).getLivenessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(containers.get(0).getLivenessProbe().getFailureThreshold(), is(10));
    assertThat(containers.get(0).getLivenessProbe().getSuccessThreshold(), is(4));
    assertThat(containers.get(0).getLivenessProbe().getPeriodSeconds(), is(33));
    assertThat(containers.get(0).getReadinessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(containers.get(0).getReadinessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(containers.get(0).getReadinessProbe().getFailureThreshold(), is(10));
    assertThat(containers.get(0).getReadinessProbe().getSuccessThreshold(), is(4));
    assertThat(containers.get(0).getReadinessProbe().getPeriodSeconds(), is(33));

    OrderedProperties expectedConfig = new OrderedProperties().addMapPairs(ZookeeperConfiguration.DEFAULTS).addPair("foo", "bar");
    OrderedProperties actual = new OrderedProperties().addStringPairs(AbstractModel.containerEnvVars(containers.get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION));
    assertThat(actual, is(expectedConfig));
    assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));

    assertThat(containers.get(0).getVolumeMounts().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(0).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(containers.get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT));
    assertThat(containers.get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT));

    assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().stream()
            .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
            .findFirst()
            .orElseThrow()
            .getEmptyDir()
            .getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));
}
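The configuration assertion above relies on OrderedProperties round-tripping the ZooKeeper settings between a defaults map, a single env var string and back. A minimal sketch of that round trip, assuming asPairs() is the serialisation counterpart of addStringPairs() (only addMapPairs, addPair and addStringPairs appear in the test itself):
// Illustrative round trip, not taken from the test: the operator renders the ZooKeeper
// configuration into one env var as "key=value" lines, which the test parses back.
OrderedProperties written = new OrderedProperties()
        .addMapPairs(ZookeeperConfiguration.DEFAULTS)   // operator-side defaults
        .addPair("foo", "bar");                         // user-supplied override from the Kafka CR

String envVarValue = written.asPairs();                 // assumed serialiser to "key=value" lines

OrderedProperties parsed = new OrderedProperties().addStringPairs(envVarValue);
// parsed is expected to equal written, mirroring assertThat(actual, is(expectedConfig)) above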
Use of io.strimzi.operator.common.model.OrderedProperties in project strimzi by strimzi.
Class AbstractConnectOperator, method createConnectorWatch.
/**
 * Create a watch on {@code KafkaConnector} in the given {@code namespace}.
 * The watcher will:
 * <ul>
 * <li>Call {@linkplain #reconcileConnectors(Reconciliation, CustomResource, KafkaConnectStatus, boolean, String, OrderedProperties)} on the KafkaConnect
 * identified by {@code KafkaConnector.metadata.labels[strimzi.io/cluster]}.</li>
 * <li>Update the {@code KafkaConnector} status with the result.</li>
 * </ul>
 * @param connectOperator The operator for {@code KafkaConnect}.
 * @param watchNamespaceOrWildcard The namespace to watch.
 * @param selectorLabels Selector labels for filtering the custom resources.
 *
 * @return A future which completes when the watch has been set up.
 */
public static Future<Watch> createConnectorWatch(
        AbstractConnectOperator<KubernetesClient, KafkaConnect, KafkaConnectList, Resource<KafkaConnect>, KafkaConnectSpec, KafkaConnectStatus> connectOperator,
        String watchNamespaceOrWildcard, Labels selectorLabels) {
    Optional<LabelSelector> selector = (selectorLabels == null || selectorLabels.toMap().isEmpty()) ? Optional.empty() : Optional.of(new LabelSelector(null, selectorLabels.toMap()));

    return Util.async(connectOperator.vertx, () -> {
        Watch watch = connectOperator.connectorOperator.watch(watchNamespaceOrWildcard, new Watcher<KafkaConnector>() {
            @Override
            public void eventReceived(Action action, KafkaConnector kafkaConnector) {
                String connectorName = kafkaConnector.getMetadata().getName();
                String connectorNamespace = kafkaConnector.getMetadata().getNamespace();
                String connectorKind = kafkaConnector.getKind();
                String connectName = kafkaConnector.getMetadata().getLabels() == null ? null : kafkaConnector.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
                String connectNamespace = connectorNamespace;

                switch (action) {
                    case ADDED:
                    case DELETED:
                    case MODIFIED:
                        if (connectName != null) {
                            // Check whether a KafkaConnect exists
                            connectOperator.resourceOperator.getAsync(connectNamespace, connectName).compose(connect -> {
                                KafkaConnectApi apiClient = connectOperator.connectClientProvider.apply(connectOperator.vertx);
                                if (connect == null) {
                                    Reconciliation r = new Reconciliation("connector-watch", connectOperator.kind(), kafkaConnector.getMetadata().getNamespace(), connectName);
                                    updateStatus(r, noConnectCluster(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
                                    LOGGER.infoCr(r, "{} {} in namespace {} was {}, but Connect cluster {} does not exist", connectorKind, connectorName, connectorNamespace, action, connectName);
                                    return Future.succeededFuture();
                                } else {
                                    // grab the lock and call reconcileConnectors()
                                    // (i.e. short circuit doing a whole KafkaConnect reconciliation).
                                    Reconciliation reconciliation = new Reconciliation("connector-watch", connectOperator.kind(), kafkaConnector.getMetadata().getNamespace(), connectName);
                                    if (!Util.matchesSelector(selector, connect)) {
                                        LOGGER.debugCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels);
                                        return Future.succeededFuture();
                                    } else if (connect.getSpec() != null && connect.getSpec().getReplicas() == 0) {
                                        LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", connectorKind, connectorName, connectorNamespace, action, connectName);
                                        updateStatus(reconciliation, zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
                                        return Future.succeededFuture();
                                    } else {
                                        LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}", connectorKind, connectorName, connectorNamespace, action);
                                        return connectOperator.withLock(reconciliation, LOCK_TIMEOUT_MS, () ->
                                                connectOperator.reconcileConnectorAndHandleResult(reconciliation, KafkaConnectResources.qualifiedServiceName(connectName, connectNamespace), apiClient, isUseResources(connect), kafkaConnector.getMetadata().getName(), action == Action.DELETED ? null : kafkaConnector)
                                                        .compose(reconcileResult -> {
                                                            LOGGER.infoCr(reconciliation, "reconciled");
                                                            return Future.succeededFuture(reconcileResult);
                                                        }));
                                    }
                                }
                            });
                        } else {
                            updateStatus(new Reconciliation("connector-watch", connectOperator.kind(), kafkaConnector.getMetadata().getNamespace(), null), new InvalidResourceException("Resource lacks label '" + Labels.STRIMZI_CLUSTER_LABEL + "': No connect cluster in which to create this connector."), kafkaConnector, connectOperator.connectorOperator);
                        }
                        break;
                    case ERROR:
                        LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectName, connectorNamespace), "Failed {} {} in namespace {} ", connectorKind, connectorName, connectorNamespace);
                        break;
                    default:
                        LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectName, connectorNamespace), "Unknown action: {} {} in namespace {}", connectorKind, connectorName, connectorNamespace);
                }
            }

            @Override
            public void onClose(WatcherException e) {
                if (e != null) {
                    throw new KubernetesClientException(e.getMessage());
                }
            }
        });
        return watch;
    });
}
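For context, a hedged sketch of how a caller might wire this watch up during operator start-up; the connectOperator, watchNamespaceOrWildcard and selectorLabels variables are illustrative stand-ins, and the Future combinators assume a Vert.x 4 Future:
// Hypothetical start-up wiring; the surrounding variables are not from this class.
Future<Watch> connectorWatch =
        AbstractConnectOperator.createConnectorWatch(connectOperator, watchNamespaceOrWildcard, selectorLabels);

connectorWatch
        .onSuccess(watch -> {
            // keep a reference so the Watch can be closed on operator shutdown
        })
        .onFailure(error -> {
            // propagate the failure, e.g. fail the operator's start-up future
        });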