Use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class OperatorMetricsTest, the method successfulReconcile:
public void successfulReconcile(VertxTestContext context, Labels selectorLabels) {
    MetricsProvider metrics = createCleanMetricsProvider();
    AbstractWatchableStatusedResourceOperator resourceOperator = resourceOperatorWithExistingResourceWithSelectorLabel(selectorLabels);

    AbstractOperator operator = new AbstractOperator(vertx, "TestResource", resourceOperator, metrics, selectorLabels) {
        @Override
        protected Future createOrUpdate(Reconciliation reconciliation, CustomResource resource) {
            return Future.succeededFuture();
        }

        @Override
        public Set<Condition> validate(Reconciliation reconciliation, CustomResource resource) {
            return emptySet();
        }

        @Override
        protected Future<Boolean> delete(Reconciliation reconciliation) {
            return null;
        }

        @Override
        protected Status createStatus() {
            return new Status() { };
        }
    };

    Checkpoint async = context.checkpoint();
    operator.reconcile(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            MeterRegistry registry = metrics.meterRegistry();
            Tag selectorTag = Tag.of("selector", selectorLabels != null ? selectorLabels.toSelectorString() : "");

            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").meter().getId().getTags().get(2), is(selectorTag));
            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").tag("kind", "TestResource").counter().count(), is(1.0));

            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").meter().getId().getTags().get(2), is(selectorTag));
            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "TestResource").counter().count(), is(1.0));

            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").meter().getId().getTags().get(2), is(selectorTag));
            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().count(), is(1L));
            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));

            assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resource.state").tag("kind", "TestResource").tag("name", "my-resource").tag("resource-namespace", "my-namespace").gauge().value(), is(1.0));

            async.flag();
        })));
}
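The assertions above hinge on Micrometer's registry search API: registry.get(name) looks meters up by name and .tag(key, value) narrows the match. A minimal, self-contained sketch of that lookup pattern against a plain SimpleMeterRegistry (the meter name and tag values are illustrative, not Strimzi's actual METRICS_PREFIX constant):

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class MetricsLookupSketch {
    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();

        // Register and increment a counter with the same tag shape
        // ("kind", "namespace", "selector") used in the test above.
        Counter counter = Counter.builder("strimzi.reconciliations")
                .tag("kind", "TestResource")
                .tag("namespace", "my-namespace")
                .tag("selector", "")
                .register(registry);
        counter.increment();

        // Search by name, narrow by tag, read the count - the same
        // pattern as the assertThat(...) calls in the test.
        double count = registry.get("strimzi.reconciliations")
                .tag("kind", "TestResource")
                .counter()
                .count();
        System.out.println(count); // 1.0
    }
}

The test's getId().getTags().get(2) lookup works because Micrometer stores a meter's tags sorted by key, so "selector" lands at index 2 behind "kind" and "namespace".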
Use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class ResourceSupport, the method selfClosingWatch:
/**
 * Watches the given {@code watchable} using the given {@code watchFn},
 * returning a Future which completes when {@code watchFn} returns non-null
 * in response to some event on the watchable, or after a timeout.
 *
 * The given {@code watchFn} will be invoked on a worker thread when the
 * Kubernetes resource changes, so it may block.
 * When the {@code watchFn} returns non-null, the watch will be closed and then
 * the future returned from this method will be completed on the context thread.
 *
 * In some cases, such as resource deletion, it might happen that the resource is already deleted before the watch is
 * started and as a result the watch never completes. The {@code preCheckFn} will be invoked on a worker thread
 * after the watch has been created. It is expected to double-check whether we still need to wait for the watch to fire.
 * When the {@code preCheckFn} returns non-null, the watch will be closed and the future returned from this method
 * will be completed with the result of the {@code preCheckFn} on the context thread. In the deletion example
 * described above, the {@code preCheckFn} can check whether the resource still exists and close the watch in case it was
 * already deleted.
 *
 * @param reconciliation Reconciliation marker used for logging.
 * @param watchable The watchable - used to watch the resource.
 * @param gettable The Gettable - used to get the resource in the pre-check.
 * @param operationTimeoutMs The timeout in ms.
 * @param watchFnDescription A description of what {@code watchFn} is watching for.
 *        E.g. "observe ${condition} of ${kind} ${namespace}/${name}".
 * @param watchFn The function to determine if the event occurred.
 * @param preCheckFn Pre-check function to avoid the situation where the watch never fires because it was started too late.
 * @param <T> The type of watched resource.
 * @param <U> The result type of the {@code watchFn}.
 *
 * @return A Future which completes when the {@code watchFn} returns non-null
 *         in response to some Kubernetes event on the watched resource(s).
 */
<T, U> Future<U> selfClosingWatch(Reconciliation reconciliation, Watchable<Watcher<T>> watchable, Gettable<T> gettable, long operationTimeoutMs, String watchFnDescription, BiFunction<Watcher.Action, T, U> watchFn, Function<T, U> preCheckFn) {
    return new Watcher<T>() {

        private final Promise<Watch> watchPromise;
        private final Promise<U> donePromise;
        private final Promise<U> resultPromise;
        private final long timerId;

        /* init */
        {
            this.watchPromise = Promise.promise();
            this.donePromise = Promise.promise();
            this.resultPromise = Promise.promise();
            this.timerId = vertx.setTimer(operationTimeoutMs, ignored -> donePromise.tryFail(new TimeoutException("\"" + watchFnDescription + "\" timed out after " + operationTimeoutMs + "ms")));

            CompositeFuture.join(watchPromise.future(), donePromise.future()).onComplete(joinResult -> {
                Future<Void> closeFuture;
                if (watchPromise.future().succeeded()) {
                    closeFuture = closeOnWorkerThread(watchPromise.future().result());
                } else {
                    closeFuture = Future.succeededFuture();
                }

                closeFuture.onComplete(closeResult -> vertx.runOnContext(ignored2 -> {
                    LOGGER.debugCr(reconciliation, "Completing watch future");
                    if (joinResult.succeeded() && closeResult.succeeded()) {
                        resultPromise.complete(joinResult.result().resultAt(1));
                    } else {
                        resultPromise.fail(collectCauses(joinResult, closeResult));
                    }
                }));
            });

            try {
                Watch watch = watchable.watch(this);
                LOGGER.debugCr(reconciliation, "Opened watch {} for evaluation of {}", watch, watchFnDescription);

                // The pre-check is done after the watch is opened to make sure we did not miss the event. In the worst
                // case, both the pre-check and the watch complete the future. But at least one of them should always complete it.
                U apply = preCheckFn.apply(gettable.get());
                if (apply != null) {
                    LOGGER.debugCr(reconciliation, "Pre-check is already complete, no need to wait for the watch: {}", watchFnDescription);
                    donePromise.tryComplete(apply);
                    vertx.cancelTimer(timerId);
                } else {
                    LOGGER.debugCr(reconciliation, "Pre-check is not complete yet, let's wait for the watch: {}", watchFnDescription);
                }

                watchPromise.complete(watch);
            } catch (Throwable t) {
                watchPromise.fail(t);
            }
        }

        @Override
        public void eventReceived(Action action, T resource) {
            vertx.executeBlocking(f -> {
                try {
                    U apply = watchFn.apply(action, resource);
                    if (apply != null) {
                        LOGGER.debugCr(reconciliation, "Satisfied: {}", watchFnDescription);
                        f.tryComplete(apply);
                        vertx.cancelTimer(timerId);
                    } else {
                        LOGGER.debugCr(reconciliation, "Not yet satisfied: {}", watchFnDescription);
                    }
                } catch (Throwable t) {
                    if (!f.tryFail(t)) {
                        LOGGER.debugCr(reconciliation, "Ignoring exception thrown while evaluating watch {} because the future was already completed", watchFnDescription, t);
                    }
                }
            }, true, donePromise);
        }

        @Override
        public void onClose(WatcherException cause) {
        }
    }.resultPromise.future();
}
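Stripped of Vert.x and the Kubernetes client, the concurrency pattern here is three writers racing to complete one future: the watch callback, the pre-check, and the timeout; whichever finishes first wins, and late completions are silently ignored. A framework-free sketch of that race using only java.util.concurrent (all names and delays are hypothetical, not Strimzi code):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SelfClosingWaitSketch {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> done = new CompletableFuture<>();
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);

        // Timeout: fails the future unless something else completed it first,
        // mirroring donePromise.tryFail(new TimeoutException(...)).
        scheduler.schedule(
            () -> done.completeExceptionally(new TimeoutException("watch timed out")),
            5, TimeUnit.SECONDS);

        // Simulated watch event arriving later; complete() is a no-op once
        // the future is already done, like Promise.tryComplete().
        scheduler.schedule(() -> done.complete("event observed"), 1, TimeUnit.SECONDS);

        // Pre-check runs right after the "watch" is registered: if the
        // condition already holds, complete immediately instead of waiting.
        boolean alreadySatisfied = false; // pretend we looked the resource up
        if (alreadySatisfied) {
            done.complete("pre-check satisfied");
        }

        System.out.println(done.get()); // "event observed" after ~1s
        scheduler.shutdown();
    }
}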
Use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class AllNamespaceIsolatedST, the method testUserInDifferentNamespace:
@IsolatedTest
void testUserInDifferentNamespace(ExtensionContext extensionContext) {
    String startingNamespace = cluster.setNamespace(SECOND_NAMESPACE);
    KafkaUser user = KafkaUserTemplates.tlsUser(MAIN_NAMESPACE_CLUSTER_NAME, USER_NAME).build();
    resourceManager.createResource(extensionContext, user);

    Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(SECOND_NAMESPACE).withName(USER_NAME).get()
        .getStatus().getConditions().get(0);

    LOGGER.info("KafkaUser condition status: {}", kafkaCondition.getStatus());
    LOGGER.info("KafkaUser condition type: {}", kafkaCondition.getType());
    assertThat(kafkaCondition.getType(), is(Ready.toString()));

    List<Secret> secretsOfSecondNamespace = kubeClient(SECOND_NAMESPACE).listSecrets();
    cluster.setNamespace(THIRD_NAMESPACE);

    for (Secret s : secretsOfSecondNamespace) {
        if (s.getMetadata().getName().equals(USER_NAME)) {
            LOGGER.info("Copying secret {} from namespace {} to namespace {}", s, SECOND_NAMESPACE, THIRD_NAMESPACE);
            copySecret(s, THIRD_NAMESPACE, USER_NAME);
        }
    }

    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, MAIN_NAMESPACE_CLUSTER_NAME + "-" + Constants.KAFKA_CLIENTS, user).build());

    final String defaultKafkaClientsPodName = ResourceManager.kubeClient().listPodsByPrefixInName(MAIN_NAMESPACE_CLUSTER_NAME + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(defaultKafkaClientsPodName)
        .withTopicName(TOPIC_NAME)
        .withNamespaceName(THIRD_NAMESPACE)
        .withClusterName(MAIN_NAMESPACE_CLUSTER_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(USER_NAME)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Checking produced and consumed messages to pod:{}", defaultKafkaClientsPodName);

    int sent = internalKafkaClient.sendMessagesTls();
    assertThat(sent, is(MESSAGE_COUNT));
    int received = internalKafkaClient.receiveMessagesTls();
    assertThat(received, is(MESSAGE_COUNT));

    cluster.setNamespace(startingNamespace);
}
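The copySecret(...) call above is a Strimzi test helper. For illustration, here is roughly what such a helper might look like written directly against the fabric8 client; this is a hedged sketch assuming a fabric8 6.x KubernetesClient, with hypothetical namespaces and names. The key detail is rebuilding the Secret without server-managed metadata (resourceVersion, uid), which would otherwise prevent creating it in another namespace:

import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

public class SecretCopySketch {
    // Hypothetical equivalent of the test's copySecret helper.
    static void copySecret(KubernetesClient client, Secret source,
                           String targetNamespace, String targetName) {
        // Rebuild the Secret with fresh metadata so the API server
        // accepts it as a new object in the target namespace.
        Secret copy = new SecretBuilder()
                .withNewMetadata()
                    .withName(targetName)
                    .withNamespace(targetNamespace)
                .endMetadata()
                .withType(source.getType())
                .withData(source.getData())
                .build();
        client.secrets().inNamespace(targetNamespace).resource(copy).create();
    }

    public static void main(String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            Secret user = client.secrets().inNamespace("second-ns").withName("my-user").get();
            copySecret(client, user, "third-ns", "my-user");
        }
    }
}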
Use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class ConnectIsolatedST, the method testScaleConnectWithConnectorToZero:
@ParallelNamespaceTest
@Tag(SCALABILITY)
@Tag(CONNECTOR_OPERATOR)
void testScaleConnectWithConnectorToZero(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 2)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .build());
    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
        .editSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
            .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("topics", topicName)
        .endSpec()
        .build());

    String connectDeploymentName = KafkaConnectResources.deploymentName(clusterName);
    List<Pod> connectPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaConnectResources.deploymentName(clusterName));
    assertThat(connectPods.size(), is(2));

    // scale down
    LOGGER.info("Scaling KafkaConnect down to zero");
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kafkaConnect -> kafkaConnect.getSpec().setReplicas(0), namespaceName);

    KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName);
    PodUtils.waitForPodsReady(namespaceName, kubeClient(namespaceName).getDeploymentSelectors(namespaceName, connectDeploymentName), 0, true);

    connectPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, connectDeploymentName);
    KafkaConnectStatus connectStatus = KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus();
    KafkaConnectorStatus connectorStatus = KafkaConnectorResource.kafkaConnectorClient().inNamespace(namespaceName).withName(clusterName).get().getStatus();

    assertThat(connectPods.size(), is(0));
    assertThat(connectStatus.getConditions().get(0).getType(), is(Ready.toString()));
    assertThat(connectorStatus.getConditions().stream().anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
    assertThat(connectorStatus.getConditions().stream().anyMatch(condition -> condition.getMessage().contains("has 0 replicas")), is(true));
}
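The two stream-based assertions at the end inspect io.fabric8.kubernetes.api.model.Condition objects directly. A small self-contained sketch of the same checks against hand-built conditions (using fabric8's generated ConditionBuilder; the message text mimics the operator's output but is written by hand here):

import io.fabric8.kubernetes.api.model.Condition;
import io.fabric8.kubernetes.api.model.ConditionBuilder;
import java.util.List;

public class ConditionCheckSketch {
    public static void main(String[] args) {
        // Hypothetical status conditions mimicking the scaled-to-zero state above.
        List<Condition> conditions = List.of(
            new ConditionBuilder()
                .withType("NotReady")
                .withStatus("True")
                .withMessage("Kafka Connect cluster 'my-connect' has 0 replicas")
                .build());

        // Same predicates as the test's assertThat(...) calls.
        boolean notReady = conditions.stream()
            .anyMatch(c -> "NotReady".equals(c.getType()));
        boolean zeroReplicas = conditions.stream()
            .anyMatch(c -> c.getMessage() != null && c.getMessage().contains("has 0 replicas"));

        System.out.println(notReady + " " + zeroReplicas); // true true
    }
}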
Use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class ConnectBuilderIsolatedST, the method testBuildFailsWithWrongChecksumOfArtifact:
@ParallelTest
void testBuildFailsWithWrongChecksumOfArtifact(ExtensionContext extensionContext) {
    String connectClusterName = mapWithClusterNames.get(extensionContext.getDisplayName()) + "-connect";
    String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String imageName = getImageNameForTestCase();

    Plugin pluginWithWrongChecksum = new PluginBuilder()
        .withName("connector-with-wrong-checksum")
        .withArtifacts(new JarArtifactBuilder().withUrl(ECHO_SINK_JAR_URL).withSha512sum(ECHO_SINK_JAR_WRONG_CHECKSUM).build())
        .build();

    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());

    String kafkaClientsPodName = kubeClient(INFRA_NAMESPACE).listPodsByPrefixInName(kafkaClientsName).get(0).getMetadata().getName();

    resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnect(extensionContext, connectClusterName, INFRA_NAMESPACE, INFRA_NAMESPACE, 1)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .editOrNewSpec()
            .withNewBuild()
                .withPlugins(pluginWithWrongChecksum)
                .withNewDockerOutput()
                    .withImage(imageName)
                .endDockerOutput()
            .endBuild()
        .endSpec()
        .build());

    KafkaConnectUtils.waitForConnectNotReady(connectClusterName);
    KafkaConnectUtils.waitUntilKafkaConnectStatusConditionContainsMessage(connectClusterName, INFRA_NAMESPACE, "The Kafka Connect build failed(.*)?");

    LOGGER.info("Checking if KafkaConnect status condition contains message about build failure");
    KafkaConnect kafkaConnect = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(connectClusterName).get();

    LOGGER.info("Deploying network policies for KafkaConnect");
    NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, kafkaConnect, KafkaConnectResources.deploymentName(connectClusterName));

    Condition connectCondition = kafkaConnect.getStatus().getConditions().stream().findFirst().orElseThrow();
    assertTrue(connectCondition.getMessage().matches("The Kafka Connect build failed(.*)?"));
    assertThat(connectCondition.getType(), is(NotReady.toString()));

    LOGGER.info("Replacing plugin's checksum with right one");
    KafkaConnectResource.replaceKafkaConnectResource(connectClusterName, kC -> {
        Plugin pluginWithRightChecksum = new PluginBuilder()
            .withName("connector-with-right-checksum")
            .withArtifacts(new JarArtifactBuilder().withUrl(ECHO_SINK_JAR_URL).withSha512sum(ECHO_SINK_JAR_CHECKSUM).build())
            .build();
        kC.getSpec().getBuild().getPlugins().remove(0);
        kC.getSpec().getBuild().getPlugins().add(pluginWithRightChecksum);
    });

    KafkaConnectUtils.waitForConnectReady(connectClusterName);

    LOGGER.info("Checking if KafkaConnect API contains EchoSink connector");
    String plugins = cmdKubeClient().execInPod(kafkaClientsPodName, "curl", "-X", "GET", "http://" + KafkaConnectResources.serviceName(connectClusterName) + ":8083/connector-plugins").out();
    assertTrue(plugins.contains(ECHO_SINK_CLASS_NAME));

    LOGGER.info("Checking if KafkaConnect resource contains EchoSink connector in status");
    kafkaConnect = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(connectClusterName).get();
    assertTrue(kafkaConnect.getStatus().getConnectorPlugins().stream().anyMatch(connectorPlugin -> connectorPlugin.getConnectorClass().contains(ECHO_SINK_CLASS_NAME)));
}
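The test triggers the failure by supplying a wrong withSha512sum(...) value; the Connect build verifies the downloaded artifact against that digest. For reference, such a checksum can be computed with the JDK alone. A sketch assuming Java 17+ (for HexFormat); the input bytes are a stand-in for the real jar, which you would normally hash with sha512sum on the command line:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.HexFormat;

public class Sha512Sketch {
    public static void main(String[] args) throws Exception {
        // Stand-in for the jar's bytes; in practice, read the artifact file.
        byte[] artifact = "pretend-jar-bytes".getBytes(StandardCharsets.UTF_8);

        MessageDigest digest = MessageDigest.getInstance("SHA-512");
        String sha512sum = HexFormat.of().formatHex(digest.digest(artifact));
        System.out.println(sha512sum); // 128 hex characters
    }
}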