Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
Class CustomResourceStatusIsolatedST, method assertKafkaTopicWrongMinInSyncReplicasStatus.
void assertKafkaTopicWrongMinInSyncReplicasStatus(String topicName, String invalidValue) {
    KafkaTopicStatus kafkaTopicStatus = KafkaTopicResource.kafkaTopicClient()
        .inNamespace(clusterOperator.getDeploymentNamespace()).withName(topicName).get().getStatus();
    assertThat(kafkaTopicStatus.getConditions().stream()
        .anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
    assertThat(kafkaTopicStatus.getConditions().stream()
        .anyMatch(condition -> condition.getReason().equals("InvalidRequestException")), is(true));
    assertThat(kafkaTopicStatus.getConditions().stream()
        .anyMatch(condition -> condition.getMessage().contains(String.format("Invalid value %s for configuration min.insync.replicas", invalidValue))), is(true));
}
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
Class CustomResourceStatusIsolatedST, method assertKafkaTopicDecreasePartitionsStatus.
void assertKafkaTopicDecreasePartitionsStatus(String topicName) {
    KafkaTopicStatus kafkaTopicStatus = KafkaTopicResource.kafkaTopicClient()
        .inNamespace(clusterOperator.getDeploymentNamespace()).withName(topicName).get().getStatus();
    assertThat(kafkaTopicStatus.getConditions().stream()
        .anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
    assertThat(kafkaTopicStatus.getConditions().stream()
        .anyMatch(condition -> condition.getReason().equals("PartitionDecreaseException")), is(true));
    assertThat(kafkaTopicStatus.getConditions().stream()
        .anyMatch(condition -> condition.getMessage().contains("Number of partitions cannot be decreased")), is(true));
}
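Both methods above repeat the same stream-and-anyMatch check against the KafkaTopic status conditions. The helper below is a hypothetical refactoring of that pattern, not part of the Strimzi test suite; it folds type, reason, and message into a single predicate, which is slightly stricter than the three independent assertions used above.
// Hypothetical helper, shown only to illustrate the condition-matching pattern above;
// it requires type, reason, and message to match on the same Condition object.
static boolean hasNotReadyCondition(List<Condition> conditions, String reason, String messageFragment) {
    return conditions != null && conditions.stream().anyMatch(c ->
        "NotReady".equals(c.getType())
            && reason.equals(c.getReason())
            && c.getMessage() != null
            && c.getMessage().contains(messageFragment));
}
With such a helper, assertKafkaTopicDecreasePartitionsStatus could reduce to a single assertThat(hasNotReadyCondition(kafkaTopicStatus.getConditions(), "PartitionDecreaseException", "Number of partitions cannot be decreased"), is(true)).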
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
Class AllNamespaceIsolatedST, method testUserInDifferentNamespace.
@IsolatedTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testUserInDifferentNamespace(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, SECOND_NAMESPACE);
    String startingNamespace = cluster.setNamespace(SECOND_NAMESPACE);

    // Create a TLS KafkaUser in the second namespace and check its first status condition
    KafkaUser user = KafkaUserTemplates.tlsUser(MAIN_NAMESPACE_CLUSTER_NAME, USER_NAME).build();
    resourceManager.createResource(extensionContext, user);
    Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(SECOND_NAMESPACE)
        .withName(USER_NAME).get().getStatus().getConditions().get(0);
    LOGGER.info("KafkaUser condition status: {}", kafkaCondition.getStatus());
    LOGGER.info("KafkaUser condition type: {}", kafkaCondition.getType());
    assertThat(kafkaCondition.getType(), is(Ready.toString()));

    // Copy the user's secret into the third namespace so TLS clients there can authenticate
    List<Secret> secretsOfSecondNamespace = kubeClient(SECOND_NAMESPACE).listSecrets();
    cluster.setNamespace(THIRD_NAMESPACE);
    for (Secret s : secretsOfSecondNamespace) {
        if (s.getMetadata().getName().equals(USER_NAME)) {
            LOGGER.info("Copying secret {} from namespace {} to namespace {}", s, SECOND_NAMESPACE, THIRD_NAMESPACE);
            copySecret(s, THIRD_NAMESPACE, USER_NAME);
        }
    }

    // Produce and consume messages over TLS from the third namespace
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(TOPIC_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(MAIN_NAMESPACE_CLUSTER_NAME))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(THIRD_NAMESPACE)
        .withUserName(USER_NAME)
        .build();
    resourceManager.createResource(extensionContext,
        kafkaClients.producerTlsStrimzi(MAIN_NAMESPACE_CLUSTER_NAME),
        kafkaClients.consumerTlsStrimzi(MAIN_NAMESPACE_CLUSTER_NAME));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), THIRD_NAMESPACE, MESSAGE_COUNT);

    cluster.setNamespace(startingNamespace);
}
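The copySecret helper invoked above is not part of this snippet. A minimal sketch, assuming a fabric8 KubernetesClient field named client, could look like the following; the field name and builder usage are assumptions rather than the Strimzi test suite's actual implementation.
// Hypothetical sketch of copySecret; "client" (a fabric8 KubernetesClient) and the
// builder usage are assumptions, not the actual Strimzi test-suite implementation.
void copySecret(Secret source, String targetNamespace, String targetName) {
    Secret copy = new SecretBuilder()
        .withNewMetadata()
            .withName(targetName)
            .withNamespace(targetNamespace)
        .endMetadata()
        .withType(source.getType())
        .withData(source.getData())
        .build();
    client.secrets().inNamespace(targetNamespace).resource(copy).create();
}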
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
Class TopicOperatorBaseIT, method assertStatusReady.
protected void assertStatusReady(String topicName) throws InterruptedException, ExecutionException, TimeoutException {
    waitFor(() -> {
        KafkaTopic kafkaTopic = operation().inNamespace(NAMESPACE).withName(topicName).get();
        if (kafkaTopic != null) {
            KafkaTopicStatus status = kafkaTopic.getStatus();
            if (status != null
                    && Objects.equals(status.getObservedGeneration(), kafkaTopic.getMetadata().getGeneration())
                    && status.getConditions() != null) {
                List<Condition> conditions = status.getConditions();
                assertThat(conditions.size() > 0, is(true));
                if (conditions.stream().anyMatch(condition ->
                        "Ready".equals(condition.getType()) && "True".equals(condition.getStatus()))) {
                    return true;
                } else {
                    LOGGER.info(conditions);
                }
            }
        } else {
            LOGGER.info("{} does not exist", topicName);
        }
        return false;
    }, "status ready for topic " + topicName);
}
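The waitFor call wraps the check in a polling loop provided by the test base class; its implementation is not shown here. A simple loop with the same shape, using an interval and timeout chosen purely for illustration, might be:
// Illustrative polling helper; the interval and timeout values are assumptions,
// not the ones used by TopicOperatorBaseIT.
static void waitFor(BooleanSupplier condition, String description) throws InterruptedException, TimeoutException {
    long timeoutMs = 120_000;
    long intervalMs = 1_000;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        if (condition.getAsBoolean()) {
            return;
        }
        Thread.sleep(intervalMs);
    }
    throw new TimeoutException("Timed out waiting for " + description);
}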
Use of io.strimzi.api.kafka.model.status.Condition in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
Class DebeziumOperandSupport, method computeStatus.
public static void computeStatus(ConnectorStatusSpec statusSpec, KafkaConnect kafkaConnect, KafkaConnector kafkaConnector) {
    statusSpec.setConditions(new ArrayList<>());
    statusSpec.setPhase(ManagedConnector.STATE_PROVISIONING);

    // Default "Ready" condition, replaced below if the KafkaConnect/KafkaConnector conditions say otherwise
    var readyCondition = new io.fabric8.kubernetes.api.model.Condition();
    readyCondition.setType("Ready");
    readyCondition.setStatus("False");
    readyCondition.setReason("Transitioning");

    boolean kafkaConnectorFailed = true;

    // Map the KafkaConnector conditions onto the connector status
    if (null != kafkaConnector) {
        for (Condition condition : kafkaConnector.getStatus().getConditions()) {
            var rc = cloneCondition(condition, "KafkaConnector:");
            switch (condition.getType()) {
                case "Ready":
                    readyCondition = cloneCondition(condition);
                    if ("True".equals(condition.getStatus())) {
                        statusSpec.setPhase(ManagedConnector.STATE_READY);
                        kafkaConnectorFailed = false;
                    }
                    break;
                case "NotReady":
                    if ("ConnectRestException".equals(condition.getReason())) {
                        readyCondition = cloneAsReadyFalseCondition(condition);
                        statusSpec.setPhase(ManagedConnector.STATE_FAILED);
                        break;
                    }
                    break;
                default:
                    break;
            }
            statusSpec.getConditions().add(rc);
        }
    }

    // Map the KafkaConnect conditions; they only override the ready condition if the connector itself is not failed
    if (null != kafkaConnect) {
        for (Condition condition : kafkaConnect.getStatus().getConditions()) {
            var rc = cloneCondition(condition, "KafkaConnect:");
            switch (condition.getType()) {
                case "Ready":
                    if (!"True".equals(condition.getStatus())) {
                        if (!kafkaConnectorFailed) {
                            readyCondition = cloneCondition(condition);
                        }
                        statusSpec.setPhase(ManagedConnector.STATE_PROVISIONING);
                    }
                    break;
                case "NotReady":
                    if ("TimeoutException".equals(condition.getReason())) {
                        statusSpec.setPhase(ManagedConnector.STATE_FAILED);
                        if (!kafkaConnectorFailed) {
                            readyCondition = cloneAsReadyFalseCondition(condition);
                            readyCondition.setReason("KafkaClusterUnreachable");
                            readyCondition.setMessage("The configured Kafka Cluster is unreachable or ACLs deny access.");
                        }
                        break;
                    }
                    break;
                default:
                    break;
            }
            statusSpec.getConditions().add(rc);
        }
    }

    statusSpec.getConditions().add(readyCondition);

    // The connector's own task state can further refine the phase
    if (null != kafkaConnector) {
        connector(kafkaConnector).map(KafkaConnectorStatus::getState).ifPresent(state -> {
            switch (state) {
                case KafkaConnectorStatus.STATE_FAILED:
                    statusSpec.setPhase(ManagedConnector.STATE_FAILED);
                    break;
                case KafkaConnectorStatus.STATE_UNASSIGNED:
                    statusSpec.setPhase(ManagedConnector.STATE_PROVISIONING);
                    break;
                case KafkaConnectorStatus.STATE_PAUSED:
                    statusSpec.setPhase(ManagedConnector.STATE_STOPPED);
                    break;
                default:
                    break;
            }
        });
    }
}
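The cloneCondition and cloneAsReadyFalseCondition helpers referenced in computeStatus convert a Strimzi Condition into a fabric8 io.fabric8.kubernetes.api.model.Condition. Their implementation is not part of this snippet; the sketch below is inferred from the call sites above and is not copied from the cos-fleetshard sources.
// Plausible sketch of the helpers used by computeStatus; the field mapping and
// type-prefix handling are inferred from the call sites, not from the real sources.
static io.fabric8.kubernetes.api.model.Condition cloneCondition(Condition condition) {
    return cloneCondition(condition, "");
}

static io.fabric8.kubernetes.api.model.Condition cloneCondition(Condition condition, String typePrefix) {
    var clone = new io.fabric8.kubernetes.api.model.Condition();
    clone.setType(typePrefix + condition.getType());
    clone.setStatus(condition.getStatus());
    clone.setReason(condition.getReason());
    clone.setMessage(condition.getMessage());
    clone.setLastTransitionTime(condition.getLastTransitionTime());
    return clone;
}

static io.fabric8.kubernetes.api.model.Condition cloneAsReadyFalseCondition(Condition condition) {
    var clone = cloneCondition(condition);
    clone.setType("Ready");
    clone.setStatus("False");
    return clone;
}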