Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.
From the class UserST, method testUserWithNameMoreThan64Chars.
@ParallelTest
void testUserWithNameMoreThan64Chars(ExtensionContext extensionContext) {
    // 65-character username (one character over the 64-character limit)
    String userWithLongName = "user" + "abcdefghijklmnopqrstuvxyzabcdefghijklmnopqrstuvxyzabcdefghijk";
    // 64-character username (exactly at the limit)
    String userWithCorrectName = "user-with-correct-name" + "abcdefghijklmnopqrstuvxyzabcdefghijklmnopq";
    // 65-character username
    String saslUserWithLongName = "sasl-user" + "abcdefghijklmnopqrstuvxyzabcdefghijklmnopqrstuvxyzabcdef";

    // Create a TLS user with a valid name; it should become Ready
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(userClusterName, userWithCorrectName)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .build());
    KafkaUserUtils.waitUntilKafkaUserStatusConditionIsPresent(namespace, userWithCorrectName);

    Condition condition = KafkaUserResource.kafkaUserClient().inNamespace(namespace)
        .withName(userWithCorrectName).get().getStatus().getConditions().get(0);
    verifyCRStatusCondition(condition, "True", Ready);

    // Create a SCRAM-SHA user with a long name; this should not fail,
    // because the 64-character limit applies only to TLS users
    resourceManager.createResource(extensionContext, KafkaUserTemplates.scramShaUser(userClusterName, saslUserWithLongName)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .build());

    // Create a TLS user with a too-long name; the operator should mark it NotReady
    resourceManager.createResource(extensionContext, false, KafkaUserTemplates.defaultUser(userClusterName, userWithLongName)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .withNewSpec()
            .withNewKafkaUserTlsClientAuthentication()
            .endKafkaUserTlsClientAuthentication()
        .endSpec()
        .build());
    KafkaUserUtils.waitUntilKafkaUserStatusConditionIsPresent(namespace, userWithLongName);

    condition = KafkaUserResource.kafkaUserClient().inNamespace(namespace)
        .withName(userWithLongName).get().getStatus().getConditions().get(0);
    verifyCRStatusCondition(condition, "only up to 64 characters", "InvalidResourceException", "True", NotReady);
}
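The helper verifyCRStatusCondition is defined elsewhere in the Strimzi system-test base classes and is not shown in this snippet. A minimal sketch of what the two arities used above might check, assuming Hamcrest matchers and that Ready/NotReady are enum constants whose toString() matches the condition type:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is;
import io.strimzi.api.kafka.model.status.Condition;

// Hypothetical reconstruction of the assertion helper; the real implementation
// in the Strimzi system tests may differ in signature and checks.
static void verifyCRStatusCondition(Condition condition, String status, Enum<?> type) {
    assertThat(condition.getStatus(), is(status));
    assertThat(condition.getType(), is(type.toString()));
}

static void verifyCRStatusCondition(Condition condition, String message, String reason, String status, Enum<?> type) {
    assertThat(condition.getMessage(), containsString(message));
    assertThat(condition.getReason(), is(reason));
    verifyCRStatusCondition(condition, status, type);
}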
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.
From the class ClusterOperatorRbacIsolatedST, method testCRBDeletionErrorsWhenRackAwarenessIsEnabled.
@IsolatedTest("We need for each test case its own Cluster Operator")
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
void testCRBDeletionErrorsWhenRackAwarenessIsEnabled(ExtensionContext extensionContext) {
    assumeFalse(Environment.isNamespaceRbacScope());

    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());

    // Re-install the Cluster Operator (060-Deployment) with namespace-scoped RBAC only
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withClusterOperatorRBACType(ClusterOperatorRBACType.NAMESPACE)
        .createInstallation()
        .runBundleInstallation();

    String rackKey = "rack-key";

    LOGGER.info("Deploying Kafka: {}, which should not be deployed; an error should be present in the CR status message", clusterName);
    resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3)
        .editOrNewSpec()
            .editOrNewKafka()
                .withNewRack()
                    .withTopologyKey(rackKey)
                .endRack()
            .endKafka()
        .endSpec()
        .build());

    KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(clusterName, INFRA_NAMESPACE, ".*Forbidden!.*");
    Condition kafkaStatusCondition = KafkaResource.kafkaClient().inNamespace(INFRA_NAMESPACE)
        .withName(clusterName).get().getStatus().getConditions().get(0);
    assertTrue(kafkaStatusCondition.getMessage().contains("Configured service account doesn't have access."));
    assertThat(kafkaStatusCondition.getType(), is(NotReady.toString()));

    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(kafkaClientsName).build());
    resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, clusterName, 1)
        .editSpec()
            .withNewRack(rackKey)
        .endSpec()
        .build());

    KafkaConnectUtils.waitUntilKafkaConnectStatusConditionContainsMessage(clusterName, INFRA_NAMESPACE, ".*Forbidden!.*");
    Condition kafkaConnectStatusCondition = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE)
        .withName(clusterName).get().getStatus().getConditions().get(0);
    assertTrue(kafkaConnectStatusCondition.getMessage().contains("Configured service account doesn't have access."));
    assertThat(kafkaConnectStatusCondition.getType(), is(NotReady.toString()));
}
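KafkaUtils.waitUntilKafkaStatusConditionContainsMessage and its Connect counterpart poll the custom resource status until some condition message matches the given regex. A self-contained sketch of that pattern, reusing the KafkaResource client from the snippet above; the helper name, timeout handling, and 1-second poll interval are our simplifications, not the real Strimzi utility code:

import io.strimzi.api.kafka.model.status.KafkaStatus;

// Illustrative polling loop; the real utilities wrap a shared waitFor mechanism.
static void waitForConditionMessage(String name, String namespace, String messageRegex, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        KafkaStatus status = KafkaResource.kafkaClient().inNamespace(namespace).withName(name).get().getStatus();
        if (status != null && status.getConditions() != null
                && status.getConditions().stream()
                    .anyMatch(c -> c.getMessage() != null && c.getMessage().matches(messageRegex))) {
            return;  // a condition carries the expected message
        }
        Thread.sleep(1_000);
    }
    throw new AssertionError("Kafka " + namespace + "/" + name + " never reported a condition matching " + messageRegex);
}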
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.
From the class AllNamespaceIsolatedST, method testUserInDifferentNamespace.
@IsolatedTest
void testUserInDifferentNamespace(ExtensionContext extensionContext) {
    String startingNamespace = cluster.setNamespace(SECOND_NAMESPACE);
    KafkaUser user = KafkaUserTemplates.tlsUser(MAIN_NAMESPACE_CLUSTER_NAME, USER_NAME).build();
    resourceManager.createResource(extensionContext, user);

    Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(SECOND_NAMESPACE)
        .withName(USER_NAME).get().getStatus().getConditions().get(0);
    LOGGER.info("KafkaUser condition status: {}", kafkaCondition.getStatus());
    LOGGER.info("KafkaUser condition type: {}", kafkaCondition.getType());
    assertThat(kafkaCondition.getType(), is(Ready.toString()));

    List<Secret> secretsOfSecondNamespace = kubeClient(SECOND_NAMESPACE).listSecrets();
    cluster.setNamespace(THIRD_NAMESPACE);

    // Copy the user's secret so that clients in the third namespace can authenticate
    for (Secret s : secretsOfSecondNamespace) {
        if (s.getMetadata().getName().equals(USER_NAME)) {
            LOGGER.info("Copying secret {} from namespace {} to namespace {}", s, SECOND_NAMESPACE, THIRD_NAMESPACE);
            copySecret(s, THIRD_NAMESPACE, USER_NAME);
        }
    }

    resourceManager.createResource(extensionContext,
        KafkaClientsTemplates.kafkaClients(true, MAIN_NAMESPACE_CLUSTER_NAME + "-" + Constants.KAFKA_CLIENTS, user).build());

    final String defaultKafkaClientsPodName = ResourceManager.kubeClient()
        .listPodsByPrefixInName(MAIN_NAMESPACE_CLUSTER_NAME + "-" + Constants.KAFKA_CLIENTS)
        .get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(defaultKafkaClientsPodName)
        .withTopicName(TOPIC_NAME)
        .withNamespaceName(THIRD_NAMESPACE)
        .withClusterName(MAIN_NAMESPACE_CLUSTER_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(USER_NAME)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Checking produced and consumed messages via pod: {}", defaultKafkaClientsPodName);
    int sent = internalKafkaClient.sendMessagesTls();
    assertThat(sent, is(MESSAGE_COUNT));
    int received = internalKafkaClient.receiveMessagesTls();
    assertThat(received, is(MESSAGE_COUNT));

    cluster.setNamespace(startingNamespace);
}
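copySecret is defined on the test base class and not shown here. A sketch of the obvious implementation, assuming the fabric8 SecretBuilder copy-constructor and a createSecret method on the test suite's kubeClient helper (the actual helper may differ):

import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;

// Sketch: clone the source Secret under a new name/namespace. The copy-constructor
// keeps the type and data; only the metadata we need is replaced.
void copySecret(Secret sourceSecret, String targetNamespace, String targetName) {
    Secret targetSecret = new SecretBuilder(sourceSecret)
        .withNewMetadata()
            .withName(targetName)
            .withNamespace(targetNamespace)
        .endMetadata()
        .build();
    kubeClient(targetNamespace).createSecret(targetSecret);
}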
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.
From the class TopicOperatorBaseIT, method assertStatusReady.
protected void assertStatusReady(String topicName) throws InterruptedException, ExecutionException, TimeoutException {
    waitFor(() -> {
        KafkaTopic kafkaTopic = operation().inNamespace(NAMESPACE).withName(topicName).get();
        if (kafkaTopic != null) {
            KafkaTopicStatus status = kafkaTopic.getStatus();
            if (status != null
                    && Objects.equals(status.getObservedGeneration(), kafkaTopic.getMetadata().getGeneration())
                    && status.getConditions() != null) {
                List<Condition> conditions = status.getConditions();
                assertThat(conditions.size() > 0, is(true));
                if (conditions.stream().anyMatch(condition ->
                        "Ready".equals(condition.getType()) && "True".equals(condition.getStatus()))) {
                    return true;
                } else {
                    LOGGER.info(conditions);
                }
            }
        } else {
            LOGGER.info("{} does not exist", topicName);
        }
        return false;
    }, "status ready for topic " + topicName);
}
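Observed-generation check aside, the readiness test above is just a predicate over the conditions list. Extracted as a standalone helper (the extraction is ours, not part of TopicOperatorBaseIT):

import java.util.List;
import io.strimzi.api.kafka.model.status.Condition;

// True when some condition has type "Ready" and status "True" -- the same
// check the lambda above performs inline.
static boolean hasReadyCondition(List<Condition> conditions) {
    return conditions != null && conditions.stream()
        .anyMatch(c -> "Ready".equals(c.getType()) && "True".equals(c.getStatus()));
}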
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.
From the class KafkaCluster, method fromCrd.
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);

    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());

    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());

    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);

    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));

    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }
    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }

    result.setRack(kafkaClusterSpec.getRack());

    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.setInitImage(initImage);

    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (kafkaClusterSpec.getJvmOptions() != null) {
        result.setJavaSystemProperties(kafkaClusterSpec.getJvmOptions().getJavaSystemProperties());
    }
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());

    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }

    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);

    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);

    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);

        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());

        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: "
                + "changing the deleteClaim flag, "
                + "adding volumes to Jbod storage or removing volumes from Jbod storage, "
                + "changing overrides to nodes which do not exist yet, "
                + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a "
                + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());

            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage",
                "The desired Kafka storage configuration contains changes which are not allowed. As a "
                + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                + "about the detected changes.");
            result.addWarningCondition(warning);

            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());

    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.setListeners(listeners);

    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.setAuthorization(kafkaClusterSpec.getAuthorization());
    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();

        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }

        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());

        if (template.getExternalBootstrapService() != null && template.getExternalBootstrapService().getMetadata() != null) {
            result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
            result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
        }

        if (template.getPerPodService() != null && template.getPerPodService().getMetadata() != null) {
            result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
            result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
        }

        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }

        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }

        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }

        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }

        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }

        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }

        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }

        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }

        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }

        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }

        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }

        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }

    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);

    return result;
}
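StatusUtils.buildWarningCondition wraps construction of the Condition added to the warning list in the storage branch above. A plausible sketch of the object it assembles, using the ConditionBuilder generated for io.strimzi.api.kafka.model.status.Condition; the exact fields and timestamp handling of the real utility may differ:

import java.time.Instant;
import java.time.format.DateTimeFormatter;
import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;

// Assumed shape of the warning condition: type "Warning", status "True",
// plus the caller-supplied reason and message; the timestamp is illustrative.
static Condition buildWarningCondition(String reason, String message) {
    return new ConditionBuilder()
        .withType("Warning")
        .withStatus("True")
        .withReason(reason)
        .withMessage(message)
        .withLastTransitionTime(DateTimeFormatter.ISO_INSTANT.format(Instant.now()))
        .build();
}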