use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class ZookeeperCluster, method fromCrd.
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:CyclomaticComplexity" })
public static ZookeeperCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    ZookeeperCluster zk = new ZookeeperCluster(reconciliation, kafkaAssembly);
    zk.setOwnerReference(kafkaAssembly);
    ZookeeperClusterSpec zookeeperClusterSpec = kafkaAssembly.getSpec().getZookeeper();

    int replicas = zookeeperClusterSpec.getReplicas();
    if (replicas <= 0) {
        replicas = ZookeeperClusterSpec.DEFAULT_REPLICAS;
    }
    if (replicas == 1 && zookeeperClusterSpec.getStorage() != null && "ephemeral".equals(zookeeperClusterSpec.getStorage().getType())) {
        LOGGER.warnCr(reconciliation, "A ZooKeeper cluster with a single replica and ephemeral storage will be in a defective state after any restart or rolling update. It is recommended that a minimum of three replicas are used.");
    }
    zk.setReplicas(replicas);

    // Get the ZK version information from either the CRD or from the default setting
    KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka();
    String version = versions.supportedVersion(kafkaClusterSpec != null ? kafkaClusterSpec.getVersion() : null).zookeeperVersion();
    zk.setVersion(version);

    String image = zookeeperClusterSpec.getImage();
    if (image == null) {
        image = versions.kafkaImage(kafkaClusterSpec != null ? kafkaClusterSpec.getImage() : null,
                kafkaClusterSpec != null ? kafkaClusterSpec.getVersion() : null);
    }
    zk.setImage(image);

    if (zookeeperClusterSpec.getReadinessProbe() != null) {
        zk.setReadinessProbe(zookeeperClusterSpec.getReadinessProbe());
    }
    if (zookeeperClusterSpec.getLivenessProbe() != null) {
        zk.setLivenessProbe(zookeeperClusterSpec.getLivenessProbe());
    }

    Logging logging = zookeeperClusterSpec.getLogging();
    zk.setLogging(logging == null ? new InlineLogging() : logging);

    zk.setGcLoggingEnabled(zookeeperClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : zookeeperClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (zookeeperClusterSpec.getJvmOptions() != null) {
        zk.setJavaSystemProperties(zookeeperClusterSpec.getJvmOptions().getJavaSystemProperties());
    }

    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(zk, zookeeperClusterSpec);

    if (oldStorage != null) {
        Storage newStorage = zookeeperClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);

        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, zookeeperClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Zookeeper storage are allowed: "
                    + "changing the deleteClaim flag, "
                    + "changing overrides to nodes which do not exist yet "
                    + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired ZooKeeper storage configuration in the custom resource {}/{} contains changes which are not allowed. As "
                    + "a result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());

            Condition warning = StatusUtils.buildWarningCondition("ZooKeeperStorage",
                    "The desired ZooKeeper storage configuration contains changes which are not allowed. As a "
                    + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.");
            zk.addWarningCondition(warning);

            zk.setStorage(oldStorage);
        } else {
            zk.setStorage(newStorage);
        }
    } else {
        zk.setStorage(zookeeperClusterSpec.getStorage());
    }

    zk.setConfiguration(new ZookeeperConfiguration(reconciliation, zookeeperClusterSpec.getConfig().entrySet()));
    zk.setResources(zookeeperClusterSpec.getResources());
    zk.setJvmOptions(zookeeperClusterSpec.getJvmOptions());

    if (zookeeperClusterSpec.getJmxOptions() != null) {
        zk.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureZookeeperJmxOptions(zookeeperClusterSpec.getJmxOptions().getAuthentication(), zk);
    }

    if (zookeeperClusterSpec.getTemplate() != null) {
        ZookeeperClusterTemplate template = zookeeperClusterSpec.getTemplate();

        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                zk.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                zk.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                zk.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }

        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            zk.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            zk.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodTemplate(zk, template.getPod());
        ModelUtils.parseInternalServiceTemplate(zk, template.getClientService());
        ModelUtils.parseInternalHeadlessServiceTemplate(zk, template.getNodesService());

        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            zk.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), zk.templateStatefulSetLabels);
            zk.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }

        if (template.getZookeeperContainer() != null && template.getZookeeperContainer().getEnv() != null) {
            zk.templateZookeeperContainerEnvVars = template.getZookeeperContainer().getEnv();
        }
        if (template.getZookeeperContainer() != null && template.getZookeeperContainer().getSecurityContext() != null) {
            zk.templateZookeeperContainerSecurityContext = template.getZookeeperContainer().getSecurityContext();
        }

        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            zk.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            zk.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }

        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            zk.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            zk.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodDisruptionBudgetTemplate(zk, template.getPodDisruptionBudget());
    }

    zk.templatePodLabels = Util.mergeLabelsOrAnnotations(zk.templatePodLabels, DEFAULT_POD_LABELS);

    return zk;
}
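The warning path above only builds a Condition and attaches it to the model; the operator later surfaces it in the Kafka custom resource status. For orientation, here is a minimal sketch of how such a warning Condition could be assembled with the fabric8 ConditionBuilder named in the heading. The type, status and timestamp handling are assumptions; Strimzi's actual StatusUtils.buildWarningCondition may populate the fields differently.

import io.fabric8.kubernetes.api.model.Condition;
import io.fabric8.kubernetes.api.model.ConditionBuilder;

import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public class WarningConditionSketch {

    // Hypothetical stand-in for StatusUtils.buildWarningCondition(reason, message)
    public static Condition buildWarningCondition(String reason, String message) {
        return new ConditionBuilder()
                .withType("Warning")    // assumed: warnings are reported under a "Warning" condition type
                .withStatus("True")
                .withReason(reason)     // e.g. "ZooKeeperStorage", as passed in fromCrd above
                .withMessage(message)
                .withLastTransitionTime(ZonedDateTime.now().format(DateTimeFormatter.ISO_OFFSET_DATE_TIME))
                .build();
    }
}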
use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class KafkaRebalanceAssemblyOperatorTest, method testNewWithMissingHardGoals.
/**
 * Tests the transition from 'New' to 'NotReady' due to a "missing hard goals" error.
 *
 * 1. A new KafkaRebalance resource is created with a goals list that does not include all of the hard goals; it is in the 'New' state
 * 2. The operator requests a rebalance proposal through the Cruise Control REST API
 * 3. The operator gets a "missing hard goals" error instead of a proposal
 * 4. The KafkaRebalance resource moves to the 'NotReady' state
 */
@Test
public void testNewWithMissingHardGoals(VertxTestContext context) throws IOException, URISyntaxException {
    // Set up the rebalance endpoint to return the "missing hard goals" error
    MockCruiseControl.setupCCRebalanceBadGoalsError(ccServer);

    KafkaRebalanceSpec kafkaRebalanceSpec = new KafkaRebalanceSpecBuilder()
            .withGoals("DiskCapacityGoal", "CpuCapacityGoal")
            .build();

    KafkaRebalance kr = createKafkaRebalance(CLUSTER_NAMESPACE, CLUSTER_NAME, RESOURCE_NAME, kafkaRebalanceSpec);
    Crds.kafkaRebalanceOperation(kubernetesClient).inNamespace(CLUSTER_NAMESPACE).create(kr);

    // the Kafka cluster is deployed in the namespace
    when(mockKafkaOps.getAsync(CLUSTER_NAMESPACE, CLUSTER_NAME)).thenReturn(Future.succeededFuture(kafka));
    mockSecretResources();
    mockRebalanceOperator(mockRebalanceOps, mockCmOps, CLUSTER_NAMESPACE, RESOURCE_NAME, kubernetesClient);

    Checkpoint checkpoint = context.checkpoint();
    kcrao.reconcileRebalance(new Reconciliation("test-trigger", KafkaRebalance.RESOURCE_KIND, CLUSTER_NAMESPACE, RESOURCE_NAME), kr)
            .onComplete(context.succeeding(v -> context.verify(() -> {
                // the resource moved from 'New' to 'NotReady' due to the error
                KafkaRebalance kr1 = Crds.kafkaRebalanceOperation(kubernetesClient).inNamespace(CLUSTER_NAMESPACE).withName(RESOURCE_NAME).get();
                assertThat(kr1, StateMatchers.hasState());

                Condition condition = kcrao.rebalanceStateCondition(kr1.getStatus());
                assertThat(condition, StateMatchers.hasStateInCondition(KafkaRebalanceState.NotReady, CruiseControlRestException.class,
                        "Error processing POST request '/rebalance' due to: "
                                + "'java.lang.IllegalArgumentException: Missing hard goals [NetworkInboundCapacityGoal, DiskCapacityGoal, RackAwareGoal, NetworkOutboundCapacityGoal, CpuCapacityGoal, ReplicaCapacityGoal] "
                                + "in the provided goals: [RackAwareGoal, ReplicaCapacityGoal]. "
                                + "Add skip_hard_goal_check=true parameter to ignore this sanity check.'."));

                checkpoint.flag();
            })));
}
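The canned error asserted in this test also documents the workaround: Cruise Control rejects a goals list that omits hard goals unless skip_hard_goal_check=true is sent. A brief sketch of the relaxed spec, assuming the KafkaRebalanceSpecBuilder used above exposes the corresponding skipHardGoalCheck field of the KafkaRebalance CRD:

// Sketch: the same two goals, but asking Cruise Control to skip the hard-goal sanity check.
// withSkipHardGoalCheck is assumed to map to the skip_hard_goal_check REST parameter.
KafkaRebalanceSpec relaxedSpec = new KafkaRebalanceSpecBuilder()
        .withGoals("DiskCapacityGoal", "CpuCapacityGoal")
        .withSkipHardGoalCheck(true)
        .build();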
use of io.fabric8.kubernetes.api.model.Condition in project strimzi-kafka-operator by strimzi.
In the class KafkaRebalanceAssemblyOperator, method reconcile.
private Future<Void> reconcile(Reconciliation reconciliation, String host, CruiseControlApi apiClient, KafkaRebalance kafkaRebalance, KafkaRebalanceState currentState, KafkaRebalanceAnnotation rebalanceAnnotation) {
    LOGGER.infoCr(reconciliation, "Rebalance action from state [{}]", currentState);

    if (Annotations.isReconciliationPausedWithAnnotation(kafkaRebalance)) {
        // we need to do this check again because it was triggered by a watcher
        KafkaRebalanceStatus status = new KafkaRebalanceStatus();
        Set<Condition> unknownAndDeprecatedConditions = validate(reconciliation, kafkaRebalance);
        unknownAndDeprecatedConditions.add(StatusUtils.getPausedCondition());
        status.setConditions(new ArrayList<>(unknownAndDeprecatedConditions));
        return updateStatus(reconciliation, kafkaRebalance, status, null).compose(i -> Future.succeededFuture());
    }

    RebalanceOptions.RebalanceOptionsBuilder rebalanceOptionsBuilder = convertRebalanceSpecToRebalanceOptions(kafkaRebalance.getSpec(), usingJbodStorage);

    return computeNextStatus(reconciliation, host, apiClient, kafkaRebalance, currentState, rebalanceAnnotation, rebalanceOptionsBuilder)
            .compose(desiredStatusAndMap -> {
                // do a new get to retrieve the current resource state.
                return kafkaRebalanceOperator.getAsync(reconciliation.namespace(), reconciliation.name())
                        .compose(currentKafkaRebalance -> {
                            if (currentKafkaRebalance != null) {
                                return configMapOperator.reconcile(reconciliation, kafkaRebalance.getMetadata().getNamespace(), kafkaRebalance.getMetadata().getName(), desiredStatusAndMap.getLoadMap())
                                        .compose(i -> updateStatus(reconciliation, currentKafkaRebalance, desiredStatusAndMap.getStatus(), null))
                                        .compose(updatedKafkaRebalance -> {
                                            LOGGER.infoCr(reconciliation, "State updated to [{}] with annotation {}={} ", rebalanceStateConditionType(updatedKafkaRebalance.getStatus()), ANNO_STRIMZI_IO_REBALANCE, rawRebalanceAnnotation(updatedKafkaRebalance));
                                            if (hasRebalanceAnnotation(updatedKafkaRebalance)) {
                                                LOGGER.debugCr(reconciliation, "Removing annotation {}={}", ANNO_STRIMZI_IO_REBALANCE, rawRebalanceAnnotation(updatedKafkaRebalance));
                                                // Updated KafkaRebalance has rebalance annotation removed as
                                                // action specified by user has been completed.
                                                KafkaRebalance patchedKafkaRebalance = new KafkaRebalanceBuilder(updatedKafkaRebalance)
                                                        .editMetadata()
                                                            .removeFromAnnotations(ANNO_STRIMZI_IO_REBALANCE)
                                                        .endMetadata()
                                                        .build();
                                                return kafkaRebalanceOperator.patchAsync(reconciliation, patchedKafkaRebalance);
                                            } else {
                                                LOGGER.debugCr(reconciliation, "No annotation {}", ANNO_STRIMZI_IO_REBALANCE);
                                                return Future.succeededFuture();
                                            }
                                        })
                                        .mapEmpty();
                            } else {
                                return Future.succeededFuture();
                            }
                        }, exception -> {
                            LOGGER.errorCr(reconciliation, "Status updated to [NotReady] due to error: {}", exception.getMessage());
                            return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), exception).mapEmpty();
                        });
            }, exception -> {
                LOGGER.errorCr(reconciliation, "Status updated to [NotReady] due to error: {}", exception.getMessage());
                return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), exception).mapEmpty();
            });
}
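The early return at the top of reconcile hinges on Annotations.isReconciliationPausedWithAnnotation. A minimal sketch of what such a check plausibly does, assuming the standard strimzi.io/pause-reconciliation annotation; the real helper may differ in its details:

import java.util.Map;

// Hypothetical stand-in for Annotations.isReconciliationPausedWithAnnotation(resource)
static boolean isReconciliationPaused(KafkaRebalance resource) {
    Map<String, String> annotations = resource.getMetadata().getAnnotations();
    return annotations != null
            && "true".equals(annotations.get("strimzi.io/pause-reconciliation"));
}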
use of io.fabric8.kubernetes.api.model.Condition in project droolsjbpm-integration by kiegroup.
In the class LeaderElectionImpl, method tryAcquireLeadership.
boolean tryAcquireLeadership() {
    if (logger.isDebugEnabled()) {
        logger.debug("{} Trying to acquire the leadership...", logPrefix());
    }

    ConfigMap configMap = this.latestConfigMap;
    Set<String> members = this.latestMembers;
    LeaderInfo latestLeaderInfoLocal = this.latestLeaderInfo;

    if (latestLeaderInfoLocal == null || members == null) {
        if (logger.isWarnEnabled()) {
            logger.warn("{} Unexpected condition. Latest leader info or list of members is empty.", logPrefix());
        }
        return false;
    } else if (!members.contains(this.lockConfiguration.getPodName())) {
        if (logger.isWarnEnabled()) {
            logger.warn("{} The list of cluster members {} does not contain the current Pod. Cannot acquire leadership.", logPrefix(), latestLeaderInfoLocal.getMembers());
        }
        return false;
    }

    // Info we would set in the ConfigMap to become leaders
    LeaderInfo newLeaderInfo = new LeaderInfo(this.lockConfiguration.getGroupName(), this.lockConfiguration.getPodName(), new Date(), members);

    if (configMap == null) {
        // No ConfigMap created so far
        if (logger.isDebugEnabled()) {
            logger.debug("{} Lock configmap is not present in the Kubernetes namespace. A new ConfigMap will be created", logPrefix());
        }
        ConfigMap newConfigMap = ConfigMapLockUtils.createNewConfigMap(this.lockConfiguration.getConfigMapName(), newLeaderInfo);
        try {
            kubernetesClient.configMaps()
                    .inNamespace(this.lockConfiguration.getKubernetesResourcesNamespaceOrDefault(kubernetesClient))
                    .create(newConfigMap);
            if (logger.isDebugEnabled()) {
                logger.debug("{} ConfigMap {} successfully created", logPrefix(), this.lockConfiguration.getConfigMapName());
            }
            updateLatestLeaderInfo(newConfigMap, members);
            return true;
        } catch (Exception ex) {
            // Suppress the exception: the ConfigMap may have been created concurrently by another member
            logger.warn("{} Unable to create the ConfigMap, it may have been created by other cluster members concurrently. "
                    + "If the problem persists, check if the service account has the right permissions to create it", logPrefix());
            logger.debug("{} Exception while trying to create the ConfigMap", logPrefix(), ex);
            return false;
        }
    } else {
        if (logger.isDebugEnabled()) {
            logger.debug("{} Lock configmap already present in the Kubernetes namespace. Checking...", logPrefix());
        }
        LeaderInfo leaderInfo = ConfigMapLockUtils.getLeaderInfo(configMap, members, this.lockConfiguration.getGroupName());

        boolean canAcquire = !leaderInfo.hasValidLeader();
        if (canAcquire) {
            // Try to be the new leader
            try {
                ConfigMap updatedConfigMap = ConfigMapLockUtils.getConfigMapWithNewLeader(configMap, newLeaderInfo);
                kubernetesClient.configMaps()
                        .inNamespace(this.lockConfiguration.getKubernetesResourcesNamespaceOrDefault(kubernetesClient))
                        .withName(this.lockConfiguration.getConfigMapName())
                        .lockResourceVersion(configMap.getMetadata().getResourceVersion())
                        .replace(updatedConfigMap);
                if (logger.isDebugEnabled()) {
                    logger.debug("{} ConfigMap {} successfully updated", logPrefix(), this.lockConfiguration.getConfigMapName());
                }
                updateLatestLeaderInfo(updatedConfigMap, members);
                return true;
            } catch (Exception ex) {
                logger.warn("{} Unable to update the lock ConfigMap to set leadership information", logPrefix());
                logger.debug("{} Error received during configmap lock replace", logPrefix(), ex);
                return false;
            }
        } else {
            // Another pod is the leader and it's still active
            if (logger.isDebugEnabled()) {
                logger.debug("{} Another Pod ({}) is the current leader and it is still active", logPrefix(), this.latestLeaderInfo.getLeader());
            }
            return false;
        }
    }
}
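Both update branches above lean on Kubernetes optimistic concurrency: lockResourceVersion pins the replace to the resourceVersion that was read, so if another pod updated the ConfigMap in the meantime the replace throws and this pod simply reports failure. A condensed sketch of that compare-and-swap pattern, using only client calls that already appear in tryAcquireLeadership (the helper name itself is made up):

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.client.KubernetesClient;

// Hypothetical helper: replace a ConfigMap only if nobody else has modified it since it was read
static boolean tryCompareAndSwap(KubernetesClient client, String namespace, ConfigMap current, ConfigMap desired) {
    try {
        client.configMaps()
                .inNamespace(namespace)
                .withName(current.getMetadata().getName())
                .lockResourceVersion(current.getMetadata().getResourceVersion()) // optimistic lock
                .replace(desired);
        return true;  // we won the race
    } catch (Exception ex) {
        return false; // another member updated the ConfigMap first; retry on the next round
    }
}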