Use of io.fabric8.kubernetes.model.annotation.Version in project quarkus by quarkusio.
The class BasicKubernetesTest, method assertGeneratedResources.
@Test
public void assertGeneratedResources() throws IOException {
    final Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
    assertThat(kubernetesDir)
            .isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.json"))
            .isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.yml"))
            .satisfies(p -> assertThat(p.toFile().listFiles()).hasSize(2));
    List<HasMetadata> kubernetesList = DeserializationUtil.deserializeAsList(kubernetesDir.resolve("kubernetes.yml"));
    assertThat(kubernetesList).hasSize(2);
    assertThat(kubernetesList.get(0)).isInstanceOfSatisfying(Deployment.class, d -> {
        assertThat(d.getMetadata()).satisfies(m -> {
            assertThat(m.getName()).isEqualTo("basic");
            assertThat(m.getNamespace()).isNull();
        });
        assertThat(d.getSpec()).satisfies(deploymentSpec -> {
            assertThat(deploymentSpec.getSelector()).isNotNull().satisfies(labelSelector -> {
                assertThat(labelSelector.getMatchLabels()).containsOnly(
                        entry("app.kubernetes.io/name", "basic"),
                        entry("app.kubernetes.io/version", "0.1-SNAPSHOT"));
            });
            assertThat(deploymentSpec.getTemplate()).satisfies(t -> {
                assertThat(t.getSpec()).satisfies(podSpec -> {
                    assertThat(podSpec.getSecurityContext()).isNull();
                    assertThat(podSpec.getContainers()).singleElement().satisfies(container -> {
                        // expect the default value
                        assertThat(container.getImagePullPolicy()).isEqualTo("Always");
                        assertThat(container.getPorts()).singleElement().satisfies(p -> {
                            assertThat(p.getContainerPort()).isEqualTo(8080);
                        });
                    });
                });
            });
        });
    });
    assertThat(kubernetesList.get(1)).isInstanceOfSatisfying(Service.class, s -> {
        assertThat(s.getMetadata()).satisfies(m -> {
            assertThat(m.getNamespace()).isNull();
        });
        assertThat(s.getSpec()).satisfies(spec -> {
            assertThat(spec.getSelector()).containsOnly(
                    entry("app.kubernetes.io/name", "basic"),
                    entry("app.kubernetes.io/version", "0.1-SNAPSHOT"));
            assertThat(spec.getPorts()).hasSize(1).singleElement().satisfies(p -> {
                assertThat(p.getPort()).isEqualTo(80);
                assertThat(p.getTargetPort().getIntVal()).isEqualTo(8080);
            });
        });
    });
}
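The DeserializationUtil.deserializeAsList helper above is project-specific. For reference, a minimal sketch of the same idea using only the fabric8 client follows; it assumes a fabric8 6.x client, where load(...).items() parses a (possibly multi-document) manifest into typed resources, and the file path is illustrative.

import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.List;

public class LoadGeneratedManifest {
    public static void main(String[] args) throws Exception {
        try (KubernetesClient client = new KubernetesClientBuilder().build();
             InputStream is = new FileInputStream("target/kubernetes/kubernetes.yml")) {
            // Parses each YAML document into its typed model (Deployment, Service, ...).
            List<HasMetadata> resources = client.load(is).items();
            resources.forEach(r -> System.out.printf("%s/%s%n", r.getKind(), r.getMetadata().getName()));
        }
    }
}

Because the deserializer yields typed fabric8 models, the assertions above can use isInstanceOfSatisfying(Deployment.class, ...) instead of poking at raw maps.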
Use of io.fabric8.kubernetes.model.annotation.Version in project couchbase-elasticsearch-connector by couchbase.
The class ElasticsearchConnector, method run.
public static void run(ConnectorConfig config, PanicButton panicButton, Duration startupQuietPeriod) throws Throwable {
    final Throwable fatalError;
    final Membership membership = config.group().staticMembership();
    LOGGER.info("Read configuration: {}", redactSystem(config));
    final ScheduledExecutorService checkpointExecutor = Executors.newSingleThreadScheduledExecutor();
    try (Slf4jReporter metricReporter = newSlf4jReporter(config.metrics().logInterval());
         HttpServer httpServer = new HttpServer(config.metrics().httpPort(), membership);
         RestHighLevelClient esClient = newElasticsearchClient(config.elasticsearch(), config.trustStore())) {
        DocumentLifecycle.setLogLevel(config.logging().logDocumentLifecycle() ? LogLevel.INFO : LogLevel.DEBUG);
        LogRedaction.setRedactionLevel(config.logging().redactionLevel());
        DcpHelper.setRedactionLevel(config.logging().redactionLevel());
        final ClusterEnvironment env = CouchbaseHelper.environmentBuilder(config.couchbase(), config.trustStore()).build();
        final Cluster cluster = CouchbaseHelper.createCluster(config.couchbase(), env);
        final Version elasticsearchVersion = waitForElasticsearchAndRequireVersion(
                esClient, new Version(2, 0, 0), new Version(5, 6, 16));
        LOGGER.info("Elasticsearch version {}", elasticsearchVersion);
        validateConfig(elasticsearchVersion, config.elasticsearch());
        // Wait for Couchbase Server to come online, then open the bucket.
        final Bucket bucket = CouchbaseHelper.waitForBucket(cluster, config.couchbase().bucket());
        final Set<SeedNode> kvNodes = CouchbaseHelper.getKvNodes(config.couchbase(), bucket);
        final boolean storeMetadataInSourceBucket = config.couchbase().metadataBucket().equals(config.couchbase().bucket());
        final Bucket metadataBucket = storeMetadataInSourceBucket
                ? bucket
                : CouchbaseHelper.waitForBucket(cluster, config.couchbase().metadataBucket());
        final Collection metadataCollection = CouchbaseHelper.getMetadataCollection(metadataBucket, config.couchbase());
        final CheckpointDao checkpointDao = new CouchbaseCheckpointDao(metadataCollection, config.group().name());
        // todo get this from dcp client
        final String bucketUuid = "";
        final CheckpointService checkpointService = new CheckpointService(bucketUuid, checkpointDao);
        final RequestFactory requestFactory = new RequestFactory(
                config.elasticsearch().types(), config.elasticsearch().docStructure(), config.elasticsearch().rejectLog());
        final ElasticsearchWorkerGroup workers = new ElasticsearchWorkerGroup(
                esClient, checkpointService, requestFactory, ErrorListener.NOOP, config.elasticsearch().bulkRequest());
        Metrics.gauge("write.queue", "Document events currently buffered in memory.", workers, ElasticsearchWorkerGroup::getQueueSize);
        // High value indicates the connector has stalled
        Metrics.gauge("es.wait.ms", null, workers, ElasticsearchWorkerGroup::getCurrentRequestMillis);
        // Same as "es.wait.ms" but normalized to seconds for Prometheus
        Metrics.gauge("es.wait.seconds",
                "Duration of in-flight Elasticsearch bulk request (including any retries). Long duration may indicate connector has stalled.",
                workers, value -> value.getCurrentRequestMillis() / (double) SECONDS.toMillis(1));
        final Client dcpClient = DcpHelper.newClient(config.group().name(), config.couchbase(), kvNodes, config.trustStore());
        initEventListener(dcpClient, panicButton, workers::submit);
        final Thread saveCheckpoints = new Thread(checkpointService::save, "save-checkpoints");
        try {
            try {
                dcpClient.connect().block(Duration.ofMillis(config.couchbase().dcp().connectTimeout().millis()));
            } catch (Throwable t) {
                panicButton.panic("Failed to establish initial DCP connection within " + config.couchbase().dcp().connectTimeout(), t);
            }
            final int numPartitions = dcpClient.numPartitions();
            LOGGER.info("Bucket has {} partitions. Membership = {}", numPartitions, membership);
            final Set<Integer> partitions = membership.getPartitions(numPartitions);
            if (partitions.isEmpty()) {
                // need to do this check, because if we started streaming with an empty list,
                // the DCP client would open streams for *all* partitions
                throw new IllegalArgumentException("There are more workers than Couchbase vbuckets; this worker doesn't have any work to do.");
            }
            checkpointService.init(numPartitions, () -> DcpHelper.getCurrentSeqnosAsMap(dcpClient, partitions, Duration.ofSeconds(5)));
            dcpClient.initializeState(StreamFrom.BEGINNING, StreamTo.INFINITY).block();
            initSessionState(dcpClient, checkpointService, partitions);
            // Sleep through the quiet period so peers can shut down first in case of unsafe scaling
            // or other configuration problems.
            if (!startupQuietPeriod.isZero()) {
                LOGGER.info("Entering startup quiet period; sleeping for {} so peers can terminate in case of unsafe scaling.", startupQuietPeriod);
                MILLISECONDS.sleep(startupQuietPeriod.toMillis());
                LOGGER.info("Startup quiet period complete.");
            }
            checkpointExecutor.scheduleWithFixedDelay(checkpointService::save, 10, 10, SECONDS);
            RuntimeHelper.addShutdownHook(saveCheckpoints);
            // Unless shutdown is due to panic...
            panicButton.addPrePanicHook(() -> RuntimeHelper.removeShutdownHook(saveCheckpoints));
            try {
                LOGGER.debug("Opening DCP streams for partitions: {}", partitions);
                dcpClient.startStreaming(partitions).block();
            } catch (RuntimeException e) {
                ThrowableHelper.propagateCauseIfPossible(e, InterruptedException.class);
                throw e;
            }
            // Start HTTP server *after* other setup is complete, so the metrics endpoint
            // can be used as a "successful startup" probe.
            httpServer.start();
            if (config.metrics().httpPort() >= 0) {
                LOGGER.info("Prometheus metrics available at http://localhost:{}/metrics/prometheus", httpServer.getBoundPort());
                LOGGER.info("Dropwizard metrics available at http://localhost:{}/metrics/dropwizard?pretty", httpServer.getBoundPort());
            } else {
                LOGGER.info("Metrics HTTP server is disabled. Edit the [metrics] 'httpPort' config property to enable.");
            }
            LOGGER.info("Elasticsearch connector startup complete.");
            fatalError = workers.awaitFatalError();
            LOGGER.error("Terminating due to fatal error from worker", fatalError);
        } catch (InterruptedException shutdownRequest) {
            LOGGER.info("Graceful shutdown requested. Saving checkpoints and cleaning up.");
            checkpointService.save();
            throw shutdownRequest;
        } catch (Throwable t) {
            LOGGER.error("Terminating due to fatal error during setup", t);
            throw t;
        } finally {
            // If we get here it means there was a fatal exception, or the connector is running in distributed
            // or test mode and a graceful shutdown was requested. Don't need the shutdown hook for any of those cases.
            RuntimeHelper.removeShutdownHook(saveCheckpoints);
            checkpointExecutor.shutdown();
            metricReporter.stop();
            dcpClient.disconnect().block();
            // to avoid buffer leak, must close *after* dcp client stops feeding it events
            workers.close();
            checkpointExecutor.awaitTermination(10, SECONDS);
            cluster.disconnect();
            // can't reuse, because connector config might have different SSL settings next time
            env.shutdown();
        }
    }
    // give stdout a chance to quiet down so the stack trace on stderr isn't interleaved with stdout.
    MILLISECONDS.sleep(500);
    throw fatalError;
}
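The save-checkpoints thread above is registered as a JVM shutdown hook so that an abrupt termination still persists replication progress, and removed again during normal teardown so the save doesn't run twice or after the DCP client has disconnected. A minimal sketch of that pattern using only the JDK (RuntimeHelper above presumably wraps Runtime.getRuntime(); the save action here is a stand-in):

public class ShutdownHookSketch {
    public static void main(String[] args) throws Exception {
        Runnable saveCheckpoints = () -> System.out.println("saving checkpoints"); // stand-in for checkpointService::save
        Thread saveOnExit = new Thread(saveCheckpoints, "save-checkpoints");
        Runtime.getRuntime().addShutdownHook(saveOnExit);
        try {
            Thread.sleep(1000); // stand-in for the connector's main loop
        } finally {
            // On a normal (or requested) shutdown, save explicitly and drop the hook
            // so the same work doesn't run again at JVM exit.
            Runtime.getRuntime().removeShutdownHook(saveOnExit);
            saveCheckpoints.run();
        }
    }
}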
Use of io.fabric8.kubernetes.model.annotation.Version in project couchbase-elasticsearch-connector by couchbase.
The class ReplicaChangeWatcher, method getReplicasAndPanicOnChange.
/**
* Starts watching the replica count and panics if the count changes
* or can't be determined.
*
* @return initial number of replicas
*/
public static int getReplicasAndPanicOnChange(KubernetesClient client, PanicButton panicButton) {
    try {
        StatefulSetInfo info = StatefulSetInfo.fromHostname();
        String k8sName = info.name;
        log.info("Kubernetes API version = {}", client.discovery().v1().getApiVersion());
        String namespace = client.discovery().v1().getNamespace();
        log.info("Kubernetes namespace = {}", namespace);
        StatefulSet statefulSet = client.apps().statefulSets().inNamespace(namespace).withName(k8sName).get();
        requireNonNull(statefulSet, "Failed to get StatefulSet in namespace '" + namespace + "' with name '" + k8sName + "'");
        int initialReplicas = statefulSet.getSpec().getReplicas();
        log.info("StatefulSet replicas = {}", initialReplicas);
        log.info("Kubernetes API resync period = {} ; kill switch deadline = {}", resyncPeriod, killSwitchDeadline);
        KillSwitch killSwitch = KillSwitch.start(killSwitchDeadline, () -> panicButton.panic(
                "The connector contacts the Kubernetes API server every " + resyncPeriod
                        + " to verify the StatefulSet's replica count has not changed,"
                        + " but " + killSwitchDeadline + " has elapsed without a successful response."
                        + " Terminating to ensure multiple pods don't process the same Couchbase partitions."));
        SharedIndexInformer<StatefulSet> informer = client.apps().statefulSets()
                .inNamespace(namespace)
                .withName(k8sName)
                .inform(new ResourceEventHandler<>() {

                    // When the K8S API server is reachable, an update happens once per resync period
                    // and also immediately when the informer detects a change to the resource.
                    @Override
                    public void onUpdate(StatefulSet oldSet, StatefulSet newSet) {
                        killSwitch.reset();
                        int newReplicas = newSet.getSpec().getReplicas();
                        if (newReplicas != 0 && newReplicas != initialReplicas) {
                            // Panic to terminate the connector and let Kubernetes restart it.
                            // This is simpler than trying to stream from different vbuckets on the fly.
                            panicButton.mildPanic("The connector is automatically restarting because"
                                    + " it detected a change to the number of replicas in its StatefulSet."
                                    + " This is the intended behavior, and not a cause for alarm."
                                    + " Upon restart, the connector will wait for a quiet period to elapse, giving all pods"
                                    + " a chance to notice the change and shut down. This prevents multiple pods"
                                    + " from processing the same Couchbase partitions."
                                    + " There is a [very small] chance this strategy could fail, in which case"
                                    + " stale versions of some documents could be written to Elasticsearch and remain"
                                    + " until the documents are modified again in Couchbase."
                                    + " To be absolutely sure this never happens, we recommend first scaling"
                                    + " the StatefulSet down to zero (or deleting it) and waiting for pods"
                                    + " to terminate before scaling back up to the desired number of replicas.");
                        }
                    }

                    @Override
                    public void onAdd(StatefulSet it) {
                    }

                    @Override
                    public void onDelete(StatefulSet it, boolean deletedFinalStateUnknown) {
                    }
                }, resyncPeriod.toMillis());
        return initialReplicas;
    } catch (Throwable t) {
        panicButton.panic("Failed to get/watch StatefulSet replica count.", t);
        // unreachable
        throw t;
    }
}
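The KillSwitch used above is a project class, but its contract is clear from the call sites: it starts a deadline timer that fires a panic action unless reset() is called first, and every informer resync resets it. A minimal sketch of such a watchdog built on a ScheduledExecutorService (class and method names hypothetical):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class WatchdogSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final long deadlineMillis;
    private final Runnable onExpiry;
    private final AtomicReference<ScheduledFuture<?>> pending = new AtomicReference<>();

    public WatchdogSketch(long deadlineMillis, Runnable onExpiry) {
        this.deadlineMillis = deadlineMillis;
        this.onExpiry = onExpiry;
        reset();
    }

    // Called from the informer's onUpdate: cancels the pending expiry and schedules a new one.
    public void reset() {
        ScheduledFuture<?> previous = pending.getAndSet(
                scheduler.schedule(onExpiry, deadlineMillis, TimeUnit.MILLISECONDS));
        if (previous != null) {
            previous.cancel(false);
        }
    }
}

Because onUpdate fires at least once per resync period while the API server is reachable, the watchdog only expires when the connector has been out of contact for a full deadline, which is exactly when it can no longer verify the replica count.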
Use of io.fabric8.kubernetes.model.annotation.Version in project xp-operator by enonic.
The class AdmissionApi, method xp7deployment.
private void xp7deployment(AdmissionReview admissionReview) {
    AdmissionOperation op = getOperation(admissionReview);
    Xp7Deployment newDeployment = (Xp7Deployment) admissionReview.getRequest().getObject();
    if (op != AdmissionOperation.DELETE) {
        // Check spec
        Preconditions.checkState(newDeployment.getSpec() != null, "'spec' cannot be null");
        Preconditions.checkState(newDeployment.getSpec().getEnabled() != null, "'spec.enabled' cannot be null");
        Preconditions.checkState(newDeployment.getSpec().getXpVersion() != null, "'spec.xpVersion' cannot be null");
        Preconditions.checkState(newDeployment.getSpec().getXp7DeploymentSpecNodesSharedDisks() != null, "'spec.nodesSharedDisks' cannot be null");
        Preconditions.checkState(newDeployment.getSpec().getXp7DeploymentSpecNodeGroups() != null, "'spec.nodeGroups' cannot be null");
        // Check status
        Preconditions.checkState(newDeployment.getStatus() != null, "'status' cannot be null");
        Preconditions.checkState(newDeployment.getStatus().getMessage() != null, "'status.message' cannot be null");
        Preconditions.checkState(newDeployment.getStatus().getState() != null, "'status.state' cannot be null");
        Preconditions.checkState(newDeployment.getStatus().getXp7DeploymentStatusFields() != null, "'status.fields' cannot be null");
        Preconditions.checkState(newDeployment.getStatus().getXp7DeploymentStatusFields().getXp7DeploymentStatusFieldsPods() != null, "'status.fields.pods' cannot be null");
        // Check node groups
        int nrOfMasterNodes = 0;
        int i = 0;
        for (Xp7DeploymentSpecNodeGroup ng : newDeployment.getSpec().getXp7DeploymentSpecNodeGroups()) {
            Preconditions.checkState(ng.getName() != null, "'spec.nodeGroups[" + i + "].name' cannot be null");
            Preconditions.checkState(!ng.getName().equals(cfgStr("operator.charts.values.allNodesKey")),
                    "'spec.nodeGroups[" + i + "].name' cannot be " + cfgStr("operator.charts.values.allNodesKey"));
            dns1123("spec.nodeGroups[" + i + "].name", ng.getName());
            Preconditions.checkState(ng.getData() != null, "'spec.nodeGroups[" + i + "].data' cannot be null");
            Preconditions.checkState(ng.getMaster() != null, "'spec.nodeGroups[" + i + "].master' cannot be null");
            Preconditions.checkState(ng.getReplicas() != null, "'spec.nodeGroups[" + i + "].replicas' cannot be null");
            Preconditions.checkState(ng.getReplicas() >= 0, "'spec.nodeGroups[" + i + "].replicas' has to be >= 0");
            Preconditions.checkState(ng.getXp7DeploymentSpecNodeGroupEnvironment() != null, "'spec.nodeGroups[" + i + "].env' cannot be null");
            Preconditions.checkState(ng.getXp7DeploymentSpecNodeGroupResources() != null, "'spec.nodeGroups[" + i + "].resources' cannot be null");
            Preconditions.checkState(ng.getXp7DeploymentSpecNodeGroupResources().getCpu() != null, "'spec.nodeGroups[" + i + "].resources.cpu' cannot be null");
            Preconditions.checkState(ng.getXp7DeploymentSpecNodeGroupResources().getMemory() != null, "'spec.nodeGroups[" + i + "].resources.memory' cannot be null");
            Preconditions.checkState(ng.getXp7DeploymentSpecNodeGroupResources().getMemory().contains("Mi")
                            || ng.getXp7DeploymentSpecNodeGroupResources().getMemory().contains("Gi"),
                    "'spec.nodeGroups[" + i + "].resources.memory' can only be defined with Gi or Mi");
            Preconditions.checkState(ng.getXp7DeploymentSpecNodeGroupResources().getXp7DeploymentSpecNodeGroupDisks() != null,
                    "'spec.nodeGroups[" + i + "].resources.disks' cannot be null");
            // Check disks
            if (ng.getData()) {
                Preconditions.checkState(
                        ng.getXp7DeploymentSpecNodeGroupResources().getXp7DeploymentSpecNodeGroupDisks().stream()
                                .anyMatch(d -> d.getName().equals("index")),
                        "nodes with data=true must have disk 'index' defined");
            }
            if (ng.getMaster()) {
                nrOfMasterNodes += ng.getReplicas();
            }
            i++;
        }
        // Check replicas
        Preconditions.checkState(nrOfMasterNodes > 0, "some nodeGroups must have master=true");
        Preconditions.checkState(nrOfMasterNodes % 2 == 1, "number of master nodes has to be an odd number");
    }
    // Strict label and name validation
    cfgIfBool("operator.deployment.xp.labels.strictValidation", () -> {
        Preconditions.checkState(newDeployment.getMetadata() != null, "'metadata' cannot be null");
        Preconditions.checkState(newDeployment.getMetadata().getLabels() != null, "'metadata.labels' cannot be null");
        String cloud = newDeployment.getMetadata().getLabels().get(cfgStr("operator.deployment.xp.labels.cloud"));
        String solution = newDeployment.getMetadata().getLabels().get(cfgStr("operator.deployment.xp.labels.solution"));
        String environment = newDeployment.getMetadata().getLabels().get(cfgStr("operator.deployment.xp.labels.environment"));
        String service = newDeployment.getMetadata().getLabels().get(cfgStr("operator.deployment.xp.labels.service"));
        Preconditions.checkState(cloud != null, String.format("'metadata.labels.%s' cannot be null", cfgStr("operator.deployment.xp.labels.cloud")));
        Preconditions.checkState(solution != null, String.format("'metadata.labels.%s' cannot be null", cfgStr("operator.deployment.xp.labels.solution")));
        Preconditions.checkState(environment != null, String.format("'metadata.labels.%s' cannot be null", cfgStr("operator.deployment.xp.labels.environment")));
        Preconditions.checkState(service != null, String.format("'metadata.labels.%s' cannot be null", cfgStr("operator.deployment.xp.labels.service")));
        String name = String.format("%s-%s-%s-%s", cloud, solution, environment, service);
        Preconditions.checkState(newDeployment.getMetadata().getName().equals(name),
                String.format("Xp7Deployment name must be equal to <Cloud>-<Solution>-<Environment>-<Service> according to labels, i.e: '%s'", name));
    });
    if (op == AdmissionOperation.CREATE) {
        Optional<Xp7Deployment> xp7Deployments = getXp7Deployment(admissionReview.getRequest().getObject());
        Preconditions.checkState(xp7Deployments.isEmpty(), "There is already an Xp7Deployment in NS '%s'", newDeployment.getMetadata().getNamespace());
        // Assert version is >= 7.7.X; if we can't parse the version, just let it go
        ComparableVersion currentVersion = new ComparableVersion("7.7.0");
        try {
            if (newDeployment.getSpec().getXpVersion().startsWith("7.")) {
                currentVersion = new ComparableVersion(newDeployment.getSpec().getXpVersion());
            } else if (newDeployment.getSpec().getXpVersion().startsWith("enonic/xp:7.")) {
                String pattern = "^enonic\\/xp:([0-9]+\\.[0-9]+\\.[0-9]+)";
                Matcher m = Pattern.compile(pattern).matcher(newDeployment.getSpec().getXpVersion());
                if (m.find()) {
                    currentVersion = new ComparableVersion(m.group(1));
                }
            }
        } catch (Exception e) {
            // Just ignore
        }
        Preconditions.checkState(currentVersion.compareTo(new ComparableVersion("7.6.100")) > 0,
                "Operator only supports XP version 7.7 and higher");
    }
}
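The version check above accepts either a bare version ("7.8.2") or an image reference ("enonic/xp:7.8.2"), and deliberately falls back to a passing default when the string is unparseable. A small extraction of just the parsing logic, with a main method exercising the three cases (class and method names hypothetical):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class XpVersionParse {
    private static final Pattern IMAGE_VERSION = Pattern.compile("^enonic/xp:([0-9]+\\.[0-9]+\\.[0-9]+)");

    // Returns the bare version string, or null when it can't be determined.
    static String extractVersion(String xpVersion) {
        if (xpVersion.startsWith("7.")) {
            return xpVersion;
        }
        Matcher m = IMAGE_VERSION.matcher(xpVersion);
        return m.find() ? m.group(1) : null;
    }

    public static void main(String[] args) {
        System.out.println(extractVersion("7.8.2"));           // 7.8.2
        System.out.println(extractVersion("enonic/xp:7.9.1")); // 7.9.1
        System.out.println(extractVersion("latest"));          // null (validation then uses the lenient default)
    }
}

Comparing against "7.6.100" rather than "7.7.0" makes the strict greater-than check accept every 7.7.x release while still rejecting 7.6 patch releases.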
Use of io.fabric8.kubernetes.model.annotation.Version in project xp-operator by enonic.
The class CrudTest, method assertContext.
private void assertContext(final CustomResourceDefinition customResourceDefinition, final CustomResourceDefinitionContext customResourceDefinitionContext) {
    String version = null;
    for (CustomResourceDefinitionVersion v : customResourceDefinition.getSpec().getVersions()) {
        if (v.getName().equals(customResourceDefinitionContext.getVersion())) {
            version = v.getName();
        }
    }
    assertEquals(String.format("%s.%s", customResourceDefinition.getSpec().getNames().getPlural(), customResourceDefinition.getSpec().getGroup()),
            customResourceDefinitionContext.getName());
    assertEquals(customResourceDefinition.getSpec().getGroup(), customResourceDefinitionContext.getGroup());
    assertEquals(customResourceDefinition.getSpec().getScope(), customResourceDefinitionContext.getScope());
    assertEquals(customResourceDefinition.getSpec().getNames().getPlural(), customResourceDefinitionContext.getPlural());
    assertEquals(customResourceDefinition.getSpec().getVersions().get(0).getName(), customResourceDefinitionContext.getVersion());
    assertEquals(customResourceDefinition.getSpec().getNames().getKind(), customResourceDefinitionContext.getKind());
    assertEquals(version, customResourceDefinitionContext.getVersion());
}
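A context like the one being validated is typically not assembled by hand; fabric8 can derive it from the CRD itself. A minimal sketch, assuming a fabric8 version whose CustomResourceDefinitionContext.fromCrd accepts the apiextensions v1 model (the CRD name below is illustrative):

import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;

public class CrdContextSketch {
    public static void main(String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            CustomResourceDefinition crd = client.apiextensions().v1()
                    .customResourceDefinitions()
                    .withName("xp7deployments.enonic.cloud") // illustrative name
                    .get();
            // fromCrd derives group, version, scope, plural, kind, and the
            // "<plural>.<group>" name: the same fields assertContext checks.
            CustomResourceDefinitionContext context = CustomResourceDefinitionContext.fromCrd(crd);
            System.out.println(context.getName());
        }
    }
}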