Use of io.fabric8.kubernetes.api.model.HasMetadata in project vertx-openshift-it by cescoffier.
The class CircuitBreakerIT, method deployApp.
/**
 * @param name the name under which the application is deployed (and tracked by the test assistant)
 * @param templatePath the path to the OpenShift template file to deploy
 * @return the app route
 * @throws IOException if the template file cannot be read
 */
private static String deployApp(String name, String templatePath) throws IOException {
    List<? extends HasMetadata> entities = OPENSHIFT.deploy(name, new File(templatePath));
    String appName = entities.stream()
        .filter(hm -> hm instanceof DeploymentConfig)
        .map(hm -> (DeploymentConfig) hm)
        .map(dc -> dc.getMetadata().getName())
        .findFirst()
        .orElseThrow(() -> new IllegalStateException("Application deployment config not found"));
    Route route = OPENSHIFT.client().routes()
        .inNamespace(OPENSHIFT.project())
        .withName(appName)
        .get();
    assertThat(route).isNotNull();
    return "http://" + route.getSpec().getHost();
}
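The filter/map/cast chain above is a common pattern when working with the heterogeneous List<? extends HasMetadata> that a template deployment returns. A generic helper capturing the same lookup might look like this (a sketch; Entities and firstOf are hypothetical names, not part of the project):

import io.fabric8.kubernetes.api.model.HasMetadata;
import java.util.List;
import java.util.Optional;

// Hypothetical helper: find the first deployed entity of a given type.
final class Entities {

    static <T extends HasMetadata> Optional<T> firstOf(List<? extends HasMetadata> entities, Class<T> type) {
        return entities.stream()
            .filter(type::isInstance)   // keep only entities of the requested type
            .map(type::cast)            // safe cast after the instanceof filter
            .findFirst();
    }
}

With such a helper, the lookup in deployApp would reduce to Entities.firstOf(entities, DeploymentConfig.class).map(dc -> dc.getMetadata().getName()).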
Use of io.fabric8.kubernetes.api.model.HasMetadata in project vertx-openshift-it by cescoffier.
The class OpenShiftTestAssistant, method deploy.
public List<? extends HasMetadata> deploy(String name, File template) throws IOException {
    try (FileInputStream fis = new FileInputStream(template)) {
        List<HasMetadata> entities = client.load(fis).createOrReplace();
        created.put(name, entities);
        System.out.println(name + " deployed, " + entities.size() + " object(s) created.");
        return entities;
    }
}
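Because deploy records everything it creates in the created map, a matching teardown can delete those objects again. A minimal sketch of such a method, assuming the same client and created fields (the cleanup name is hypothetical, not part of the project):

public void cleanup() {
    // Delete every object recorded by deploy(), then forget them.
    created.values().stream()
        .flatMap(List::stream)
        .forEach(meta -> client.resource(meta).delete());
    created.clear();
}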
Use of io.fabric8.kubernetes.api.model.HasMetadata in project strimzi by strimzi.
The class AbstractAssemblyOperator, method reconcileAll.
/**
* Reconcile assembly resources in the given namespace having the given selector.
* Reconciliation works by getting the assembly ConfigMaps in the given namespace with the given selector and
* comparing with the corresponding {@linkplain #getResources(String) resource}.
* <ul>
* <li>An assembly will be {@linkplain #createOrUpdate(Reconciliation, ConfigMap, Handler) created} for all ConfigMaps without same-named resources</li>
* <li>An assembly will be {@linkplain #delete(Reconciliation, Handler) deleted} for all resources without same-named ConfigMaps</li>
* </ul>
*
* @param trigger A description of the triggering event (timer or watch), used for logging
* @param namespace The namespace
* @param selector The selector
* @return a latch whose count reaches zero once the reconciliation of every matching assembly has completed
*/
public final CountDownLatch reconcileAll(String trigger, String namespace, Labels selector) {
    Labels selectorWithCluster = selector.withType(assemblyType);
    // get ConfigMaps with kind=cluster&type=kafka (or connect, or connect-s2i) for the corresponding cluster type
    List<ConfigMap> cms = configMapOperations.list(namespace, selectorWithCluster);
    Set<String> cmsNames = cms.stream().map(cm -> cm.getMetadata().getName()).collect(Collectors.toSet());
    log.debug("reconcileAll({}, {}): ConfigMaps with labels {}: {}", assemblyType, trigger, selectorWithCluster, cmsNames);
    // get resources with kind=cluster&type=kafka (or connect, or connect-s2i)
    List<? extends HasMetadata> resources = getResources(namespace);
    // now extract the cluster name from those
    Set<String> resourceNames = resources.stream()
        // exclude the Cluster CM, which won't have a cluster label
        .filter(r -> Labels.kind(r) == null)
        .map(Labels::cluster)
        .collect(Collectors.toSet());
    log.debug("reconcileAll({}, {}): Other resources with labels {}: {}", assemblyType, trigger, selectorWithCluster, resourceNames);
    cmsNames.addAll(resourceNames);
    // We use a latch so that callers (specifically, test callers) know when the reconciliation is complete.
    // Using futures would be more complex for no benefit.
    CountDownLatch latch = new CountDownLatch(cmsNames.size());
    for (String name : cmsNames) {
        Reconciliation reconciliation = new Reconciliation(trigger, assemblyType, namespace, name);
        reconcileAssembly(reconciliation, result -> {
            if (result.succeeded()) {
                log.info("{}: Assembly reconciled", reconciliation);
            } else {
                log.error("{}: Failed to reconcile", reconciliation);
            }
            latch.countDown();
        });
    }
    return latch;
}
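Because reconcileAll returns the latch instead of blocking, the caller decides whether and how long to wait. A test-style caller might look like the following sketch (the operator instance, trigger string, namespace, and timeout are assumptions, and Labels.EMPTY is assumed to denote an empty selector):

// Trigger a full pass and wait for every per-assembly reconciliation to finish.
CountDownLatch latch = operator.reconcileAll("manual", "my-namespace", Labels.EMPTY);
if (!latch.await(60, TimeUnit.SECONDS)) {
    throw new AssertionError("reconcileAll did not complete within 60 seconds");
}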
Use of io.fabric8.kubernetes.api.model.HasMetadata in project strimzi by strimzi.
The class LabelPredicateTest, method testTest.
@Test
public void testTest() {
    LabelPredicate p = new LabelPredicate("foo", "1", "bar", "2");
    // Both required labels present with the required values: matches.
    HasMetadata h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "1").addToLabels("bar", "2").endMetadata().build();
    assertTrue(p.test(h));
    // Extra labels beyond the required ones are ignored: still matches.
    h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "1").addToLabels("bar", "2").addToLabels("baz", "3").endMetadata().build();
    assertTrue(p.test(h));
    // Wrong value for a required label: no match.
    h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "2").addToLabels("bar", "2").endMetadata().build();
    assertFalse(p.test(h));
    // Missing one required label: no match.
    h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "1").endMetadata().build();
    assertFalse(p.test(h));
    // No labels at all: no match.
    h = new ConfigMapBuilder().editOrNewMetadata().endMetadata().build();
    assertFalse(p.test(h));
}
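The assertions pin down the predicate's semantics: every configured label must be present with exactly its value, while extra labels on the resource are ignored. A predicate with that behavior fits in a few lines (a sketch under those assumptions, not the project's actual implementation; the class name is hypothetical):

import io.fabric8.kubernetes.api.model.HasMetadata;
import java.util.Map;
import java.util.function.Predicate;

// Subset match: true iff every required label appears on the resource with the same value.
final class LabelSubsetPredicate implements Predicate<HasMetadata> {

    private final Map<String, String> required;

    LabelSubsetPredicate(Map<String, String> required) {
        this.required = required;
    }

    @Override
    public boolean test(HasMetadata resource) {
        Map<String, String> labels = resource.getMetadata().getLabels();
        return labels != null && labels.entrySet().containsAll(required.entrySet());
    }
}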
Use of io.fabric8.kubernetes.api.model.HasMetadata in project strimzi by strimzi.
The class Controller, method update3Way.
private void update3Way(HasMetadata involvedObject, Topic k8sTopic, Topic kafkaTopic, Topic privateTopic,
                        Handler<AsyncResult<Void>> reconciliationResultHandler) {
    if (!privateTopic.getMapName().equals(k8sTopic.getMapName())) {
        reconciliationResultHandler.handle(Future.failedFuture(new ControllerException(involvedObject,
            "Topic '" + kafkaTopic.getTopicName() + "' is already managed via ConfigMap '" + privateTopic.getMapName()
                + "'; it cannot also be managed via the ConfigMap '" + k8sTopic.getMapName() + "'")));
        return;
    }
    TopicDiff oursKafka = TopicDiff.diff(privateTopic, kafkaTopic);
    LOGGER.debug("topicStore->kafkaTopic: {}", oursKafka);
    TopicDiff oursK8s = TopicDiff.diff(privateTopic, k8sTopic);
    LOGGER.debug("topicStore->k8sTopic: {}", oursK8s);
    String conflict = oursKafka.conflict(oursK8s);
    if (conflict != null) {
        final String message = "ConfigMap and Topic both changed in a conflicting way: " + conflict;
        LOGGER.error(message);
        enqueue(new Event(involvedObject, message, EventType.INFO, eventResult -> { }));
        reconciliationResultHandler.handle(Future.failedFuture(new Exception(message)));
    } else {
        TopicDiff merged = oursKafka.merge(oursK8s);
        LOGGER.debug("Diffs do not conflict, merged diff: {}", merged);
        if (merged.isEmpty()) {
            LOGGER.info("All three topics are identical");
            reconciliationResultHandler.handle(Future.succeededFuture());
        } else {
            Topic result = merged.apply(privateTopic);
            int partitionsDelta = merged.numPartitionsDelta();
            if (partitionsDelta < 0) {
                final String message = "Number of partitions cannot be decreased";
                LOGGER.error(message);
                enqueue(new Event(involvedObject, message, EventType.INFO, eventResult -> { }));
                reconciliationResultHandler.handle(Future.failedFuture(new Exception(message)));
            } else {
                if (merged.changesReplicationFactor()) {
                    LOGGER.error("Changes replication factor");
                    enqueue(new ChangeReplicationFactor(result, involvedObject, null));
                }
                // TODO What if we increase min.in.sync.replicas and the number of replicas,
                // such that the old number of replicas < the new min isr? But likewise
                // we could decrease, so order of tasks in the queue will need to change
                // depending on what the diffs are.
                LOGGER.debug("Updating cm, kafka topic and topicStore");
                // TODO replace this with compose
                enqueue(new UpdateConfigMap(result, ar -> {
                    Handler<Void> topicStoreHandler =
                        ignored -> enqueue(new UpdateInTopicStore(result, involvedObject, reconciliationResultHandler));
                    Handler<Void> partitionsHandler;
                    if (partitionsDelta > 0) {
                        partitionsHandler = ar4 -> enqueue(new IncreaseKafkaPartitions(result, involvedObject,
                            ar2 -> topicStoreHandler.handle(null)));
                    } else {
                        partitionsHandler = topicStoreHandler;
                    }
                    if (merged.changesConfig()) {
                        enqueue(new UpdateKafkaConfig(result, involvedObject, ar2 -> partitionsHandler.handle(null)));
                    } else {
                        enqueue(partitionsHandler);
                    }
                }));
            }
        }
    }
}
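Stripped of the Vert.x plumbing, update3Way is a classic 3-way merge: diff the last-known private copy against each side, fail when both sides change the same thing differently, otherwise apply the combined diff. The core idea in isolation, with flat string maps standing in for Topic and TopicDiff (a sketch; the real classes also track partitions and replication factor):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

final class ThreeWayMergeSketch {

    // Merge two descendants of a common base; throws on a conflicting change,
    // mirroring oursKafka.conflict(oursK8s) returning non-null above.
    static Map<String, String> merge(Map<String, String> base,
                                     Map<String, String> kafka,
                                     Map<String, String> k8s) {
        Set<String> keys = new HashSet<>(base.keySet());
        keys.addAll(kafka.keySet());
        keys.addAll(k8s.keySet());
        Map<String, String> result = new HashMap<>();
        for (String key : keys) {
            String kafkaValue = kafka.get(key);
            String k8sValue = k8s.get(key);
            boolean kafkaChanged = !Objects.equals(base.get(key), kafkaValue);
            boolean k8sChanged = !Objects.equals(base.get(key), k8sValue);
            if (kafkaChanged && k8sChanged && !Objects.equals(kafkaValue, k8sValue)) {
                throw new IllegalStateException("Conflicting change to '" + key + "'");
            }
            String merged = kafkaChanged ? kafkaValue : k8sValue;
            if (merged != null) {          // null means the changing side removed the key
                result.put(key, merged);
            }
        }
        return result;
    }
}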