Use of io.fabric8.kubernetes.client.Watcher in project fabric8-maven-plugin by fabric8io.
The class WatcherManager, method watch().
public static void watch(List<ImageConfiguration> ret, Set<HasMetadata> resources, WatcherContext watcherCtx) throws Exception {
    PluginServiceFactory<WatcherContext> pluginFactory =
        watcherCtx.isUseProjectClasspath() ?
            new PluginServiceFactory<>(watcherCtx, ClassUtil.createProjectClassLoader(watcherCtx.getProject(), watcherCtx.getLogger())) :
            new PluginServiceFactory<>(watcherCtx);
    boolean isOpenshift = KubernetesHelper.isOpenShift(watcherCtx.getKubernetesClient());
    PlatformMode mode = isOpenshift ? PlatformMode.openshift : PlatformMode.kubernetes;
    List<Watcher> watchers = pluginFactory.createServiceObjects(
        "META-INF/fabric8/watcher-default",
        "META-INF/fabric8/fabric8-watcher-default",
        "META-INF/fabric8/watcher",
        "META-INF/fabric8-watcher");
    ProcessorConfig config = watcherCtx.getConfig();
    Logger log = watcherCtx.getLogger();
    List<Watcher> usableWatchers = config.prepareProcessors(watchers, "watcher");
    log.verbose("Watchers:");
    Watcher chosen = null;
    for (Watcher watcher : usableWatchers) {
        if (watcher.isApplicable(ret, resources, mode)) {
            if (chosen == null) {
                log.verbose(" - %s [selected]", watcher.getName());
                chosen = watcher;
            } else {
                log.verbose(" - %s", watcher.getName());
            }
        } else {
            log.verbose(" - %s [not applicable]", watcher.getName());
        }
    }
    if (chosen == null) {
        throw new IllegalStateException("No watchers can be used for the current project");
    }
    log.info("Running watcher %s", chosen.getName());
    chosen.watch(ret, resources, mode);
}
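Judging from the calls above, a watcher implementation needs at least getName(), isApplicable(...), and watch(...). A minimal sketch of a custom watcher with that shape follows; the interface and signatures are inferred from this snippet, not taken verbatim from the fabric8-maven-plugin API, and imports are omitted because the exact package names are not shown here.

// Minimal sketch (assumption): a watcher implementing the interface shape
// implied by WatcherManager.watch() above.
public class LoggingWatcher implements Watcher {

    @Override
    public String getName() {
        return "logging";
    }

    @Override
    public boolean isApplicable(List<ImageConfiguration> configs, Set<HasMetadata> resources, PlatformMode mode) {
        // Illustrative rule: applicable whenever there is at least one image configuration.
        return configs != null && !configs.isEmpty();
    }

    @Override
    public void watch(List<ImageConfiguration> configs, Set<HasMetadata> resources, PlatformMode mode) throws Exception {
        // A real watcher would block here, reacting to image or resource changes.
    }
}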
Use of io.fabric8.kubernetes.client.Watcher in project strimzi by strimzi.
The class StatefulSetOperator, method restartPod().
private Future<Void> restartPod(String namespace, String name, Predicate<String> isReady, String podName) {
    Future<Void> result = Future.future();
    log.info("Roll {}/{}: Rolling pod {}", namespace, name, podName);
    Future<Void> deleted = Future.future();
    Future<CompositeFuture> deleteFinished = Future.future();
    Watcher<Pod> watcher = new RollingUpdateWatcher(deleted);
    Watch watch = podOperations.watch(namespace, podName, watcher);
    // Delete the pod
    log.debug("Roll {}/{}: Waiting for pod {} to be deleted", namespace, name, podName);
    Future podReconcileFuture = podOperations.reconcile(namespace, podName, null);
    CompositeFuture.join(podReconcileFuture, deleted).setHandler(deleteResult -> {
        watch.close();
        if (deleteResult.succeeded()) {
            log.debug("Roll {}/{}: Pod {} was deleted", namespace, name, podName);
        }
        deleteFinished.handle(deleteResult);
    });
    deleteFinished.compose(ix -> {
        log.debug("Roll {}/{}: Waiting for new pod {} to get ready", namespace, name, podName);
        Future<Void> readyFuture = Future.future();
        vertx.setPeriodic(1_000, timerId -> {
            p(isReady, podName).setHandler(x -> {
                if (x.succeeded()) {
                    if (x.result()) {
                        vertx.cancelTimer(timerId);
                        readyFuture.complete();
                    }
                    // else not ready
                } else {
                    vertx.cancelTimer(timerId);
                    readyFuture.fail(x.cause());
                }
            });
        });
        return readyFuture;
    }).setHandler(result);
    return result;
}
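RollingUpdateWatcher itself is not shown in this snippet. Below is a plausible minimal sketch, assuming it implements io.fabric8.kubernetes.client.Watcher<Pod> and completes the deleted future once the DELETED event arrives; the actual strimzi class may differ.

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.client.KubernetesClientException;
import io.fabric8.kubernetes.client.Watcher;
import io.vertx.core.Future;

// Sketch (assumption): completes the given future when the watched pod is deleted.
class RollingUpdateWatcher implements Watcher<Pod> {

    private final Future<Void> deleted;

    RollingUpdateWatcher(Future<Void> deleted) {
        this.deleted = deleted;
    }

    @Override
    public void eventReceived(Action action, Pod pod) {
        if (action == Action.DELETED && !deleted.isComplete()) {
            deleted.complete();
        }
    }

    @Override
    public void onClose(KubernetesClientException e) {
        // If the watch dies abnormally before the deletion was seen, fail the future.
        if (e != null && !deleted.isComplete()) {
            deleted.fail(e);
        }
    }
}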
Use of io.fabric8.kubernetes.client.Watcher in project strimzi by strimzi.
The class ClusterControllerTest, method startStop().
/**
 * Does the cluster controller start, and then stop, one verticle per namespace?
 * @param context the test context
 * @param namespaces a comma-separated list of namespaces to watch
 */
private void startStop(TestContext context, String namespaces) {
    AtomicInteger numWatchers = new AtomicInteger(0);
    KubernetesClient client = mock(KubernetesClient.class);
    MixedOperation mockCms = mock(MixedOperation.class);
    when(client.configMaps()).thenReturn(mockCms);
    List<String> namespaceList = asList(namespaces.split(" *,+ *"));
    for (String namespace : namespaceList) {
        MixedOperation mockNamespacedCms = mock(MixedOperation.class);
        when(mockNamespacedCms.watch(any())).thenAnswer(invo -> {
            numWatchers.incrementAndGet();
            Watch mockWatch = mock(Watch.class);
            doAnswer(invo2 -> {
                ((Watcher) invo.getArgument(0)).onClose(null);
                return null;
            }).when(mockWatch).close();
            return mockWatch;
        });
        when(mockNamespacedCms.withLabels(any())).thenReturn(mockNamespacedCms);
        when(mockCms.inNamespace(namespace)).thenReturn(mockNamespacedCms);
    }
    Async async = context.async();
    Map<String, String> env = new HashMap<>();
    env.put(ClusterControllerConfig.STRIMZI_NAMESPACE, namespaces);
    env.put(ClusterControllerConfig.STRIMZI_CONFIGMAP_LABELS, STRIMZI_IO_KIND_CLUSTER);
    env.put(ClusterControllerConfig.STRIMZI_FULL_RECONCILIATION_INTERVAL_MS, "120000");
    Main.run(vertx, client, true, env).setHandler(ar -> {
        context.assertNull(ar.cause(), "Expected all verticles to start OK");
        async.complete();
    });
    async.await();
    context.assertEquals(namespaceList.size(), vertx.deploymentIDs().size(), "A verticle per namespace");
    List<Async> asyncs = new ArrayList<>();
    for (String deploymentId : vertx.deploymentIDs()) {
        Async async2 = context.async();
        asyncs.add(async2);
        vertx.undeploy(deploymentId, ar -> {
            context.assertNull(ar.cause(), "Didn't expect error when undeploying verticle " + deploymentId);
            async2.complete();
        });
    }
    for (Async async2 : asyncs) {
        async2.await();
    }
    if (numWatchers.get() > namespaceList.size()) {
        context.fail("Looks like there were more watchers than namespaces");
    }
}
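The stubbed chain (inNamespace → withLabels → watch) mirrors how the controller would register a ConfigMap watch against a real client. A minimal sketch of that registration, with illustrative namespace and label values:

import java.util.Collections;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.client.KubernetesClientException;
import io.fabric8.kubernetes.client.Watch;
import io.fabric8.kubernetes.client.Watcher;

Watch watch = client.configMaps()
    .inNamespace("my-namespace")                                        // illustrative
    .withLabels(Collections.singletonMap("strimzi.io/kind", "cluster")) // illustrative
    .watch(new Watcher<ConfigMap>() {
        @Override
        public void eventReceived(Action action, ConfigMap cm) {
            // React to ADDED/MODIFIED/DELETED config maps here.
        }

        @Override
        public void onClose(KubernetesClientException e) {
            // e is null on a deliberate close, which is what the mock above simulates.
        }
    });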
Use of io.fabric8.kubernetes.client.Watcher in project strimzi by strimzi.
The class MockKube, method buildStatefulSets().
private MixedOperation<StatefulSet, StatefulSetList, DoneableStatefulSet, RollableScalableResource<StatefulSet, DoneableStatefulSet>> buildStatefulSets(
        MixedOperation<Pod, PodList, DoneablePod, PodResource<Pod, DoneablePod>> mockPods) {
    return new AbstractMockBuilder<StatefulSet, StatefulSetList, DoneableStatefulSet, RollableScalableResource<StatefulSet, DoneableStatefulSet>>(
            StatefulSet.class, StatefulSetList.class, DoneableStatefulSet.class, castClass(RollableScalableResource.class), ssDb) {

        @Override
        protected void nameScopedMocks(RollableScalableResource<StatefulSet, DoneableStatefulSet> resource, String resourceName) {
            mockGet(resourceName, resource);
            // mockCreate("endpoint", endpointDb, resourceName, resource);
            mockCascading(resource);
            mockPatch(resourceName, resource);
            mockDelete(resourceName, resource);
            mockIsReady(resourceName, resource);
            when(resource.create(any())).thenAnswer(cinvocation -> {
                checkNotExists(resourceName);
                StatefulSet argument = cinvocation.getArgument(0);
                LOGGER.debug("create {} {} -> {}", resourceType, resourceName, argument);
                ssDb.put(resourceName, copyResource(argument));
                for (int i = 0; i < argument.getSpec().getReplicas(); i++) {
                    String podName = argument.getMetadata().getName() + "-" + i;
                    podDb.put(podName, new PodBuilder()
                        .withNewMetadata()
                            .withNamespace(argument.getMetadata().getNamespace())
                            .withName(podName)
                        .endMetadata()
                        .build());
                }
                return argument;
            });
            EditReplacePatchDeletable<StatefulSet, StatefulSet, DoneableStatefulSet, Boolean> c = mock(EditReplacePatchDeletable.class);
            when(resource.cascading(false)).thenReturn(c);
            when(c.patch(any())).thenAnswer(patchInvocation -> {
                StatefulSet argument = patchInvocation.getArgument(0);
                return doPatch(resourceName, argument);
            });
            when(resource.scale(anyInt(), anyBoolean())).thenAnswer(invocation -> {
                checkDoesExist(resourceName);
                StatefulSet ss = copyResource(ssDb.get(resourceName));
                int newScale = invocation.getArgument(0);
                ss.getSpec().setReplicas(newScale);
                return doPatch(resourceName, ss);
            });
            when(resource.scale(anyInt())).thenAnswer(invocation -> {
                checkDoesExist(resourceName);
                StatefulSet ss = copyResource(ssDb.get(resourceName));
                int newScale = invocation.getArgument(0);
                ss.getSpec().setReplicas(newScale);
                return doPatch(resourceName, ss);
            });
            when(resource.isReady()).thenAnswer(i -> {
                LOGGER.debug("{} {} is ready", resourceType, resourceName);
                return true;
            });
            mockPods.inNamespace(any()).withName(any()).watch(new Watcher<Pod>() {
                @Override
                public void eventReceived(Action action, Pod resource) {
                    if (action == Action.DELETED) {
                        String podName = resource.getMetadata().getName();
                        String podNamespace = resource.getMetadata().getNamespace();
                        StatefulSet statefulSet = ssDb.get(resourceName);
                        if (podName.startsWith(resourceName + "-")
                                && Integer.parseInt(podName.substring(podName.lastIndexOf("-") + 1)) < statefulSet.getSpec().getReplicas()) {
                            mockPods.inNamespace(podNamespace).withName(podName).create(resource);
                        }
                    }
                }

                @Override
                public void onClose(KubernetesClientException e) {
                }
            });
        }

        private StatefulSet doPatch(String resourceName, StatefulSet argument) {
            int oldScale = ssDb.get(resourceName).getSpec().getReplicas();
            int newScale = argument.getSpec().getReplicas();
            if (newScale > oldScale) {
                LOGGER.debug("scaling up {} {} from {} to {}", resourceType, resourceName, oldScale, newScale);
                Pod examplePod = mockPods.inNamespace(argument.getMetadata().getNamespace())
                    .withName(argument.getMetadata().getName() + "-0").get();
                for (int i = oldScale; i < newScale; i++) {
                    String newPodName = argument.getMetadata().getName() + "-" + i;
                    mockPods.inNamespace(argument.getMetadata().getNamespace()).withName(newPodName)
                        .create(new PodBuilder(examplePod).editMetadata().withName(newPodName).endMetadata().build());
                }
                ssDb.put(resourceName, copyResource(argument));
            } else if (newScale < oldScale) {
                ssDb.put(resourceName, copyResource(argument));
                LOGGER.debug("scaling down {} {} from {} to {}", resourceType, resourceName, oldScale, newScale);
                for (int i = oldScale - 1; i >= newScale; i--) {
                    String newPodName = argument.getMetadata().getName() + "-" + i;
                    mockPods.inNamespace(argument.getMetadata().getNamespace()).withName(newPodName).delete();
                }
            } else {
                ssDb.put(resourceName, copyResource(argument));
            }
            return argument;
        }
    }.build();
}
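Note the design of the anonymous pod watcher registered at the end of nameScopedMocks: it emulates the real StatefulSet controller by recreating any deleted pod whose name matches resourceName-<ordinal> while that ordinal is still below spec.replicas, so tests that delete pods (such as the rolling-update logic shown earlier) observe them reappear just as they would against a real cluster. Scale-downs are unaffected, because a pod removed by doPatch then has an ordinal at or above the new replica count.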
Use of io.fabric8.kubernetes.client.Watcher in project strimzi by strimzi.
The class ControllerIT, method setup().
@Before
public void setup(TestContext context) throws Exception {
    LOGGER.info("Setting up test");
    Runtime.getRuntime().addShutdownHook(kafkaHook);
    kafkaCluster = new KafkaCluster();
    kafkaCluster.addBrokers(1);
    kafkaCluster.deleteDataPriorToStartup(true);
    kafkaCluster.deleteDataUponShutdown(true);
    kafkaCluster.usingDirectory(Files.createTempDirectory("controller-integration-test").toFile());
    kafkaCluster.startup();
    kubeClient = new DefaultKubernetesClient().inNamespace(NAMESPACE);
    LOGGER.info("Using namespace {}", NAMESPACE);
    Map<String, String> m = new HashMap<>();
    m.put(Config.KAFKA_BOOTSTRAP_SERVERS.key, kafkaCluster.brokerList());
    m.put(Config.ZOOKEEPER_CONNECT.key, "localhost:" + zkPort(kafkaCluster));
    m.put(Config.NAMESPACE.key, NAMESPACE);
    session = new Session(kubeClient, new Config(m));
    Async async = context.async();
    vertx.deployVerticle(session, ar -> {
        if (ar.succeeded()) {
            deploymentId = ar.result();
            adminClient = session.adminClient;
            topicsConfigWatcher = session.topicConfigsWatcher;
            topicWatcher = session.topicWatcher;
            topicsWatcher = session.topicsWatcher;
            async.complete();
        } else {
            context.fail("Failed to deploy session");
        }
    });
    async.await();
    waitFor(context, () -> this.topicsWatcher.started(), timeout, "Topics watcher not started");
    waitFor(context, () -> this.topicsConfigWatcher.started(), timeout, "Topic configs watcher not started");
    waitFor(context, () -> this.topicWatcher.started(), timeout, "Topic watcher not started");
    // We can't delete events, so record the events which exist at the start of the test
    // and then waitForEvents() can ignore those
    preExistingEvents = kubeClient.events().inNamespace(NAMESPACE)
        .withLabels(cmPredicate.labels())
        .list().getItems().stream()
        .map(evt -> evt.getMetadata().getUid())
        .collect(Collectors.toSet());
    LOGGER.info("Finished setting up test");
}
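The waitFor(...) helper is not shown here; from the call sites it evidently polls a condition until it becomes true or a timeout expires. A hypothetical sketch matching the calls above, where the name, signature, and polling interval are inferred rather than taken from the strimzi sources:

import java.util.function.BooleanSupplier;
import io.vertx.ext.unit.TestContext;

// Hypothetical helper: poll the condition until it is true,
// failing the test context if the timeout elapses first.
private void waitFor(TestContext context, BooleanSupplier condition, long timeoutMs, String failMessage) {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
        if (System.currentTimeMillis() > deadline) {
            context.fail(failMessage);
            return;
        }
        try {
            Thread.sleep(100); // assumed polling interval
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            context.fail(e);
            return;
        }
    }
}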