
Example 11 with Watcher

Use of io.fabric8.kubernetes.client.Watcher in project strimzi by strimzi.

The class MockKube, method buildStatefulSets:

private MixedOperation<StatefulSet, StatefulSetList, DoneableStatefulSet, RollableScalableResource<StatefulSet, DoneableStatefulSet>> buildStatefulSets(MixedOperation<Pod, PodList, DoneablePod, PodResource<Pod, DoneablePod>> mockPods) {
    return new AbstractMockBuilder<StatefulSet, StatefulSetList, DoneableStatefulSet, RollableScalableResource<StatefulSet, DoneableStatefulSet>>(StatefulSet.class, StatefulSetList.class, DoneableStatefulSet.class, castClass(RollableScalableResource.class), ssDb) {

        @Override
        protected void nameScopedMocks(RollableScalableResource<StatefulSet, DoneableStatefulSet> resource, String resourceName) {
            mockGet(resourceName, resource);
            // mockCreate("endpoint", endpointDb, resourceName, resource);
            mockCascading(resource);
            mockPatch(resourceName, resource);
            mockDelete(resourceName, resource);
            mockIsReady(resourceName, resource);
            when(resource.create(any())).thenAnswer(cinvocation -> {
                checkNotExists(resourceName);
                StatefulSet argument = cinvocation.getArgument(0);
                LOGGER.debug("create {} {} -> {}", resourceType, resourceName, argument);
                ssDb.put(resourceName, copyResource(argument));
                for (int i = 0; i < argument.getSpec().getReplicas(); i++) {
                    String podName = argument.getMetadata().getName() + "-" + i;
                    podDb.put(podName, new PodBuilder().withNewMetadata().withNamespace(argument.getMetadata().getNamespace()).withName(podName).endMetadata().build());
                }
                return argument;
            });
            EditReplacePatchDeletable<StatefulSet, StatefulSet, DoneableStatefulSet, Boolean> c = mock(EditReplacePatchDeletable.class);
            when(resource.cascading(false)).thenReturn(c);
            when(c.patch(any())).thenAnswer(patchInvocation -> {
                StatefulSet argument = patchInvocation.getArgument(0);
                return doPatch(resourceName, argument);
            });
            when(resource.scale(anyInt(), anyBoolean())).thenAnswer(invocation -> {
                checkDoesExist(resourceName);
                StatefulSet ss = copyResource(ssDb.get(resourceName));
                int newScale = invocation.getArgument(0);
                ss.getSpec().setReplicas(newScale);
                return doPatch(resourceName, ss);
            });
            when(resource.scale(anyInt())).thenAnswer(invocation -> {
                checkDoesExist(resourceName);
                StatefulSet ss = copyResource(ssDb.get(resourceName));
                int newScale = invocation.getArgument(0);
                ss.getSpec().setReplicas(newScale);
                return doPatch(resourceName, ss);
            });
            when(resource.isReady()).thenAnswer(i -> {
                LOGGER.debug("{} {} is ready", resourceType, resourceName);
                return true;
            });
            mockPods.inNamespace(any()).withName(any()).watch(new Watcher<Pod>() {

                @Override
                public void eventReceived(Action action, Pod resource) {
                    if (action == Action.DELETED) {
                        String podName = resource.getMetadata().getName();
                        String podNamespace = resource.getMetadata().getNamespace();
                        StatefulSet statefulSet = ssDb.get(resourceName);
                        if (podName.startsWith(resourceName + "-") && Integer.parseInt(podName.substring(podName.lastIndexOf("-") + 1)) < statefulSet.getSpec().getReplicas()) {
                            mockPods.inNamespace(podNamespace).withName(podName).create(resource);
                        }
                    }
                }

                @Override
                public void onClose(KubernetesClientException e) {
                }
            });
        }

        private StatefulSet doPatch(String resourceName, StatefulSet argument) {
            int oldScale = ssDb.get(resourceName).getSpec().getReplicas();
            int newScale = argument.getSpec().getReplicas();
            if (newScale > oldScale) {
                LOGGER.debug("scaling up {} {} from {} to {}", resourceType, resourceName, oldScale, newScale);
                Pod examplePod = mockPods.inNamespace(argument.getMetadata().getNamespace()).withName(argument.getMetadata().getName() + "-0").get();
                for (int i = oldScale; i < newScale; i++) {
                    String newPodName = argument.getMetadata().getName() + "-" + i;
                    mockPods.inNamespace(argument.getMetadata().getNamespace()).withName(newPodName).create(new PodBuilder(examplePod).editMetadata().withName(newPodName).endMetadata().build());
                }
                ssDb.put(resourceName, copyResource(argument));
            } else if (newScale < oldScale) {
                ssDb.put(resourceName, copyResource(argument));
                LOGGER.debug("scaling down {} {} from {} to {}", resourceType, resourceName, oldScale, newScale);
                for (int i = oldScale - 1; i >= newScale; i--) {
                    String newPodName = argument.getMetadata().getName() + "-" + i;
                    mockPods.inNamespace(argument.getMetadata().getNamespace()).withName(newPodName).delete();
                }
            } else {
                ssDb.put(resourceName, copyResource(argument));
            }
            return argument;
        }
    }.build();
}
Also used : DoneablePod(io.fabric8.kubernetes.api.model.DoneablePod) KubernetesResourceList(io.fabric8.kubernetes.api.model.KubernetesResourceList) Doneable(io.fabric8.kubernetes.api.model.Doneable) Deployment(io.fabric8.kubernetes.api.model.extensions.Deployment) LoggerFactory(org.slf4j.LoggerFactory) Watcher(io.fabric8.kubernetes.client.Watcher) DoneablePersistentVolumeClaim(io.fabric8.kubernetes.api.model.DoneablePersistentVolumeClaim) Resource(io.fabric8.kubernetes.client.dsl.Resource) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) Map(java.util.Map) Mockito.doAnswer(org.mockito.Mockito.doAnswer) PersistentVolumeClaimList(io.fabric8.kubernetes.api.model.PersistentVolumeClaimList) EditReplacePatchDeletable(io.fabric8.kubernetes.client.dsl.EditReplacePatchDeletable) KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException) ExtensionsAPIGroupDSL(io.fabric8.kubernetes.client.dsl.ExtensionsAPIGroupDSL) Predicate(java.util.function.Predicate) Collection(java.util.Collection) Set(java.util.Set) HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) Collectors(java.util.stream.Collectors) DoneableDeployment(io.fabric8.kubernetes.api.model.extensions.DoneableDeployment) DoneableEndpoints(io.fabric8.kubernetes.api.model.DoneableEndpoints) PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim) ServiceList(io.fabric8.kubernetes.api.model.ServiceList) Mockito.mock(org.mockito.Mockito.mock) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) DoneableService(io.fabric8.kubernetes.api.model.DoneableService) MixedOperation(io.fabric8.kubernetes.client.dsl.MixedOperation) DoneableStatefulSet(io.fabric8.kubernetes.api.model.extensions.DoneableStatefulSet) DeploymentList(io.fabric8.kubernetes.api.model.extensions.DeploymentList) EndpointsList(io.fabric8.kubernetes.api.model.EndpointsList) Watch(io.fabric8.kubernetes.client.Watch) HashMap(java.util.HashMap) ArgumentMatchers.anyBoolean(org.mockito.ArgumentMatchers.anyBoolean) ScalableResource(io.fabric8.kubernetes.client.dsl.ScalableResource) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) RollableScalableResource(io.fabric8.kubernetes.client.dsl.RollableScalableResource) Service(io.fabric8.kubernetes.api.model.Service) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) StatefulSetList(io.fabric8.kubernetes.api.model.extensions.StatefulSetList) Endpoints(io.fabric8.kubernetes.api.model.Endpoints) Logger(org.slf4j.Logger) Collections.emptySet(java.util.Collections.emptySet) StatefulSet(io.fabric8.kubernetes.api.model.extensions.StatefulSet) OngoingStubbing(org.mockito.stubbing.OngoingStubbing) ConfigMapList(io.fabric8.kubernetes.api.model.ConfigMapList) Pod(io.fabric8.kubernetes.api.model.Pod) Mockito.when(org.mockito.Mockito.when) DoneableConfigMap(io.fabric8.kubernetes.api.model.DoneableConfigMap) PodResource(io.fabric8.kubernetes.client.dsl.PodResource) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) KubernetesResource(io.fabric8.kubernetes.api.model.KubernetesResource) PodList(io.fabric8.kubernetes.api.model.PodList) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) AppsAPIGroupDSL(io.fabric8.kubernetes.client.dsl.AppsAPIGroupDSL)
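For reference, a minimal sketch of registering the same kind of io.fabric8.kubernetes.client.Watcher against a client outside of MockKube. The namespace, pod name and use of DefaultKubernetesClient are illustrative assumptions, not taken from the Strimzi code above; only the call chain and callback shape mirror it.

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientException;
import io.fabric8.kubernetes.client.Watch;
import io.fabric8.kubernetes.client.Watcher;

public class PodWatchSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical namespace and pod name, for illustration only
        try (KubernetesClient client = new DefaultKubernetesClient()) {
            Watch watch = client.pods().inNamespace("my-namespace").withName("my-pod-0").watch(new Watcher<Pod>() {
                @Override
                public void eventReceived(Action action, Pod pod) {
                    // Example 11 reacts to DELETED in the same callback to re-create StatefulSet pods
                    if (action == Action.DELETED) {
                        System.out.println("Pod deleted: " + pod.getMetadata().getName());
                    }
                }

                @Override
                public void onClose(KubernetesClientException e) {
                    // e is null when the watch is closed deliberately
                }
            });
            // ... do work, then stop receiving events
            watch.close();
        }
    }
}

Closing the returned Watch stops event delivery; the mock in Example 11 keeps its watch open so that pod deletions continue to trigger re-creation.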

Example 12 with Watcher

Use of io.fabric8.kubernetes.client.Watcher in project strimzi by strimzi.

The class ControllerIT, method setup:

@Before
public void setup(TestContext context) throws Exception {
    LOGGER.info("Setting up test");
    Runtime.getRuntime().addShutdownHook(kafkaHook);
    kafkaCluster = new KafkaCluster();
    kafkaCluster.addBrokers(1);
    kafkaCluster.deleteDataPriorToStartup(true);
    kafkaCluster.deleteDataUponShutdown(true);
    kafkaCluster.usingDirectory(Files.createTempDirectory("controller-integration-test").toFile());
    kafkaCluster.startup();
    kubeClient = new DefaultKubernetesClient().inNamespace(NAMESPACE);
    LOGGER.info("Using namespace {}", NAMESPACE);
    Map<String, String> m = new HashMap<>();
    m.put(Config.KAFKA_BOOTSTRAP_SERVERS.key, kafkaCluster.brokerList());
    m.put(Config.ZOOKEEPER_CONNECT.key, "localhost:" + zkPort(kafkaCluster));
    m.put(Config.NAMESPACE.key, NAMESPACE);
    session = new Session(kubeClient, new Config(m));
    Async async = context.async();
    vertx.deployVerticle(session, ar -> {
        if (ar.succeeded()) {
            deploymentId = ar.result();
            adminClient = session.adminClient;
            topicsConfigWatcher = session.topicConfigsWatcher;
            topicWatcher = session.topicWatcher;
            topicsWatcher = session.topicsWatcher;
            async.complete();
        } else {
            context.fail("Failed to deploy session");
        }
    });
    async.await();
    waitFor(context, () -> this.topicsWatcher.started(), timeout, "Topics watcher not started");
    waitFor(context, () -> this.topicsConfigWatcher.started(), timeout, "Topic configs watcher not started");
    waitFor(context, () -> this.topicWatcher.started(), timeout, "Topic watcher not started");
    // We can't delete events, so record the events which exist at the start of the test
    // and then waitForEvents() can ignore those
    preExistingEvents = kubeClient.events().inNamespace(NAMESPACE).withLabels(cmPredicate.labels()).list().getItems().stream().map(evt -> evt.getMetadata().getUid()).collect(Collectors.toSet());
    LOGGER.info("Finished setting up test");
}
Also used : KafkaCluster(io.debezium.kafka.KafkaCluster) HashMap(java.util.HashMap) Async(io.vertx.ext.unit.Async) DefaultKubernetesClient(io.fabric8.kubernetes.client.DefaultKubernetesClient) Before(org.junit.Before)
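The waitFor(...) helper called above is defined elsewhere in ControllerIT and is not part of this excerpt. The sketch below shows one way such a polling helper could be written, assuming a BooleanSupplier condition and a timeout in milliseconds; the names and signature are assumptions, not the project's actual code.

import java.util.function.BooleanSupplier;

import io.vertx.ext.unit.TestContext;

public class WaitForSketch {

    // Hypothetical helper: polls the condition until it is true or the timeout (ms) elapses,
    // failing the test context with the supplied message on timeout.
    public static void waitFor(TestContext context, BooleanSupplier condition, long timeoutMs, String message) {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                context.fail(message);
                return;
            }
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                context.fail(message);
                return;
            }
        }
    }
}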

Example 13 with Watcher

Use of the WatchService.ImageWatcher in project docker-maven-plugin by fabric8io.

The class WatchService, method createCopyWatchTask:

private Runnable createCopyWatchTask(final ImageWatcher watcher, final MojoParameters mojoParameters, final String containerBaseDir) throws MojoExecutionException {
    final ImageConfiguration imageConfig = watcher.getImageConfiguration();
    final AssemblyFiles files = archiveService.getAssemblyFiles(imageConfig, mojoParameters);
    return new Runnable() {

        @Override
        public void run() {
            List<AssemblyFiles.Entry> entries = files.getUpdatedEntriesAndRefresh();
            if (entries != null && entries.size() > 0) {
                try {
                    log.info("%s: Assembly changed. Copying changed files to container ...", imageConfig.getDescription());
                    File changedFilesArchive = archiveService.createChangedFilesArchive(entries, files.getAssemblyDirectory(), imageConfig.getName(), mojoParameters);
                    dockerAccess.copyArchive(watcher.getContainerId(), changedFilesArchive, containerBaseDir);
                    callPostExec(watcher);
                } catch (MojoExecutionException | IOException | ExecException e) {
                    log.error("%s: Error when copying files to container %s: %s", imageConfig.getDescription(), watcher.getContainerId(), e.getMessage());
                }
            }
        }
    };
}
Also used : AssemblyFiles(io.fabric8.maven.docker.assembly.AssemblyFiles) MojoExecutionException(org.apache.maven.plugin.MojoExecutionException) ImageConfiguration(io.fabric8.maven.docker.config.ImageConfiguration) WatchImageConfiguration(io.fabric8.maven.docker.config.WatchImageConfiguration) ExecException(io.fabric8.maven.docker.access.ExecException) IOException(java.io.IOException) File(java.io.File)
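files.getUpdatedEntriesAndRefresh() comes from the plugin's AssemblyFiles tracker, which is not shown here; conceptually it compares each assembly file's modification time against the previous scan. The class below is a rough, hypothetical illustration of that idea rather than the plugin's actual implementation.

import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical change tracker: remembers each file's last-modified timestamp and
// returns the files whose timestamp changed since the previous call.
public class ChangedFileTracker {

    private final Map<File, Long> lastSeen = new HashMap<>();

    public List<File> updatedAndRefresh(List<File> candidates) {
        List<File> changed = new ArrayList<>();
        for (File f : candidates) {
            long modified = f.lastModified();
            Long previous = lastSeen.put(f, modified);
            if (previous == null || previous != modified) {
                // first sighting, or timestamp changed since the last scan
                changed.add(f);
            }
        }
        return changed;
    }
}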

Example 14 with Watcher

Use of the WatchService.ImageWatcher in project docker-maven-plugin by fabric8io.

The class WatchService, method watch:

public synchronized void watch(WatchContext context, BuildService.BuildContext buildContext, List<ImageConfiguration> images) throws DockerAccessException, MojoExecutionException {
    // Important to be a single-threaded scheduler since watch jobs must run serialized
    ScheduledExecutorService executor = null;
    try {
        executor = Executors.newSingleThreadScheduledExecutor();
        for (StartOrderResolver.Resolvable resolvable : runService.getImagesConfigsInOrder(queryService, images)) {
            final ImageConfiguration imageConfig = (ImageConfiguration) resolvable;
            String imageId = queryService.getImageId(imageConfig.getName());
            String containerId = runService.lookupContainer(imageConfig.getName());
            ImageWatcher watcher = new ImageWatcher(imageConfig, context, imageId, containerId);
            long interval = watcher.getInterval();
            WatchMode watchMode = watcher.getWatchMode(imageConfig);
            log.info("Watching " + imageConfig.getName() + (watchMode != null ? " using " + watchMode.getDescription() : ""));
            ArrayList<String> tasks = new ArrayList<>();
            if (imageConfig.getBuildConfiguration() != null && imageConfig.getBuildConfiguration().getAssemblyConfiguration() != null) {
                if (watcher.isCopy()) {
                    String containerBaseDir = imageConfig.getBuildConfiguration().getAssemblyConfiguration().getTargetDir();
                    schedule(executor, createCopyWatchTask(watcher, context.getMojoParameters(), containerBaseDir), interval);
                    tasks.add("copying artifacts");
                }
                if (watcher.isBuild()) {
                    schedule(executor, createBuildWatchTask(watcher, context.getMojoParameters(), watchMode == WatchMode.both, buildContext), interval);
                    tasks.add("rebuilding");
                }
            }
            if (watcher.isRun() && watcher.getContainerId() != null) {
                schedule(executor, createRestartWatchTask(watcher), interval);
                tasks.add("restarting");
            }
            if (tasks.size() > 0) {
                log.info("%s: Watch for %s", imageConfig.getDescription(), StringUtils.join(tasks.toArray(), " and "));
            }
        }
        log.info("Waiting ...");
        if (!context.isKeepRunning()) {
            runService.addShutdownHookForStoppingContainers(context.isKeepContainer(), context.isRemoveVolumes(), context.isAutoCreateCustomNetworks());
        }
        wait();
    } catch (InterruptedException e) {
        log.warn("Interrupted");
    } finally {
        if (executor != null) {
            executor.shutdownNow();
        }
    }
}
Also used : ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) StartOrderResolver(io.fabric8.maven.docker.util.StartOrderResolver) ImageConfiguration(io.fabric8.maven.docker.config.ImageConfiguration) WatchImageConfiguration(io.fabric8.maven.docker.config.WatchImageConfiguration) WatchMode(io.fabric8.maven.docker.config.WatchMode) ArrayList(java.util.ArrayList)
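The comment about the single-threaded scheduler is the key design choice here: copy, build and restart tasks for the same image must never run concurrently. The stand-alone snippet below, unrelated to the plugin's own classes, illustrates why Executors.newSingleThreadScheduledExecutor() gives that guarantee: all periodic tasks share one worker thread, so they execute strictly serialized.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SerializedWatchTasks {
    public static void main(String[] args) throws InterruptedException {
        // A single worker thread means periodic tasks never overlap: a slow "build" tick
        // simply delays the next "copy" tick instead of running alongside it.
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        executor.scheduleAtFixedRate(() -> System.out.println("copy tick on " + Thread.currentThread().getName()), 0, 500, TimeUnit.MILLISECONDS);
        executor.scheduleAtFixedRate(() -> System.out.println("build tick on " + Thread.currentThread().getName()), 0, 500, TimeUnit.MILLISECONDS);
        Thread.sleep(2000);
        executor.shutdownNow();
    }
}

Running this prints the same thread name for both tasks; with a multi-threaded pool the ticks could interleave, which is exactly what the plugin's comment guards against.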

Example 15 with Watcher

Use of the ProfileWatcherImpl watcher in project fabric8 by jboss-fuse.

The class ProfileWatcherImpl, method run:

public void run() {
    assertValid();
    LOG.debug("Profile watcher thread started");
    int oldCounter = -1;
    SortedSet<String> oldActiveProfiles = null;
    Map<File, Long> localChecksums = new HashMap<File, Long>();
    Map<File, Long> localModified = new HashMap<File, Long>();
    Set<Profile> refreshProfiles = new HashSet<Profile>();
    ProfileService profileService = fabricService.get().adapt(ProfileService.class);
    while (running.get() && watchURLs.size() > 0) {
        SortedSet<String> currentActiveProfiles = getCurrentActiveProfileVersions();
        if (profileArtifacts == null || oldCounter != counter.get() || oldActiveProfiles == null || !oldActiveProfiles.equals(currentActiveProfiles)) {
            oldCounter = counter.get();
            oldActiveProfiles = currentActiveProfiles;
            try {
                LOG.debug("Reloading the currently active profile artifacts");
                profileArtifacts = findProfileArifacts();
            } catch (Exception e) {
                LOG.error("Failed to get profiles artifacts: " + e, e);
            }
        }
        // lets refresh profiles on the next loop; so we've time to finish uploading/modifying files
        for (Profile profile : refreshProfiles) {
            LOG.info("Refreshing profile: " + profile);
            Profiles.refreshProfile(fabricService.get(), profile);
        }
        refreshProfiles.clear();
        if (profileArtifacts != null) {
            List<File> localRepositories = new LinkedList<>();
            if (mavenResolver.get().getLocalRepository() != null) {
                localRepositories.add(mavenResolver.get().getLocalRepository());
            }
            if (mavenResolver.get().getDefaultRepositories() != null) {
                for (LocalRepository repository : mavenResolver.get().getDefaultRepositories()) {
                    localRepositories.add(repository.getBasedir());
                }
            }
            Set<Map.Entry<ProfileVersionKey, Map<String, Parser>>> entries = profileArtifacts.entrySet();
            for (Map.Entry<ProfileVersionKey, Map<String, Parser>> entry : entries) {
                ProfileVersionKey key = entry.getKey();
                Map<String, Parser> artifactMap = entry.getValue();
                // lets find a container for the profile
                Profile profile = key.getProfile();
                Properties checksums = findProfileChecksums(fabricService.get(), profile);
                if (checksums != null) {
                    Set<Map.Entry<String, Parser>> artifactMapEntries = artifactMap.entrySet();
                    for (Map.Entry<String, Parser> artifactMapEntry : artifactMapEntries) {
                        String location = artifactMapEntry.getKey();
                        Parser parser = artifactMapEntry.getValue();
                        if (isSnapshot(parser) || wildCardMatch(location)) {
                            Object value = checksums.get(location);
                            if (value == null) {
                                value = checksums.get(JavaContainers.removeUriPrefixBeforeMaven(location));
                            }
                            Long checksum = null;
                            if (value instanceof Number) {
                                checksum = ((Number) value).longValue();
                            } else if (value instanceof String) {
                                checksum = Long.parseLong((String) value);
                            }
                            if (checksum == null) {
                                if (missingChecksums.add(location)) {
                                    LOG.warn("Could not find checksum for location " + location);
                                }
                            } else {
                                File file = null;
                                for (File localRepository : localRepositories) {
                                    File _file = new File(localRepository.getPath() + File.separator + parser.getArtifactPath());
                                    if (_file.isFile()) {
                                        file = _file;
                                        break;
                                    }
                                }
                                if (file == null || !file.exists()) {
                                    // null-safe guard: no local repository contained the artifact
                                    LOG.info("Ignoring " + parser.getArtifactPath() + " as it does not exist locally");
                                } else {
                                    // lets use a cache of last modified times to avoid having to continuously
                                    // recalculate the checksum on each file
                                    Long oldModified = localModified.get(file);
                                    long modified = file.lastModified();
                                    if (oldModified == null || modified != oldModified) {
                                        localModified.put(file, modified);
                                        Long fileChecksum = getFileChecksum(file);
                                        if (fileChecksum != null && !fileChecksum.equals(checksum)) {
                                            // lets keep track of local checksums in case we've already started the upload process
                                            // and it takes the profile a little while to respond to uploaded jars and to
                                            // refreshed profiles
                                            Long localChecksum = localChecksums.get(file);
                                            if (localChecksum == null || !localChecksum.equals(fileChecksum)) {
                                                localChecksums.put(file, fileChecksum);
                                                LOG.info("Checksums don't match for " + location + ", container: " + checksum + " and local file: " + fileChecksum);
                                                LOG.info("Updated version of " + location + " detected in " + file);
                                                if (isUpload()) {
                                                    uploadFile(location, parser, file);
                                                }
                                                refreshProfiles.add(profile);
                                            }
                                        }
                                    }
                                }
                            }
                        } else {
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("Ignoring " + location);
                            }
                        }
                    }
                }
            }
        }
        try {
            Thread.sleep(interval);
        } catch (InterruptedException ex) {
            running.set(false);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Profile watcher thread stopped");
    }
}
Also used : HashMap(java.util.HashMap) Properties(java.util.Properties) Profile(io.fabric8.api.Profile) HashSet(java.util.HashSet) LocalRepository(org.eclipse.aether.repository.LocalRepository) MalformedURLException(java.net.MalformedURLException) IOException(java.io.IOException) LinkedList(java.util.LinkedList) Parser(io.fabric8.maven.util.Parser) ProfileService(io.fabric8.api.ProfileService) File(java.io.File) Map(java.util.Map) HashMap(java.util.HashMap)
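getFileChecksum(file) is a private helper that is not part of this excerpt. The sketch below shows one plausible way such a checksum could be computed, as a CRC32 over the file contents; this is an assumption about the approach, not the actual fabric8 implementation.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.zip.CRC32;

public class ChecksumSketch {

    // Hypothetical checksum helper: CRC32 over the file contents, null if the file cannot be read.
    public static Long getFileChecksum(File file) {
        try {
            byte[] bytes = Files.readAllBytes(file.toPath());
            CRC32 crc = new CRC32();
            crc.update(bytes);
            return crc.getValue();
        } catch (IOException e) {
            return null;
        }
    }
}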

Aggregations

Watch (io.fabric8.kubernetes.client.Watch): 11 usages
Watcher (io.fabric8.kubernetes.client.Watcher): 9 usages
Pod (io.fabric8.kubernetes.api.model.Pod): 8 usages
KubernetesClientException (io.fabric8.kubernetes.client.KubernetesClientException): 8 usages
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 7 usages
PodList (io.fabric8.kubernetes.api.model.PodList): 6 usages
ImageConfiguration (io.fabric8.maven.docker.config.ImageConfiguration): 5 usages
ArrayList (java.util.ArrayList): 5 usages
MojoExecutionException (org.apache.maven.plugin.MojoExecutionException): 5 usages
DoneablePod (io.fabric8.kubernetes.api.model.DoneablePod): 4 usages
WatchImageConfiguration (io.fabric8.maven.docker.config.WatchImageConfiguration): 4 usages
IOException (java.io.IOException): 4 usages
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap): 3 usages
LogWatch (io.fabric8.kubernetes.client.dsl.LogWatch): 3 usages
MixedOperation (io.fabric8.kubernetes.client.dsl.MixedOperation): 3 usages
HashMap (java.util.HashMap): 3 usages
ConfigMapList (io.fabric8.kubernetes.api.model.ConfigMapList): 2 usages
HasMetadata (io.fabric8.kubernetes.api.model.HasMetadata): 2 usages
DoneableStatefulSet (io.fabric8.kubernetes.api.model.extensions.DoneableStatefulSet): 2 usages
StatefulSet (io.fabric8.kubernetes.api.model.extensions.StatefulSet): 2 usages