Use of com.netflix.spinnaker.halyard.deploy.spinnaker.v1.RunningServiceDetails in project halyard by spinnaker.
The class KubernetesV1DistributedService, method getRunningServiceDetails:
@Override
default RunningServiceDetails getRunningServiceDetails(AccountDeploymentDetails<KubernetesAccount> details, SpinnakerRuntimeSettings runtimeSettings) {
  ServiceSettings settings = runtimeSettings.getServiceSettings(getService());
  RunningServiceDetails res = new RunningServiceDetails();
  KubernetesClient client = KubernetesV1ProviderUtils.getClient(details);
  String name = getServiceName();
  String namespace = getNamespace(settings);

  // The load balancer is the Kubernetes service fronting this Spinnaker service.
  RunningServiceDetails.LoadBalancer lb = new RunningServiceDetails.LoadBalancer();
  lb.setExists(client.services().inNamespace(namespace).withName(name).get() != null);
  res.setLoadBalancer(lb);

  // Collect both enabled ("true") and disabled ("false") pods for this service.
  List<Pod> pods = client.pods().inNamespace(namespace).withLabel("load-balancer-" + name, "true").list().getItems();
  pods.addAll(client.pods().inNamespace(namespace).withLabel("load-balancer-" + name, "false").list().getItems());

  Map<Integer, List<Instance>> instances = res.getInstances();
  for (Pod pod : pods) {
    // Pod names look like "<server-group>-<suffix>"; the server group name carries the version sequence.
    String podName = pod.getMetadata().getName();
    String serverGroupName = podName.substring(0, podName.lastIndexOf("-"));
    Names parsedName = Names.parseName(serverGroupName);
    Integer version = parsedName.getSequence();
    if (version == null) {
      throw new IllegalStateException("Server group for service " + getServiceName() + " has unknown sequence (" + serverGroupName + ")");
    }

    String location = pod.getMetadata().getNamespace();
    String id = pod.getMetadata().getName();
    Instance instance = new Instance().setId(id).setLocation(location);

    // Healthy: every container reports ready. Running: every container is running and none has terminated.
    List<ContainerStatus> containerStatuses = pod.getStatus().getContainerStatuses();
    if (!containerStatuses.isEmpty() && containerStatuses.stream().allMatch(ContainerStatus::getReady)) {
      instance.setHealthy(true);
    }
    if (!containerStatuses.isEmpty() && containerStatuses.stream().allMatch(s -> s.getState().getRunning() != null && s.getState().getTerminated() == null)) {
      instance.setRunning(true);
    }

    List<Instance> knownInstances = instances.getOrDefault(version, new ArrayList<>());
    knownInstances.add(instance);
    instances.put(version, knownInstances);
  }

  // Record server groups that exist but have no pods, so every known version appears in the result.
  List<ReplicaSet> replicaSets = client.extensions().replicaSets().inNamespace(settings.getLocation()).list().getItems();
  for (ReplicaSet rs : replicaSets) {
    String rsName = rs.getMetadata().getName();
    Names parsedRsName = Names.parseName(rsName);
    if (!parsedRsName.getCluster().equals(getServiceName())) {
      continue;
    }

    instances.computeIfAbsent(parsedRsName.getSequence(), i -> new ArrayList<>());
  }

  return res;
}
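The pod-to-server-group mapping above relies on stripping the pod's random suffix and letting Frigga's Names parser recover the cluster and version sequence. A minimal standalone sketch of that step, using a hypothetical pod name that is not taken from the halyard source:

// Minimal sketch of the name-parsing step above; the pod name is an assumed example.
import com.netflix.frigga.Names;

class PodNameParsingSketch {
  public static void main(String[] args) {
    String podName = "spin-clouddriver-v003-abcde";                           // hypothetical pod name
    String serverGroupName = podName.substring(0, podName.lastIndexOf("-"));  // "spin-clouddriver-v003"
    Names parsed = Names.parseName(serverGroupName);
    System.out.println(parsed.getCluster());   // spin-clouddriver
    System.out.println(parsed.getSequence());  // 3
  }
}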
Use of com.netflix.spinnaker.halyard.deploy.spinnaker.v1.RunningServiceDetails in project halyard by spinnaker.
The class KubernetesV1DistributedService, method ensureRunning:
default void ensureRunning(AccountDeploymentDetails<KubernetesAccount> details, GenerateService.ResolvedConfiguration resolvedConfiguration, List<ConfigSource> configSources, boolean recreate) {
  ServiceSettings settings = resolvedConfiguration.getServiceSettings(getService());
  SpinnakerRuntimeSettings runtimeSettings = resolvedConfiguration.getRuntimeSettings();
  String namespace = getNamespace(settings);
  String serviceName = getServiceName();
  String replicaSetName = serviceName + "-v000";
  int port = settings.getPort();

  SpinnakerMonitoringDaemonService monitoringService = getMonitoringDaemonService();
  ServiceSettings monitoringSettings = runtimeSettings.getServiceSettings(monitoringService);

  KubernetesClient client = KubernetesV1ProviderUtils.getClient(details);
  KubernetesV1ProviderUtils.createNamespace(details, namespace);

  // The Kubernetes service selects pods by the "load-balancer-<service>" label, while the replica set
  // selects them by "replication-controller". Pods carry both labels so they match both selectors.
  Map<String, String> serviceSelector = new HashMap<>();
  serviceSelector.put("load-balancer-" + serviceName, "true");

  Map<String, String> replicaSetSelector = new HashMap<>();
  replicaSetSelector.put("replication-controller", replicaSetName);

  Map<String, String> podLabels = new HashMap<>();
  podLabels.putAll(replicaSetSelector);
  podLabels.putAll(serviceSelector);

  Map<String, String> serviceLabels = new HashMap<>();
  serviceLabels.put("app", "spin");
  serviceLabels.put("stack", getCanonicalName());

  ServiceBuilder serviceBuilder = new ServiceBuilder();
  serviceBuilder = serviceBuilder
      .withNewMetadata()
        .withName(serviceName)
        .withNamespace(namespace)
        .withLabels(serviceLabels)
      .endMetadata()
      .withNewSpec()
        .withSelector(serviceSelector)
        .withPorts(
            new ServicePortBuilder().withPort(port).withName("http").build(),
            new ServicePortBuilder().withPort(monitoringSettings.getPort()).withName("monitoring").build())
      .endSpec();

  // Create the service, deleting it first when a recreate was requested.
  boolean create = true;
  if (client.services().inNamespace(namespace).withName(serviceName).get() != null) {
    if (recreate) {
      client.services().inNamespace(namespace).withName(serviceName).delete();
    } else {
      create = false;
    }
  }

  if (create) {
    client.services().inNamespace(namespace).create(serviceBuilder.build());
  }

  // Build one container for the service plus one per sidecar, all mounting the generated config secrets.
  List<Container> containers = new ArrayList<>();
  DeploymentEnvironment deploymentEnvironment = details.getDeploymentConfiguration().getDeploymentEnvironment();
  containers.add(ResourceBuilder.buildContainer(serviceName, settings, configSources, deploymentEnvironment));
  for (SidecarService sidecarService : getSidecars(runtimeSettings)) {
    String sidecarName = sidecarService.getService().getServiceName();
    ServiceSettings sidecarSettings = resolvedConfiguration.getServiceSettings(sidecarService.getService());
    containers.add(ResourceBuilder.buildContainer(sidecarName, sidecarSettings, configSources, deploymentEnvironment));
  }

  List<Volume> volumes = configSources.stream().map(c -> {
    return new VolumeBuilder().withName(c.getId()).withSecret(new SecretVolumeSourceBuilder().withSecretName(c.getId()).build()).build();
  }).collect(Collectors.toList());

  ReplicaSetBuilder replicaSetBuilder = new ReplicaSetBuilder();
  List<LocalObjectReference> imagePullSecrets = getImagePullSecrets(settings);
  Map componentSizing = deploymentEnvironment.getCustomSizing().get(serviceName);

  replicaSetBuilder = replicaSetBuilder
      .withNewMetadata()
        .withName(replicaSetName)
        .withNamespace(namespace)
      .endMetadata()
      .withNewSpec()
        .withReplicas(retrieveKubernetesTargetSize(componentSizing))
        .withNewSelector()
          .withMatchLabels(replicaSetSelector)
        .endSelector()
        .withNewTemplate()
          .withNewMetadata()
            .withAnnotations(settings.getKubernetes().getPodAnnotations())
            .withLabels(podLabels)
          .endMetadata()
          .withNewSpec()
            .withContainers(containers)
            .withTerminationGracePeriodSeconds(5L)
            .withVolumes(volumes)
            .withImagePullSecrets(imagePullSecrets)
          .endSpec()
        .endTemplate()
      .endSpec();

  create = true;
  if (client.extensions().replicaSets().inNamespace(namespace).withName(replicaSetName).get() != null) {
    if (recreate) {
      // Delete the old replica set and wait until no enabled version remains before recreating it.
      client.extensions().replicaSets().inNamespace(namespace).withName(replicaSetName).delete();

      RunningServiceDetails runningServiceDetails = getRunningServiceDetails(details, runtimeSettings);
      while (runningServiceDetails.getLatestEnabledVersion() != null) {
        DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(5));
        runningServiceDetails = getRunningServiceDetails(details, runtimeSettings);
      }
    } else {
      create = false;
    }
  }

  if (create) {
    client.extensions().replicaSets().inNamespace(namespace).create(replicaSetBuilder.build());
  }

  // Block until every instance of the latest enabled version is both healthy and running.
  RunningServiceDetails runningServiceDetails = getRunningServiceDetails(details, runtimeSettings);
  Integer version = runningServiceDetails.getLatestEnabledVersion();
  while (version == null || runningServiceDetails.getInstances().get(version).stream().anyMatch(i -> !(i.isHealthy() && i.isRunning()))) {
    DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(5));
    runningServiceDetails = getRunningServiceDetails(details, runtimeSettings);
    version = runningServiceDetails.getLatestEnabledVersion();
  }
}
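Both polling loops above follow the same pattern: re-fetch RunningServiceDetails every five seconds until a condition on the latest enabled version holds. A hedged sketch of that pattern as a standalone helper; waitForHealthyVersion is a hypothetical name and not part of the halyard source:

// Hypothetical helper illustrating the poll-until-healthy pattern above; not part of halyard.
// Assumes java.util.function.Supplier, java.util.concurrent.TimeUnit, RunningServiceDetails,
// and DaemonTaskHandler are on the classpath as in the surrounding code.
static void waitForHealthyVersion(Supplier<RunningServiceDetails> fetch) {
  RunningServiceDetails details = fetch.get();
  Integer version = details.getLatestEnabledVersion();
  while (version == null
      || details.getInstances().get(version).stream().anyMatch(i -> !(i.isHealthy() && i.isRunning()))) {
    DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(5));
    details = fetch.get();
    version = details.getLatestEnabledVersion();
  }
}

A caller would pass something like () -> getRunningServiceDetails(details, runtimeSettings) as the supplier.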
Use of com.netflix.spinnaker.halyard.deploy.spinnaker.v1.RunningServiceDetails in project halyard by spinnaker.
The class GoogleDistributedService, method getRunningServiceDetails:
@Override
default RunningServiceDetails getRunningServiceDetails(AccountDeploymentDetails<GoogleAccount> details, SpinnakerRuntimeSettings runtimeSettings) {
  ServiceSettings settings = runtimeSettings.getServiceSettings(getService());
  RunningServiceDetails result = new RunningServiceDetails();
  // All GCE load balancing is done via consul, so the load balancer is always reported as existing.
  result.setLoadBalancer(new RunningServiceDetails.LoadBalancer().setExists(true));

  Compute compute = GoogleProviderUtils.getCompute(details);
  GoogleAccount account = details.getAccount();
  List<InstanceGroupManager> migs;
  try {
    migs = compute.instanceGroupManagers().list(account.getProject(), settings.getLocation()).execute().getItems();
    if (migs == null) {
      migs = Collections.emptyList();
    }
  } catch (IOException e) {
    throw new HalException(FATAL, "Failed to load MIGS: " + e.getMessage(), e);
  }

  // Health comes from Consul when the consul-client sidecar is enabled for this service.
  boolean consulEnabled = getSidecars(runtimeSettings).stream().anyMatch(s -> s.getService().getType().equals(SpinnakerService.Type.CONSUL_CLIENT));
  Set<String> healthyConsulInstances = consulEnabled
      ? getConsulServerService().connectToPrimaryService(details, runtimeSettings)
          .serviceHealth(getService().getCanonicalName(), true)
          .stream()
          .map(s -> s != null && s.getNode() != null ? s.getNode().getNodeName() : null)
          .filter(Objects::nonNull)
          .collect(Collectors.toSet())
      : new HashSet<>();

  // Only consider MIGs belonging to this service's server groups ("<service>-vNNN").
  String serviceName = getService().getServiceName();
  migs = migs.stream().filter(ig -> ig.getName().startsWith(serviceName + "-v")).collect(Collectors.toList());

  Map<Integer, List<RunningServiceDetails.Instance>> instances = migs.stream().reduce(new HashMap<>(), (map, mig) -> {
    Names names = Names.parseName(mig.getName());
    Integer version = names.getSequence();
    List<RunningServiceDetails.Instance> computeInstances;
    try {
      List<ManagedInstance> managedInstances = compute.instanceGroupManagers().listManagedInstances(account.getProject(), settings.getLocation(), mig.getName()).execute().getManagedInstances();
      if (managedInstances == null) {
        managedInstances = new ArrayList<>();
      }

      computeInstances = managedInstances.stream().map(i -> {
        String instanceUrl = i.getInstance();
        String instanceStatus = i.getInstanceStatus();
        boolean running = instanceStatus != null && instanceStatus.equalsIgnoreCase("running");
        // The managed-instance record stores the full instance URL; the name is its last path segment.
        String instanceName = instanceUrl.substring(instanceUrl.lastIndexOf('/') + 1, instanceUrl.length());
        return new RunningServiceDetails.Instance()
            .setId(instanceName)
            .setLocation(settings.getLocation())
            .setRunning(running)
            .setHealthy(!consulEnabled || healthyConsulInstances.contains(instanceName));
      }).collect(Collectors.toList());
    } catch (IOException e) {
      throw new HalException(FATAL, "Failed to load target pools for " + serviceName, e);
    }

    map.put(version, computeInstances);
    return map;
  }, (m1, m2) -> {
    m1.putAll(m2);
    return m1;
  });

  result.setInstances(instances);
  return result;
}
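The instance id reported above is simply the last path segment of the managed instance's URL. A small sketch of that extraction; the URL below is an assumed example, not real data:

// Illustrative only; the instance URL is a hypothetical example.
String instanceUrl = "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instances/spin-orca-v000-abcd";
String instanceName = instanceUrl.substring(instanceUrl.lastIndexOf('/') + 1); // "spin-orca-v000-abcd"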
Use of com.netflix.spinnaker.halyard.deploy.spinnaker.v1.RunningServiceDetails in project halyard by spinnaker.
The class DistributedLogCollector, method collectLogs:
@Override
public void collectLogs(AccountDeploymentDetails<A> details, SpinnakerRuntimeSettings runtimeSettings) {
  DistributedService<T, A> distributedService = (DistributedService<T, A>) getService();
  RunningServiceDetails runningServiceDetails = distributedService.getRunningServiceDetails(details, runtimeSettings);

  // Collect logs only from instances that are currently running, one output directory per instance.
  runningServiceDetails.getInstances().values().forEach(is -> is.stream()
      .filter(RunningServiceDetails.Instance::isRunning)
      .forEach(i -> {
        File outputDir = getDirectoryStructure().getServiceLogsPath(details.getDeploymentName(), i.getId(), getService().getCanonicalName()).toFile();
        collectInstanceLogs(details, runtimeSettings, outputDir, i.getId());
      }));
}
Use of com.netflix.spinnaker.halyard.deploy.spinnaker.v1.RunningServiceDetails in project halyard by spinnaker.
The class DistributedDeployer, method deploy:
@Override
public RemoteAction deploy(DistributedServiceProvider<T> serviceProvider, AccountDeploymentDetails<T> deploymentDetails, ResolvedConfiguration resolvedConfiguration, List<SpinnakerService.Type> serviceTypes) {
  SpinnakerRuntimeSettings runtimeSettings = resolvedConfiguration.getRuntimeSettings();
  DaemonTaskHandler.newStage("Deploying Spinnaker");
  // First deploy all services not owned by Spinnaker
  for (DistributedService distributedService : serviceProvider.getPrioritizedDistributedServices(serviceTypes)) {
    SpinnakerService service = distributedService.getService();
    ServiceSettings settings = resolvedConfiguration.getServiceSettings(service);
    if (!settings.getEnabled() || settings.getSkipLifeCycleManagement()) {
      continue;
    }

    DaemonTaskHandler.newStage("Determining status of " + distributedService.getServiceName());
    boolean safeToUpdate = settings.getSafeToUpdate();
    RunningServiceDetails runningServiceDetails = distributedService.getRunningServiceDetails(deploymentDetails, runtimeSettings);

    if (distributedService.isRequiredToBootstrap() || !safeToUpdate) {
      deployServiceManually(deploymentDetails, resolvedConfiguration, distributedService, safeToUpdate);
    } else {
      DaemonResponse.StaticRequestBuilder<Void> builder = new DaemonResponse.StaticRequestBuilder<>(() -> {
        if (runningServiceDetails.getLatestEnabledVersion() == null) {
          // Nothing is running yet, so there is nothing to roll over: deploy directly via the provider API.
          DaemonTaskHandler.newStage("Deploying " + distributedService.getServiceName() + " via provider API");
          deployServiceManually(deploymentDetails, resolvedConfiguration, distributedService, safeToUpdate);
        } else {
          // An enabled version already exists, so hand the rollout to Orca as a red/black deployment.
          DaemonTaskHandler.newStage("Deploying " + distributedService.getServiceName() + " via red/black");
          Orca orca = serviceProvider.getDeployableService(SpinnakerService.Type.ORCA_BOOTSTRAP, Orca.class).connectToPrimaryService(deploymentDetails, runtimeSettings);
          deployServiceWithOrca(deploymentDetails, resolvedConfiguration, orca, distributedService);
        }
        return null;
      });

      DaemonTaskHandler.submitTask(builder::build, "Deploy " + distributedService.getServiceName());
    }
  }

  DaemonTaskHandler.message("Waiting on deployments to complete");
  DaemonTaskHandler.reduceChildren(null, (t1, t2) -> null, (t1, t2) -> null).getProblemSet().throwifSeverityExceeds(Problem.Severity.WARNING);

  // Clean up old Orca and Rosco server groups once the new versions are up.
  DistributedService<Orca, T> orca = serviceProvider.getDeployableService(SpinnakerService.Type.ORCA);
  Set<Integer> unknownVersions = reapOrcaServerGroups(deploymentDetails, runtimeSettings, orca);
  reapRoscoServerGroups(deploymentDetails, runtimeSettings, serviceProvider.getDeployableService(SpinnakerService.Type.ROSCO));

  if (!unknownVersions.isEmpty()) {
    String versions = String.join(", ", unknownVersions.stream().map(orca::getVersionedName).collect(Collectors.toList()));
    throw new HalException(new ProblemBuilder(Problem.Severity.ERROR, "The following orca versions (" + versions + ") could not safely be drained of work.").setRemediation("Please make sure that no pipelines are running, and manually destroy the server groups at those versions.").build());
  }

  return new RemoteAction();
}
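The deploy method decides between a fresh provider-API deployment and an Orca red/black rollout by checking whether getRunningServiceDetails reports an enabled version. A hedged dry-run style sketch of that check, using only the calls shown above; this loop is not part of halyard:

// Hypothetical sketch: report which services would be deployed fresh vs. rolled out red/black.
for (DistributedService distributedService : serviceProvider.getPrioritizedDistributedServices(serviceTypes)) {
  RunningServiceDetails running = distributedService.getRunningServiceDetails(deploymentDetails, runtimeSettings);
  String mode = running.getLatestEnabledVersion() == null ? "provider API (first deploy)" : "red/black via Orca";
  System.out.println(distributedService.getServiceName() + " -> " + mode);
}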