Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.
The class EurekaContainerHealthService, method handleEurekaEvent:
private Flux<ContainerHealthEvent> handleEurekaEvent(EurekaEvent event, ConcurrentMap<String, ContainerHealthEvent> state) {
    if (!(event instanceof CacheRefreshedEvent)) {
        return Flux.empty();
    }
    List<Pair<Job, List<Task>>> allJobsAndTasks = jobOperations.getJobsAndTasks();
    List<Task> allTasks = new ArrayList<>();
    List<ContainerHealthEvent> events = new ArrayList<>();
    allJobsAndTasks.forEach(jobAndTasks -> {
        jobAndTasks.getRight().forEach(task -> {
            handleTaskStateUpdate(jobAndTasks.getLeft(), task, state).ifPresent(events::add);
            allTasks.add(task);
        });
    });
    // Cleanup, in case we have stale entries.
    Set<String> unknownTaskIds = CollectionsExt.copyAndRemove(
            state.keySet(),
            allTasks.stream().map(Task::getId).collect(Collectors.toSet())
    );
    unknownTaskIds.forEach(taskId -> {
        state.remove(taskId);
        // Assume the task was terminated.
        ContainerHealthStatus terminatedStatus = ContainerHealthStatus.newBuilder()
                .withTaskId(taskId)
                .withTimestamp(titusRuntime.getClock().wallTime())
                .withState(ContainerHealthState.Terminated)
                .withReason("terminated")
                .build();
        events.add(ContainerHealthUpdateEvent.healthChanged(terminatedStatus));
    });
    return Flux.fromIterable(events);
}
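Every snippet on this page touches only three members of Pair: the static factory Pair.of and the getLeft/getRight accessors. A minimal sketch of just that surface, using illustrative values:

    // A minimal sketch of the Pair API these examples rely on; only the
    // calls observed in the snippets (Pair.of, getLeft, getRight) appear.
    Pair<String, Integer> pair = Pair.of("tasks", 3); // construct
    String left = pair.getLeft();                     // first element  -> "tasks"
    Integer right = pair.getRight();                  // second element -> 3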
Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.
The class JobSnapshotDownloader, method writeMapping:
private void writeMapping(String table, Function<List<Pair<Object, Object>>, Map<?, ?>> mapper) {
    File output = new File(outputFolder, table + ".json");
    List<Pair<Object, Object>> allItems = CassandraUtils.readTwoColumnTable(session, table)
            .toList().toBlocking().first();
    System.out.println(String.format("Writing %s rows from table %s to file: %s...", allItems.size(), table, output));
    try {
        MAPPER.writeValue(output, mapper.apply(allItems));
    } catch (IOException e) {
        throw new IllegalStateException(e);
    }
}
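A hypothetical invocation of writeMapping (the table name and mapper below are illustrative, not from the source): each two-column row arrives as a Pair, and the mapper collapses the pair list into the map that gets serialized.

    // Hypothetical call: dump the "jobs" table, turning each (key, value)
    // row Pair into a map entry. Collectors.toMap throws on duplicate keys,
    // which should not occur here assuming the left element is the unique
    // partition key of the two-column table.
    writeMapping("jobs", pairs -> pairs.stream()
            .collect(Collectors.toMap(Pair::getLeft, Pair::getRight)));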
Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.
The class CassandraUtils, method truncateTableInternal:
private static boolean truncateTableInternal(CommandContext context, String table) {
    PreparedStatement truncateStatement = context.getTargetSession().prepare("TRUNCATE \"" + table + "\"");
    try {
        context.getTargetCassandraExecutor().executeUpdate(truncateStatement.bind())
                .toBlocking().firstOrDefault(null);
    } catch (TruncateException e) {
        // Check if the table is empty.
        logger.info("Couldn't complete the truncate operation. Checking if the table is empty: {}", table);
        Pair<Object, Object> value = readTwoColumnTable(context.getTargetSession(), table)
                .take(1).toBlocking().firstOrDefault(null);
        if (value == null) {
            // Truncate failed, but the table is empty. It is ok to move on.
            logger.info("Truncate deemed as successful, as the table is empty: {}", table);
            return true;
        }
        if (e.getMessage().contains("Cannot achieve consistency level ALL")) {
            logger.warn("Recoverable truncate operation failure for table {}. Cause: {}", table, e.getMessage());
            return false;
        }
        // Not a recoverable error. Re-throw it.
        throw e;
    }
    logger.info("Truncated table {}.{}", truncateStatement.getQueryKeyspace(), table);
    return true;
}
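Since the method returns false only for the recoverable consistency-level failure (and rethrows anything else), a caller would presumably retry; a sketch of such a loop, with an illustrative retry count:

    // Hypothetical retry loop around the recoverable-failure contract above.
    // Non-recoverable errors propagate out of truncateTableInternal as-is.
    boolean truncated = false;
    for (int attempt = 0; attempt < 3 && !truncated; attempt++) {
        truncated = truncateTableInternal(context, table);
    }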
Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.
The class CassandraUtils, method resolveColumnNamesInTwoColumnTable:
public static Pair<String, String> resolveColumnNamesInTwoColumnTable(Session sourceSession, String table) {
    TableMetadata tableMetadata = sourceSession.getCluster().getMetadata()
            .getKeyspace(sourceSession.getLoggedKeyspace())
            .getTable(table);
    String primaryKey = tableMetadata.getPartitionKey().get(0).getName();
    List<String> valueColumns = tableMetadata.getColumns().stream()
            .map(ColumnMetadata::getName)
            .filter(c -> !c.equals(primaryKey))
            .collect(Collectors.toList());
    Preconditions.checkState(valueColumns.size() == 1, "Expected exactly one non-primary key column, but found: %s", valueColumns);
    String valueColumn = valueColumns.get(0);
    return Pair.of(primaryKey, valueColumn);
}
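The returned Pair is naturally unpacked at the call site; a hypothetical usage (the table name is illustrative):

    // Hypothetical usage: resolve the key/value column names before querying.
    Pair<String, String> columns = resolveColumnNamesInTwoColumnTable(session, "jobs");
    String keyColumn = columns.getLeft();    // the partition key column
    String valueColumn = columns.getRight(); // the single value column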
Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.
The class V1SpecPodFactory, method buildV1Pod:
@Override
public V1Pod buildV1Pod(Job<?> job, Task task) {
    String taskId = task.getId();
    Map<String, String> annotations = createV1SchemaPodAnnotations(job, task);
    Pair<V1Affinity, Map<String, String>> affinityWithMetadata = podAffinityFactory.buildV1Affinity(job, task);
    annotations.putAll(affinityWithMetadata.getRight());
    Pair<List<String>, Map<String, String>> envVarsWithIndex = containerEnvFactory.buildContainerEnv(job, task);
    List<V1EnvVar> envVarsList = toV1EnvVar(envVarsWithIndex.getRight());
    annotations.put(POD_SYSTEM_ENV_VAR_NAMES, String.join(",", envVarsWithIndex.getLeft()));
    Map<String, String> labels = new HashMap<>();
    labels.put(KubeConstants.POD_LABEL_JOB_ID, job.getId());
    labels.put(KubeConstants.POD_LABEL_TASK_ID, taskId);
    JobManagerUtil.getRelocationBinpackMode(job)
            .ifPresent(mode -> labels.put(KubeConstants.POD_LABEL_RELOCATION_BINPACK, mode));
    // A V1Container has no room to store the original tag that the Image came from, so we store it as an
    // annotation. Only saving the 'main' one for now.
    annotations.put(POD_IMAGE_TAG_PREFIX + "main", job.getJobDescriptor().getContainer().getImage().getTag());
    JobDescriptor<?> jobDescriptor = job.getJobDescriptor();
    String capacityGroup = JobManagerUtil.getCapacityGroupDescriptorName(job.getJobDescriptor(), capacityGroupManagement).toLowerCase();
    labels.put(KubeConstants.LABEL_CAPACITY_GROUP, capacityGroup);
    V1ObjectMeta metadata = new V1ObjectMeta()
            .name(taskId)
            .namespace(DEFAULT_NAMESPACE)
            .annotations(annotations)
            .labels(labels);
    V1Container container = new V1Container()
            .name("main")
            .image(KubePodUtil.buildImageString(configuration.getRegistryUrl(), jobDescriptor.getContainer().getImage()))
            .env(envVarsList)
            .resources(buildV1ResourceRequirements(job.getJobDescriptor().getContainer().getContainerResources()))
            .imagePullPolicy(DEFAULT_IMAGE_PULL_POLICY)
            .volumeMounts(KubePodUtil.buildV1VolumeMounts(job.getJobDescriptor().getContainer().getVolumeMounts()));
    Container jobContainer = jobDescriptor.getContainer();
    if (CollectionsExt.isNullOrEmpty(jobContainer.getCommand()) && !shouldSkipEntryPointJoin(jobDescriptor.getAttributes())) {
        // Use the old behavior where the agent needs to do shell splitting.
        String entrypointStr = StringExt.concatenate(jobContainer.getEntryPoint(), " ");
        container.setCommand(Collections.singletonList(entrypointStr));
        annotations.put(ENTRYPOINT_SHELL_SPLITTING_ENABLED, "true");
    } else {
        container.setCommand(jobContainer.getEntryPoint());
        container.setArgs(jobContainer.getCommand());
    }
    List<V1Container> extraContainers = buildV1ExtraContainers(job.getJobDescriptor().getExtraContainers());
    List<V1Container> allContainers = Stream.concat(Stream.of(container), extraContainers.stream()).collect(Collectors.toList());
    List<V1Volume> volumes = buildV1Volumes(job.getJobDescriptor().getVolumes());
    ApplicationSLA capacityGroupDescriptor = JobManagerUtil.getCapacityGroupDescriptor(job.getJobDescriptor(), capacityGroupManagement);
    String schedulerName = selectScheduler(schedulerConfiguration, capacityGroupDescriptor, configuration);
    V1PodSpec spec = new V1PodSpec()
            .schedulerName(schedulerName)
            .containers(allContainers)
            .volumes(volumes)
            .terminationGracePeriodSeconds(configuration.getPodTerminationGracePeriodSeconds())
            .restartPolicy(NEVER_RESTART_POLICY)
            .dnsPolicy(DEFAULT_DNS_POLICY)
            .affinity(affinityWithMetadata.getLeft())
            .tolerations(taintTolerationFactory.buildV1Toleration(job, task))
            .topologySpreadConstraints(topologyFactory.buildTopologySpreadConstraints(job));
    // EBS volumes, if present, must be added to both the pod spec and the container's mounts.
    Optional<Pair<V1Volume, V1VolumeMount>> optionalEbsVolumeInfo = buildV1EBSObjects(job, task);
    if (optionalEbsVolumeInfo.isPresent()) {
        spec.addVolumesItem(optionalEbsVolumeInfo.get().getLeft());
        container.addVolumeMountsItem(optionalEbsVolumeInfo.get().getRight());
    }
    appendEfsMounts(spec, container, job);
    appendShmMount(spec, container, job);
    return new V1Pod().metadata(metadata).spec(spec);
}
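Both factories consumed at the top of buildV1Pod follow the same convention: the main artifact travels in the left slot of the Pair, and annotations to merge into the pod metadata travel in the right. A sketch of that shape, where only the signature is taken from the call above and the body is illustrative:

    // Sketch of the "artifact plus annotations" convention buildV1Pod consumes.
    // The signature matches the podAffinityFactory call above; the body is hypothetical.
    public Pair<V1Affinity, Map<String, String>> buildV1Affinity(Job<?> job, Task task) {
        V1Affinity affinity = new V1Affinity();
        Map<String, String> extraAnnotations = new HashMap<>();
        // ... populate affinity rules and any annotations they imply ...
        return Pair.of(affinity, extraAnnotations);
    }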