
Example 1 with Pair

Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.

The class MapWithStateTransformer, method call:

@Override
public Observable<R> call(Observable<T> source) {
    return Observable.unsafeCreate(subscriber -> {
        AtomicReference<S> lastState = new AtomicReference<>(zeroSupplier.get());
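        // Source values are wrapped on the value side of Either; cleanup actions ride on the error side.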
        Observable<Either<T, Function<S, Pair<R, S>>>> sourceEither = source.map(Either::ofValue);
        Observable<Either<T, Function<S, Pair<R, S>>>> cleanupEither = cleanupActions.map(Either::ofError);
        Subscription subscription = Observable.merge(sourceEither, cleanupEither).subscribe(next -> {
            Pair<R, S> result;
            if (next.hasValue()) {
                try {
                    result = transformer.apply(next.getValue(), lastState.get());
                } catch (Throwable e) {
                    subscriber.onError(e);
                    return;
                }
            } else {
                try {
                    Function<S, Pair<R, S>> action = next.getError();
                    result = action.apply(lastState.get());
                } catch (Throwable e) {
                    subscriber.onError(e);
                    return;
                }
            }
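            // Left element of the pair is emitted downstream; right element becomes the state for the next item.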
            lastState.set(result.getRight());
            subscriber.onNext(result.getLeft());
        }, subscriber::onError, subscriber::onCompleted);
        subscriber.add(subscription);
    });
}
Also used : Either(com.netflix.titus.common.util.tuple.Either) AtomicReference(java.util.concurrent.atomic.AtomicReference) Subscription(rx.Subscription) Pair(com.netflix.titus.common.util.tuple.Pair)
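
For context, the transformer above is a two-argument function: it receives the next source value together with the last state and returns a Pair whose left element is emitted to the subscriber and whose right element is stored as the state for the next emission. Below is a minimal, illustrative sketch of such a transformer, a running sum; the BiFunction shape and the class name are assumptions for the sketch, only Pair.of/getLeft/getRight come from the examples on this page.

import java.util.function.BiFunction;

import com.netflix.titus.common.util.tuple.Pair;

// Illustrative sketch only: a transformer in the two-argument shape used by MapWithStateTransformer,
// emitting a running sum of integer inputs while carrying the sum as state.
public class RunningSumTransformerSketch {

    static final BiFunction<Integer, Long, Pair<Long, Long>> RUNNING_SUM = (value, state) -> {
        long next = state + value;
        // Left element is what the subscriber receives; right element is kept as the next state.
        return Pair.of(next, next);
    };

    public static void main(String[] args) {
        Pair<Long, Long> first = RUNNING_SUM.apply(3, 0L);                // emits 3, state becomes 3
        Pair<Long, Long> second = RUNNING_SUM.apply(4, first.getRight()); // emits 7, state becomes 7
        System.out.println(first.getLeft() + ", " + second.getLeft());
    }
}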

Example 2 with Pair

Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.

The class JobSnapshotTest, method testJobAndTaskUpdate:

@Test
public void testJobAndTaskUpdate() {
    Pair<Job<?>, Map<String, Task>> pair1 = (Pair) newServiceJobWithTasks(1, 2, 1_000);
    Pair<Job<?>, Map<String, Task>> pair2 = (Pair) newBatchJobWithTasks(2, 2);
    Job<?> job1 = pair1.getLeft();
    Job<?> job2 = pair2.getLeft();
    List<Task> tasks1 = new ArrayList<>(pair1.getRight().values());
    List<Task> tasks2 = new ArrayList<>(pair2.getRight().values());
    JobSnapshot initial = newSnapshot(factory, pair1);
    // Add job2
    JobSnapshot updated = initial.updateJob(job2).orElse(null);
    assertThat(updated).isNotNull();
    assertThat(updated.getJobMap()).containsValues(job1, job2);
    // Add tasks of job2
    updated = updated.updateTask(tasks2.get(0), false).orElse(null);
    assertThat(updated).isNotNull();
    updated = updated.updateTask(tasks2.get(1), false).orElse(null);
    assertThat(updated).isNotNull();
    assertThat(updated.getTasks(job2.getId()).values()).containsAll(tasks2);
    assertThat(updated.getTaskMap()).hasSize(4);
    // Modify job1
    Job<?> updatedJob = job1.toBuilder().withVersion(Version.newBuilder().withTimestamp(123).build()).build();
    updated = updated.updateJob(updatedJob).orElse(null);
    assertThat(updated).isNotNull();
    assertThat(updated.getJobMap()).containsValues(updatedJob, job2);
    // Modify task (job1)
    Task updatedTask = tasks1.get(0).toBuilder().withVersion(Version.newBuilder().withTimestamp(123).build()).build();
    updated = updated.updateTask(updatedTask, false).orElse(null);
    assertThat(updated).isNotNull();
    assertThat(updated.getTasks(job1.getId())).hasSize(2);
    assertThat(updated.getTasks(job1.getId()).values()).contains(tasks1.get(1)).contains(updatedTask);
    assertThat(updated.getTaskMap()).hasSize(4);
    assertThat(updated.getTaskMap().get(updatedTask.getId())).isEqualTo(updatedTask);
    // Remove task (job1)
    updated = updated.updateTask(updatedTask.toBuilder().withStatus(TaskStatus.newBuilder().withState(TaskState.Finished).build()).build(), false).orElse(null);
    assertThat(updated).isNotNull();
    assertThat(updated.getTasks(job1.getId()).values()).containsExactly(tasks1.get(1));
    assertThat(updated.getTaskMap()).hasSize(3);
    // Remove job1
    updated = updated.updateJob(updatedJob.toBuilder().withStatus(JobStatus.newBuilder().withState(JobState.Finished).build()).build()).orElse(null);
    assertThat(updated).isNotNull();
    assertThat(updated.getJobMap()).hasSize(1).containsEntry(job2.getId(), job2);
    assertThat(updated.getTasks(job2.getId()).values()).containsAll(tasks2);
    assertThat(updated.getTaskMap()).hasSize(2).containsValues(tasks2.get(0), tasks2.get(1));
}
Also used : Task(com.netflix.titus.api.jobmanager.model.job.Task) ArrayList(java.util.ArrayList) Job(com.netflix.titus.api.jobmanager.model.job.Job) Map(java.util.Map) PMap(org.pcollections.PMap) Pair(com.netflix.titus.common.util.tuple.Pair) Test(org.junit.Test)
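
The test helpers pair each job with its tasks keyed by task id, and the pair is then unpacked with getLeft/getRight. A minimal, self-contained sketch of the same pattern, using plain strings in place of Job and Task (only the Pair API is taken from the example above; the stand-in values and class name are assumptions):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.netflix.titus.common.util.tuple.Pair;

// Illustrative sketch only: pairing an id with a map of tasks, mirroring Pair<Job<?>, Map<String, Task>> above.
public class JobTaskPairSketch {
    public static void main(String[] args) {
        Map<String, String> tasks = new HashMap<>();
        tasks.put("task-1", "payload-1");
        tasks.put("task-2", "payload-2");
        Pair<String, Map<String, String>> jobWithTasks = Pair.of("job-1", tasks);

        // Unpack the pair the same way the test does with pair1.getLeft() / pair1.getRight().
        String jobId = jobWithTasks.getLeft();
        List<String> taskValues = new ArrayList<>(jobWithTasks.getRight().values());
        System.out.println(jobId + " -> " + taskValues);
    }
}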

Example 3 with Pair

Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.

The class JooqTaskRelocationGC, method removeExpiredData:

/**
 * Removes all entries older than the given time threshold.
 */
@VisibleForTesting
int removeExpiredData(long timeThreshold) {
    // Count all items
    int allCount = dslContext.fetchCount(Relocation.RELOCATION.RELOCATION_STATUS);
    logger.info("All rows in 'relocation_status' table: {}", allCount);
    allRowsGauge.set(allCount);
    int expiredCount = dslContext.fetchCount(Relocation.RELOCATION.RELOCATION_STATUS, Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.lt(Timestamp.from(Instant.ofEpochMilli(timeThreshold))));
    logger.info("Expired rows in 'relocation_status' table: {}", expiredCount);
    expiredRowsGauge.set(expiredCount);
    if (expiredCount <= 0) {
        return 0;
    }
    // Locate timestamp from which to remove.
    Result<Record2<String, Timestamp>> timestampRow = dslContext.select(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID, Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME).from(Relocation.RELOCATION.RELOCATION_STATUS).where(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.lt(Timestamp.from(Instant.ofEpochMilli(timeThreshold)))).orderBy(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.asc()).limit(configuration.getGcRowLimit()).fetch();
    if (timestampRow.isEmpty()) {
        logger.info("No expired data found");
        return 0;
    }
    Timestamp lastToRemove = timestampRow.get(timestampRow.size() - 1).getValue(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME);
    // Load all data up to the given timestamp. This could be more data than above (and more than the GC limit when
    // there are records with the lastToRemove timestamp, which were not returned due to the limit constraint).
    // This is fine, as we do not expect that there are too many like this.
    Result<Record2<String, Timestamp>> toRemoveRows = dslContext.select(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID, Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME).from(Relocation.RELOCATION.RELOCATION_STATUS).where(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.le(lastToRemove)).orderBy(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.asc()).fetch();
    List<Pair<String, Long>> toRemoveSet = toRemoveRows.stream().map(r -> Pair.of(r.get(Relocation.RELOCATION.RELOCATION_STATUS.TASK_ID), r.get(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME).getTime())).collect(Collectors.toList());
    logger.info("Records to remove: {}", toRemoveSet);
    int removedFromDb = dslContext.delete(Relocation.RELOCATION.RELOCATION_STATUS).where(Relocation.RELOCATION.RELOCATION_STATUS.RELOCATION_EXECUTION_TIME.le(lastToRemove)).execute();
    logger.info("Removed expired rows from 'relocation_status' table: {}", removedFromDb);
    gcCounter.increment(removedFromDb);
    relocationResultStore.removeFromCache(toRemoveSet);
    return removedFromDb;
}
Also used : Gauge(com.netflix.spectator.api.Gauge) Stopwatch(com.google.common.base.Stopwatch) Counter(com.netflix.spectator.api.Counter) LoggerFactory(org.slf4j.LoggerFactory) Singleton(javax.inject.Singleton) ScheduleReference(com.netflix.titus.common.framework.scheduler.ScheduleReference) Inject(javax.inject.Inject) PreDestroy(javax.annotation.PreDestroy) Record2(org.jooq.Record2) Pair(com.netflix.titus.common.util.tuple.Pair) Duration(java.time.Duration) DSLContext(org.jooq.DSLContext) Logger(org.slf4j.Logger) Timestamp(java.sql.Timestamp) LeaderActivationListener(com.netflix.titus.api.common.LeaderActivationListener) Instant(java.time.Instant) Result(org.jooq.Result) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) ScheduleDescriptor(com.netflix.titus.common.framework.scheduler.model.ScheduleDescriptor) VisibleForTesting(com.google.common.annotations.VisibleForTesting) TitusRuntime(com.netflix.titus.common.runtime.TitusRuntime) Evaluators(com.netflix.titus.common.util.Evaluators) TaskRelocationResultStore(com.netflix.titus.supplementary.relocation.store.TaskRelocationResultStore)
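
The method above only needs a cut-off timestamp; computing that cut-off is left to the caller. Below is a sketch of how a GC pass might derive it, assuming a fixed seven-day retention; the retention value and the method name gcPassSketch are assumptions, and the real class presumably takes the retention period from its configuration.

// Illustrative sketch only: a companion method that could sit next to removeExpiredData,
// turning an assumed seven-day retention into the timeThreshold argument.
private int gcPassSketch() {
    long retentionMs = TimeUnit.DAYS.toMillis(7);
    long timeThreshold = System.currentTimeMillis() - retentionMs;
    // Rows whose relocation_execution_time is older than the threshold become eligible for removal.
    return removeExpiredData(timeThreshold);
}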

Example 4 with Pair

Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.

The class TestStoreLoadCommand, method execute:

@Override
public void execute(CommandContext commandContext) {
    CommandLine commandLine = commandContext.getCommandLine();
    String keyspace = commandContext.getTargetKeySpace();
    Integer jobs = Integer.valueOf(commandLine.getOptionValue("jobs"));
    Integer tasks = Integer.valueOf(commandLine.getOptionValue("tasks"));
    Integer concurrency = Integer.valueOf(commandLine.getOptionValue("concurrency"));
    Integer iterations = Integer.valueOf(commandLine.getOptionValue("iterations"));
    Session session = commandContext.getTargetSession();
    boolean keyspaceExists = session.getCluster().getMetadata().getKeyspace(keyspace) != null;
    if (!keyspaceExists) {
        throw new IllegalStateException("Keyspace: " + keyspace + " does not exist. You must create it first.");
    }
    session.execute("USE " + keyspace);
    JobStore titusStore = new CassandraJobStore(CONFIGURATION, session, TitusRuntimes.internal());
    // Create jobs and tasks
    long jobStartTime = System.currentTimeMillis();
    List<Observable<Void>> createJobAndTasksObservables = new ArrayList<>();
    for (int i = 0; i < jobs; i++) {
        createJobAndTasksObservables.add(createJobAndTasksObservable(tasks, titusStore));
    }
    Observable.merge(createJobAndTasksObservables, concurrency).toBlocking().subscribe(none -> {
    }, e -> logger.error("Error creating jobs: ", e), () -> {
        logger.info("Created {} jobs with {} tasks in {}[ms]", jobs, tasks, System.currentTimeMillis() - jobStartTime);
    });
    // Try loading jobs and tasks for the configured number of iterations
    long loadTotalTime = 0L;
    for (int i = 0; i < iterations; i++) {
        long loadStartTime = System.currentTimeMillis();
        List<Pair<Job, List<Task>>> pairs = new ArrayList<>();
        titusStore.init().andThen(titusStore.retrieveJobs().flatMap(retrievedJobsAndErrors -> {
            List<Job<?>> retrievedJobs = retrievedJobsAndErrors.getLeft();
            List<Observable<Pair<Job, List<Task>>>> retrieveTasksObservables = new ArrayList<>();
            for (Job job : retrievedJobs) {
                Observable<Pair<Job, List<Task>>> retrieveTasksObservable = titusStore.retrieveTasksForJob(job.getId()).map(taskList -> new Pair<>(job, taskList.getLeft()));
                retrieveTasksObservables.add(retrieveTasksObservable);
            }
            return Observable.merge(retrieveTasksObservables, MAX_RETRIEVE_TASK_CONCURRENCY);
        })).map(p -> {
            pairs.add(p);
            return null;
        }).toBlocking().subscribe(none -> {
        }, e -> logger.error("Failed to load jobs from cassandra with error: ", e), () -> {
        });
        long loadTime = System.currentTimeMillis() - loadStartTime;
        logger.info("Loaded {} jobs from cassandra in {}[ms]", pairs.size(), loadTime);
        loadTotalTime += loadTime;
    }
    logger.info("Average load time: {}[ms]", loadTotalTime / iterations);
}
Also used : CassandraJobStore(com.netflix.titus.ext.cassandra.store.CassandraJobStore) BatchJobTask(com.netflix.titus.api.jobmanager.model.job.BatchJobTask) Task(com.netflix.titus.api.jobmanager.model.job.Task) ArrayList(java.util.ArrayList) JobStore(com.netflix.titus.api.jobmanager.store.JobStore) Observable(rx.Observable) CommandLine(org.apache.commons.cli.CommandLine) List(java.util.List) Job(com.netflix.titus.api.jobmanager.model.job.Job) Session(com.datastax.driver.core.Session) Pair(com.netflix.titus.common.util.tuple.Pair)
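
execute reads four commons-cli options: jobs, tasks, concurrency and iterations. Below is a sketch of how a matching CommandLine could be assembled for it; the long-option registration, the descriptions and the DefaultParser choice are assumptions, only the option names come from the method above.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;

// Illustrative sketch only: parsing the four options that TestStoreLoadCommand.execute expects.
public class StoreLoadOptionsSketch {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption(null, "jobs", true, "number of jobs to create");
        options.addOption(null, "tasks", true, "tasks per job");
        options.addOption(null, "concurrency", true, "concurrent store operations");
        options.addOption(null, "iterations", true, "number of load iterations");
        CommandLine commandLine = new DefaultParser().parse(
                options, new String[]{"--jobs", "10", "--tasks", "5", "--concurrency", "2", "--iterations", "3"});
        // Same lookups as in execute(CommandContext).
        System.out.println(Integer.valueOf(commandLine.getOptionValue("jobs")));
    }
}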

Example 5 with Pair

Use of com.netflix.titus.common.util.tuple.Pair in project titus-control-plane by Netflix.

The class JobSnapshotLoader, method readDataTable:

private void readDataTable(String table) {
    ArrayNode jsonTree = (ArrayNode) readJsonTree(table);
    List<Pair<Object, Object>> items = new ArrayList<>();
    jsonTree.forEach(item -> {
        try {
            items.add(Pair.of(item.get("id").textValue(), MAPPER.writeValueAsString(item)));
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    });
    long written = CassandraUtils.writeIntoTwoColumnTable(session, table, Observable.from(items));
    System.out.println(String.format("Successfully written %s entries into table %s", written, table));
}
Also used : ArrayList(java.util.ArrayList) ArrayNode(com.fasterxml.jackson.databind.node.ArrayNode) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) Pair(com.netflix.titus.common.util.tuple.Pair)
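
readDataTable expects a JSON array whose elements each carry an id field; every element is re-serialized and paired with that id before being written as a (key, value) row. Below is a minimal sketch of that pairing with an inline Jackson document; the sample JSON, the local ObjectMapper and the use of toString() in place of writeValueAsString are assumptions, since the real loader obtains its tree via readJsonTree(table).

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;

import com.netflix.titus.common.util.tuple.Pair;

// Illustrative sketch only: the (id, serialized item) pairing performed by readDataTable.
public class SnapshotInputSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ArrayNode jsonTree = (ArrayNode) mapper.readTree(
                "[{\"id\":\"job-1\",\"state\":\"Accepted\"},{\"id\":\"job-2\",\"state\":\"Finished\"}]");
        jsonTree.forEach(item -> {
            // Row key on the left, serialized JSON value on the right.
            Pair<String, String> row = Pair.of(item.get("id").textValue(), item.toString());
            System.out.println(row.getLeft() + " -> " + row.getRight());
        });
    }
}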

Aggregations

Pair (com.netflix.titus.common.util.tuple.Pair): 41
Task (com.netflix.titus.api.jobmanager.model.job.Task): 22
List (java.util.List): 21
ArrayList (java.util.ArrayList): 18
Job (com.netflix.titus.api.jobmanager.model.job.Job): 14
Map (java.util.Map): 10
Collectors (java.util.stream.Collectors): 10
TitusRuntime (com.netflix.titus.common.runtime.TitusRuntime): 8
Optional (java.util.Optional): 8
Logger (org.slf4j.Logger): 8
LoggerFactory (org.slf4j.LoggerFactory): 8
TaskState (com.netflix.titus.api.jobmanager.model.job.TaskState): 7
HashMap (java.util.HashMap): 7
JobFunctions (com.netflix.titus.api.jobmanager.model.job.JobFunctions): 6
Collections (java.util.Collections): 6
Observable (rx.Observable): 6
TaskRelocationPlan (com.netflix.titus.api.relocation.model.TaskRelocationPlan): 5
Function (java.util.function.Function): 5
PreparedStatement (com.datastax.driver.core.PreparedStatement): 4
Session (com.datastax.driver.core.Session): 4