Use of com.hazelcast.jet.core.DAG in project hazelcast-jet by hazelcast.
The class AnyMatchReducer, method reduce().
@Override
public Boolean reduce(StreamContext context, Pipe<? extends T> upstream) {
    // Temporary IList that collects the per-processor match flags.
    String listName = uniqueListName();
    DAG dag = new DAG();
    Vertex source = upstream.buildDAG(dag);
    Vertex matcher = dag.newVertex("any-match", () -> new AnyMatchP<>(predicate));
    Vertex sink = dag.newVertex("write-" + listName, SinkProcessors.writeListP(listName));
    dag.edge(between(source, matcher))
       .edge(between(matcher, sink));
    executeJob(context, dag);
    // Fold the partial flags into a single answer, then drop the scratch list.
    IList<Boolean> partialResults = context.getJetInstance().getList(listName);
    boolean matched = anyMatch(partialResults);
    partialResults.destroy();
    return matched;
}
Use of com.hazelcast.jet.core.DAG in project hazelcast-jet by hazelcast.
The class BiConsumerCombinerReducer, method reduce().
@Override
public R reduce(StreamContext context, Pipe<? extends T> upstream) {
    // Chain the two stages inline: accumulate upstream items, then combine the
    // partial accumulators, and run the job mapping the result through identity.
    DAG dag = new DAG();
    Vertex combined = buildCombiner(dag, buildAccumulator(dag, upstream, supplier, accumulator), combiner);
    return execute(context, dag, combined, DistributedFunction.identity());
}
Use of com.hazelcast.jet.core.DAG in project hazelcast-jet by hazelcast.
The class GroupingSinkReducer, method reduce().
@Override
public R reduce(StreamContext context, Pipe<? extends T> upstream) {
    DAG dag = new DAG();
    Vertex source = upstream.buildDAG(dag);
    // Local per-member grouping and accumulation of upstream items.
    Vertex accumulate = dag.newVertex("group-and-accumulate",
            () -> new GroupAndAccumulateP<>(classifier, collector));
    // Distributed combine of the partial groups, followed by the configured sink.
    Vertex combine = dag.newVertex("combine-groups", () -> new CombineGroupsP<>(collector));
    Vertex sink = dag.newVertex(sinkName, metaSupplier);
    // dag.edge() is fluent; separate statements are equivalent to the chained form.
    dag.edge(between(source, accumulate).partitioned(classifier::apply, HASH_CODE));
    dag.edge(between(accumulate, combine).distributed().partitioned(entryKey()));
    dag.edge(between(combine, sink));
    executeJob(context, dag);
    return toDistributedObject.apply(context.getJetInstance());
}
Use of com.hazelcast.jet.core.DAG in project hazelcast-jet by hazelcast.
The class IListReducer, method reduce().
@Override
public IListJet<T> reduce(StreamContext context, Pipe<? extends T> upstream) {
    // Resolve the destination list up front; the job writes into it by name.
    IListJet<T> result = context.getJetInstance().getList(listName);
    DAG dag = new DAG();
    Vertex source = upstream.buildDAG(dag);
    // A single local writer feeds the target list.
    Vertex sink = dag.newVertex("write-list-" + listName, SinkProcessors.writeListP(listName))
            .localParallelism(1);
    dag.edge(between(source, sink));
    executeJob(context, dag);
    return result;
}
Use of com.hazelcast.jet.core.DAG in project hazelcast-jet by hazelcast.
The class MasterContext, method tryStartJob().
/**
 * Starts execution of the job if it is not already completed, cancelled or failed.
 * If the job is already cancelled, the job completion procedure is triggered.
 * If the job quorum is not satisfied, job restart is rescheduled.
 * If there was a membership change and the partition table is not completely
 * fixed yet, job restart is rescheduled.
 *
 * @param executionIdSupplier maps this job's id to the execution id to use for
 *        this execution attempt
 */
void tryStartJob(Function<Long, Long> executionIdSupplier) {
// Bail out unless we can transition the job into the STARTING state.
if (!setJobStatusToStarting()) {
return;
}
// Defer the start when the quorum is absent or the cluster is not safe;
// both helpers schedule the restart themselves.
if (scheduleRestartIfQuorumAbsent() || scheduleRestartIfClusterIsNotSafe()) {
return;
}
DAG dag;
try {
dag = deserializeDAG();
} catch (Exception e) {
// A DAG that cannot be deserialized can never run: fail the job permanently.
logger.warning("DAG deserialization failed", e);
finalizeJob(e);
return;
}
// save a copy of the vertex list, because it is going to change
vertices = new HashSet<>();
dag.iterator().forEachRemaining(vertices::add);
executionId = executionIdSupplier.apply(jobId);
// last started snapshot complete or not complete. The next started snapshot must be greater than this number
long lastSnapshotId = NO_SNAPSHOT;
if (isSnapshottingEnabled()) {
// Keep only the snapshot we restore from; delete all the others.
Long snapshotIdToRestore = snapshotRepository.latestCompleteSnapshot(jobId);
snapshotRepository.deleteAllSnapshotsExceptOne(jobId, snapshotIdToRestore);
Long lastStartedSnapshot = snapshotRepository.latestStartedSnapshot(jobId);
if (snapshotIdToRestore != null) {
logger.info("State of " + jobIdString() + " will be restored from snapshot " + snapshotIdToRestore);
// Insert snapshot-restore vertices into the DAG before planning.
rewriteDagWithSnapshotRestore(dag, snapshotIdToRestore);
} else {
logger.info("No previous snapshot for " + jobIdString() + " found.");
}
if (lastStartedSnapshot != null) {
lastSnapshotId = lastStartedSnapshot;
}
}
MembersView membersView = getMembersView();
// Build the execution plans under the job's classloader; restore the previous
// context classloader in the finally block below.
ClassLoader previousCL = swapContextClassLoader(coordinationService.getClassLoader(jobId));
try {
int defaultLocalParallelism = getJetInstance(nodeEngine).getConfig().getInstanceConfig().getCooperativeThreadCount();
logger.info("Start executing " + jobIdString() + ", status " + jobStatus() + "\n" + dag.toString(defaultLocalParallelism));
logger.fine("Building execution plan for " + jobIdString());
executionPlanMap = createExecutionPlans(nodeEngine, membersView, dag, getJobConfig(), lastSnapshotId);
} catch (Exception e) {
// Planning failure is terminal for this job.
logger.severe("Exception creating execution plan for " + jobIdString(), e);
finalizeJob(e);
return;
} finally {
Thread.currentThread().setContextClassLoader(previousCL);
}
logger.fine("Built execution plans for " + jobIdString());
Set<MemberInfo> participants = executionPlanMap.keySet();
// Send each participant its serialized plan via an InitExecutionOperation;
// onInitStepCompleted continues the start procedure once all respond.
Function<ExecutionPlan, Operation> operationCtor = plan -> new InitExecutionOperation(jobId, executionId, membersView.getVersion(), participants, nodeEngine.getSerializationService().toData(plan));
invoke(operationCtor, this::onInitStepCompleted, null);
}
Aggregations