Use of com.hazelcast.jet.core.Vertex in project hazelcast: class MasterJobContext, method completeVertices.
/**
 * Closes the {@code ProcessorMetaSupplier} of every DAG vertex after the job
 * has finished, passing the terminal {@code failure} (or {@code null} on
 * success). Each close call runs under the vertex's processor class loader,
 * nested inside the job class loader; any exception thrown by a supplier's
 * close() is logged and ignored so the remaining vertices still get closed.
 */
private void completeVertices(@Nullable Throwable failure) {
    if (vertices == null) {
        // Nothing was ever set up for this execution — nothing to close.
        return;
    }
    JobClassLoaderService classLoaderService = mc.getJetServiceBackend().getJobClassLoaderService();
    JetDelegatingClassLoader jobClassLoader = classLoaderService.getClassLoader(mc.jobId());
    doWithClassLoader(jobClassLoader, () -> {
        for (Vertex vertex : vertices) {
            try {
                ClassLoader processorClassLoader =
                        classLoaderService.getProcessorClassLoader(mc.jobId(), vertex.getName());
                doWithClassLoader(processorClassLoader, () -> vertex.getMetaSupplier().close(failure));
            } catch (Throwable t) {
                // Best-effort cleanup: a failing close() must not prevent closing the other suppliers.
                logger.severe(mc.jobIdString() + " encountered an exception in ProcessorMetaSupplier.close(), ignoring it", t);
            }
        }
    });
}
Use of com.hazelcast.jet.core.Vertex in project hazelcast: class MasterJobContext, method formatExecutionSummary.
/**
 * Builds a human-readable, multi-line summary of a finished execution:
 * the given {@code conclusion}, start time and duration, followed either by
 * a hint to enable metric storage (when no metric blobs were collected) or
 * by per-vertex received/emitted counts and distributed byte counters.
 */
private String formatExecutionSummary(String conclusion, long completionTime) {
    StringBuilder summary = new StringBuilder();
    summary.append("Execution of ").append(mc.jobIdString()).append(' ').append(conclusion);
    summary.append("\n\t").append("Start time: ").append(Util.toLocalDateTime(executionStartTime));
    summary.append("\n\t").append("Duration: ").append(formatJobDuration(completionTime - executionStartTime));
    boolean metricsAvailable = jobMetrics.stream().anyMatch(rjm -> rjm.getBlob() != null);
    if (!metricsAvailable) {
        summary.append("\n\tTo see additional job metrics enable JobConfig.storeMetricsAfterJobCompletion");
        return summary.toString();
    }
    JobMetrics metrics = JobMetricsUtil.toJobMetrics(this.jobMetrics);
    // Aggregate each counter across all members, keyed by vertex name.
    Map<String, Long> received = mergeByVertex(metrics.get(MetricNames.RECEIVED_COUNT));
    Map<String, Long> emitted = mergeByVertex(metrics.get(MetricNames.EMITTED_COUNT));
    Map<String, Long> bytesIn = mergeByVertex(metrics.get(MetricNames.DISTRIBUTED_BYTES_IN));
    Map<String, Long> bytesOut = mergeByVertex(metrics.get(MetricNames.DISTRIBUTED_BYTES_OUT));
    summary.append("\n\tVertices:");
    for (Vertex vertex : vertices) {
        summary.append("\n\t\t").append(vertex.getName());
        summary.append(getValueForVertex("\n\t\t\t" + MetricNames.RECEIVED_COUNT, vertex, received));
        summary.append(getValueForVertex("\n\t\t\t" + MetricNames.EMITTED_COUNT, vertex, emitted));
        summary.append(getValueForVertex("\n\t\t\t" + MetricNames.DISTRIBUTED_BYTES_IN, vertex, bytesIn));
        summary.append(getValueForVertex("\n\t\t\t" + MetricNames.DISTRIBUTED_BYTES_OUT, vertex, bytesOut));
    }
    return summary.toString();
}
Use of com.hazelcast.jet.core.Vertex in project hazelcast: class Util, method copyMapUsingJob.
/**
 * Submits a two-vertex Jet job that streams every entry of {@code sourceMap}
 * into {@code targetMap} on the given instance, with the connecting edge's
 * queue sized to {@code queueSize}. Returns the job's completion future.
 */
// used in jet-enterprise
@SuppressWarnings("WeakerAccess")
public static CompletableFuture<Void> copyMapUsingJob(HazelcastInstance instance, int queueSize, String sourceMap, String targetMap) {
    DAG dag = new DAG();
    Vertex reader = dag.newVertex("readMap(" + sourceMap + ')', readMapP(sourceMap));
    Vertex writer = dag.newVertex("writeMap(" + targetMap + ')', writeMapP(targetMap));
    EdgeConfig edgeConfig = new EdgeConfig().setQueueSize(queueSize);
    dag.edge(between(reader, writer).setConfig(edgeConfig));
    JobConfig config = new JobConfig().setName("copy-" + sourceMap + "-to-" + targetMap);
    return instance.getJet().newJob(dag, config).getFuture();
}
Use of com.hazelcast.jet.core.Vertex in project hazelcast: class AggregateTransform, method addToDagTwoStage.
// WHEN PRESERVE ORDER IS NOT ACTIVE
// --------- ---------
// | source0 | ... | sourceN |
// --------- ---------
// | |
// local local
// unicast unicast
// v v
// -------------------
// | accumulateP |
// -------------------
// |
// distributed
// all-to-one
// v
// ----------------
// | combineP | local parallelism = 1
// ----------------
// WHEN PRESERVE ORDER IS ACTIVE
// --------- ---------
// | source0 | ... | sourceN |
// --------- ---------
// | |
// isolated isolated
// v v
// -------------------
// | accumulateP |
// -------------------
// |
// distributed
// all-to-one
// v
// ----------------
// | combineP | local parallelism = 1
// ----------------
/**
 * Adds the two-stage global aggregation to the DAG (see diagram above):
 * a parallel accumulate vertex fed by the upstream stages, then a
 * single-processor combine vertex reached over a distributed all-to-one edge.
 */
private void addToDagTwoStage(Planner p, Context context) {
    String vertexName = name();
    determineLocalParallelism(LOCAL_PARALLELISM_USE_DEFAULT, context, p.isPreserveOrder());
    Vertex accumulate = p.dag
            .newVertex(vertexName + FIRST_STAGE_VERTEX_NAME_SUFFIX, accumulateP(aggrOp))
            .localParallelism(determinedLocalParallelism());
    // With preserve-order active the inbound edges are isolated (see diagram);
    // otherwise the default edge routing is used.
    if (p.isPreserveOrder()) {
        p.addEdges(this, accumulate, Edge::isolated);
    } else {
        p.addEdges(this, accumulate);
    }
    // The combining stage collapses everything into one result: local parallelism 1,
    // total parallelism forced to one, fed by a distributed all-to-one edge.
    determinedLocalParallelism(1);
    PlannerVertex combinePv = p.addVertex(this, vertexName, determinedLocalParallelism(),
            ProcessorMetaSupplier.forceTotalParallelismOne(ProcessorSupplier.of(combineP(aggrOp)), vertexName));
    p.dag.edge(between(accumulate, combinePv.v).distributed().allToOne(vertexName));
}
Use of com.hazelcast.jet.core.Vertex in project hazelcast: class GroupTransform, method addToDagTwoStage.
// --------- ---------
// | source0 | ... | sourceN |
// --------- ---------
// | |
// local local
// partitioned partitioned
// v v
// --------------------
// | accumulateByKeyP |
// --------------------
// |
// distributed
// partitioned
// v
// ---------------
// | combineByKeyP |
// ---------------
/**
 * Adds the two-stage grouped aggregation to the DAG (see diagram above):
 * an accumulate-by-key vertex fed by locally partitioned edges, connected
 * via a distributed partitioned edge to the combine-by-key vertex.
 */
private void addToDagTwoStage(Planner p) {
    List<FunctionEx<?, ? extends K>> keyFns = this.groupKeyFns;
    Vertex accumulate = p.dag
            .newVertex(name() + FIRST_STAGE_VERTEX_NAME_SUFFIX, accumulateByKeyP(keyFns, aggrOp))
            .localParallelism(determinedLocalParallelism());
    PlannerVertex combinePv = p.addVertex(this, name(), determinedLocalParallelism(),
            combineByKeyP(aggrOp, mapToOutputFn));
    // Each upstream ordinal partitions on its own grouping key function.
    p.addEdges(this, accumulate, (edge, ordinal) -> edge.partitioned(keyFns.get(ordinal), HASH_CODE));
    p.dag.edge(between(accumulate, combinePv.v).distributed().partitioned(entryKey()));
}
Aggregations