Use of com.hazelcast.jet.impl.pipeline.PipelineImpl in project hazelcast by hazelcast.
From the class OrderedBatchParallelismTest, the method applyTransformAndGetDag. The helper builds an order-preserving batch pipeline, applies the transform under test, and converts the pipeline to a DAG so the test can inspect the resulting vertex parallelism:
private DAG applyTransformAndGetDag(FunctionEx<BatchStage<Long>, BatchStage<Long>> transform) {
    PipelineImpl p = (PipelineImpl) Pipeline.create().setPreserveOrder(true);
    BatchStage<Long> source = p.readFrom(TestSources.items(1L))
                               .setLocalParallelism(UPSTREAM_PARALLELISM);
    BatchStage<Long> applied = source.apply(transform);
    applied.mapStateful(LongAccumulator::new, (s, x) -> x)
           .writeTo(Sinks.noop());
    return p.toDag(PIPELINE_CTX);
}
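A hypothetical test that could sit next to this helper in the same class (the test name and the "map" vertex name are assumptions, not taken from the original source): with setPreserveOrder(true), an order-sensitive stage such as map() is expected to keep the upstream local parallelism instead of fanning out to the default.

@Test
public void when_mapApplied_then_upstreamParallelismIsKept() {
    // map() must not reorder items, so under preserveOrder its vertex
    // should inherit UPSTREAM_PARALLELISM rather than the default.
    DAG dag = applyTransformAndGetDag(stage -> stage.map(x -> x + 1));
    Vertex mapVertex = dag.getVertex("map"); // vertex name is an assumption
    assertNotNull(mapVertex);
    assertEquals(UPSTREAM_PARALLELISM, mapVertex.getLocalParallelism());
}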
Use of com.hazelcast.jet.impl.pipeline.PipelineImpl in project hazelcast by hazelcast.
From the class JobCoordinationService, the method submitJob. When the deserialized job definition is a PipelineImpl, it is converted to a DAG whose default local parallelism is the configured cooperative thread count; otherwise the definition is already a DAG and the original serialized form is reused:
public CompletableFuture<Void> submitJob(long jobId, Data serializedJobDefinition,
                                         JobConfig jobConfig, Subject subject) {
    CompletableFuture<Void> res = new CompletableFuture<>();
    submitToCoordinatorThread(() -> {
        MasterContext masterContext;
        try {
            assertIsMaster("Cannot submit job " + idToString(jobId) + " to non-master node");
            checkOperationalState();

            // The order of operations is important.
            // First, check whether the job is already completed.
            JobResult jobResult = jobRepository.getJobResult(jobId);
            if (jobResult != null) {
                logger.fine("Not starting job " + idToString(jobId)
                        + " since already completed with result: " + jobResult);
                return;
            }

            if (!config.isResourceUploadEnabled() && !jobConfig.getResourceConfigs().isEmpty()) {
                throw new JetException(Util.JET_RESOURCE_UPLOAD_DISABLED_MESSAGE);
            }

            int quorumSize = jobConfig.isSplitBrainProtectionEnabled() ? getQuorumSize() : 0;
            Object jobDefinition = deserializeJobDefinition(jobId, jobConfig, serializedJobDefinition);
            DAG dag;
            Data serializedDag;
            if (jobDefinition instanceof PipelineImpl) {
                int coopThreadCount = config.getCooperativeThreadCount();
                dag = ((PipelineImpl) jobDefinition).toDag(new Context() {
                    @Override
                    public int defaultLocalParallelism() {
                        return coopThreadCount;
                    }
                });
                serializedDag = nodeEngine().getSerializationService().toData(dag);
            } else {
                dag = (DAG) jobDefinition;
                serializedDag = serializedJobDefinition;
            }

            checkPermissions(subject, dag);
            Set<String> ownedObservables = ownedObservables(dag);
            JobRecord jobRecord = new JobRecord(nodeEngine.getClusterService().getClusterVersion(),
                    jobId, serializedDag, dagToJson(dag), jobConfig, ownedObservables, subject);
            JobExecutionRecord jobExecutionRecord = new JobExecutionRecord(jobId, quorumSize);
            masterContext = createMasterContext(jobRecord, jobExecutionRecord);

            boolean hasDuplicateJobName;
            synchronized (lock) {
                assertIsMaster("Cannot submit job " + idToString(jobId) + " to non-master node");
                checkOperationalState();
                hasDuplicateJobName = jobConfig.getName() != null
                        && hasActiveJobWithName(jobConfig.getName());
                if (!hasDuplicateJobName) {
                    // Just try to initiate the coordination.
                    MasterContext prev = masterContexts.putIfAbsent(jobId, masterContext);
                    if (prev != null) {
                        logger.fine("Joining to already existing masterContext " + prev.jobIdString());
                        return;
                    }
                }
            }

            if (hasDuplicateJobName) {
                jobRepository.deleteJob(jobId);
                throw new JobAlreadyExistsException("Another active job with equal name ("
                        + jobConfig.getName() + ") exists: " + idToString(jobId));
            }

            // If the job is not currently running, it might have just completed.
            if (completeMasterContextIfJobAlreadyCompleted(masterContext)) {
                return;
            }

            // If there is neither a master context nor a job result, this is the first submission.
            jobSubmitted.inc();
            jobRepository.putNewJobRecord(jobRecord);
            logger.info("Starting job " + idToString(masterContext.jobId()) + " based on submit request");
        } catch (Throwable e) {
            jetServiceBackend.getJobClassLoaderService().tryRemoveClassloadersForJob(jobId, COORDINATOR);
            res.completeExceptionally(e);
            throw e;
        } finally {
            res.complete(null);
        }
        tryStartJob(masterContext);
    });
    return res;
}
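For context, a minimal client-side sketch of how a submission reaches this method (cluster setup is omitted and the job name is illustrative): newJob() serializes the Pipeline and sends it to the master member, which lands in submitJob() above and takes the PipelineImpl branch.

HazelcastInstance hz = Hazelcast.bootstrappedInstance();
Pipeline pipeline = Pipeline.create();
pipeline.readFrom(TestSources.items(1, 2, 3))
        .map(x -> x * 2)
        .writeTo(Sinks.logger());
// The PipelineImpl is serialized as the job definition and converted
// to a DAG on the master, sized by the cooperative thread count.
Job job = hz.getJet().newJob(pipeline, new JobConfig().setName("double-items"));
job.join();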
Use of com.hazelcast.jet.impl.pipeline.PipelineImpl in project hazelcast by hazelcast.
From the class OrderedStreamParallelismTest, the method applyTransformAndGetDag. This is the streaming counterpart of the batch helper above: it adds timestamps to the source stage and otherwise follows the same pattern:
private DAG applyTransformAndGetDag(FunctionEx<StreamStage<Integer>, StreamStage<Integer>> transform) {
    PipelineImpl p = (PipelineImpl) Pipeline.create().setPreserveOrder(true);
    StreamStage<Integer> source = p.readFrom(TestSources.items(1))
                                   .setLocalParallelism(UPSTREAM_PARALLELISM)
                                   .addTimestamps(t -> 0, Long.MAX_VALUE);
    StreamStage<Integer> applied = source.apply(transform);
    applied.mapStateful(LongAccumulator::new, (s, x) -> x)
           .writeTo(Sinks.noop());
    return p.toDag(PIPELINE_CTX);
}
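Both test helpers pass a PIPELINE_CTX constant to toDag(). A plausible definition, mirroring the anonymous Context from submitJob() above (the field names and the parallelism value are assumptions, not taken from the original tests):

private static final int DEFAULT_PARALLELISM = 2; // assumed value

private static final PipelineImpl.Context PIPELINE_CTX = new PipelineImpl.Context() {
    @Override
    public int defaultLocalParallelism() {
        // Stands in for the cooperative thread count that
        // JobCoordinationService supplies on a real member.
        return DEFAULT_PARALLELISM;
    }
};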