Usage of `com.hazelcast.jet.impl.JobRepository.EXPORTED_SNAPSHOTS_PREFIX` in the hazelcast/hazelcast project.
From the class `MasterJobContext`, method `tryStartJob`:
/**
 * Starts the execution of the job if it is not already completed,
 * cancelled or failed.
 * <p>
 * If the job is already cancelled, triggers the job completion procedure.
 * <p>
 * If the job quorum is not satisfied, reschedules the job restart.
 * <p>
 * If there was a membership change and the partition table is not completely
 * fixed yet, reschedules the job restart.
 *
 * @param executionIdSupplier supplies the execution ID for the new execution
 *        attempt (consumed by {@code resolveDagAndCL})
 */
void tryStartJob(Supplier<Long> executionIdSupplier) {
    // All job-coordination work is serialized on the single coordinator thread.
    mc.coordinationService().submitToCoordinatorThread(() -> {
        executionStartTime = System.currentTimeMillis();
        try {
            JobExecutionRecord jobExecRec = mc.jobExecutionRecord();
            // Record that an execution attempt was made for this job.
            jobExecRec.markExecuted();
            Tuple2<DAG, ClassLoader> dagAndClassloader = resolveDagAndCL(executionIdSupplier);
            if (dagAndClassloader == null) {
                // Nothing to start — presumably resolveDagAndCL already handled
                // (logged/completed/rescheduled) the reason; TODO confirm.
                return;
            }
            DAG dag = dagAndClassloader.f0();
            assert dag != null;
            ClassLoader classLoader = dagAndClassloader.f1();
            // Render the DOT form of the user-submitted DAG now:
            // must call this before rewriteDagWithSnapshotRestore(), which
            // presumably alters the DAG for snapshot restoration.
            String dotRepresentation = dag.toDotString(defaultParallelism, defaultQueueSize);
            long snapshotId = jobExecRec.snapshotId();
            String snapshotName = mc.jobConfig().getInitialSnapshotName();
            // Choose the IMap to restore state from:
            //   1. the data map of this job's last successful automatic
            //      snapshot, if one exists (snapshotId >= 0);
            //   2. otherwise the exported snapshot configured as the job's
            //      initial snapshot, if any;
            //   3. otherwise null — start with no restored state.
            String mapName = snapshotId >= 0 ? jobExecRec.successfulSnapshotDataMapName(mc.jobId()) : snapshotName != null ? EXPORTED_SNAPSHOTS_PREFIX + snapshotName : null;
            if (mapName != null) {
                rewriteDagWithSnapshotRestore(dag, snapshotId, mapName, snapshotName);
            } else {
                logger.info("Didn't find any snapshot to restore for " + mc.jobIdString());
            }
            // Capture the member list and its version once so the execution
            // plans and the init operations are built against the same view.
            MembersView membersView = Util.getMembersView(mc.nodeEngine());
            logger.info("Start executing " + mc.jobIdString() + ", execution graph in DOT format:\n" + dotRepresentation + "\nHINT: You can use graphviz or http://viz-js.com to visualize the printed graph.");
            logger.fine("Building execution plan for " + mc.jobIdString());
            // Build the per-member execution plans under the job's classloader
            // so user-defined classes resolve during planning.
            Util.doWithClassLoader(classLoader, () -> mc.setExecutionPlanMap(createExecutionPlans(mc.nodeEngine(), membersView.getMembers(), dag, mc.jobId(), mc.executionId(), mc.jobConfig(), jobExecRec.ongoingSnapshotId(), false, mc.jobRecord().getSubject())));
            logger.fine("Built execution plans for " + mc.jobIdString());
            Set<MemberInfo> participants = mc.executionPlanMap().keySet();
            Version coordinatorVersion = mc.nodeEngine().getLocalMember().getVersion().asVersion();
            // Each participant receives an InitExecutionOperation carrying its
            // own serialized execution plan; responses are collected by
            // onInitStepCompleted.
            Function<ExecutionPlan, Operation> operationCtor = plan -> new InitExecutionOperation(mc.jobId(), mc.executionId(), membersView.getVersion(), coordinatorVersion, participants, mc.nodeEngine().getSerializationService().toData(plan), false);
            mc.invokeOnParticipants(operationCtor, this::onInitStepCompleted, null, false);
        } catch (Throwable e) {
            // Any failure during startup routes the job through the regular
            // completion/failure path.
            finalizeJob(e);
        }
    });
}
Usage of `com.hazelcast.jet.impl.JobRepository.EXPORTED_SNAPSHOTS_PREFIX` in the hazelcast/hazelcast project.
From the class `MasterSnapshotContext`, method `onSnapshotPhase1CompleteWithStartResponses`:
/**
 * Handles completion of snapshot phase 1: merges member responses, writes the
 * {@link SnapshotValidationRecord} into the snapshot map, persists the job
 * execution record (the commit point for regular snapshots), and then starts
 * phase 2 on all participants.
 *
 * @param responses        phase-1 responses per member, forwarded to phase 2
 * @param executionId      execution the snapshot belongs to; stale responses
 *                         from an older execution are ignored
 * @param snapshotId       ID of the snapshot being completed
 * @param snapshotMapName  IMap holding the snapshot data
 * @param snapshotFlags    flags (export / export-only) steering cleanup and
 *                         phase-2 success
 * @param future           completed later by the phase-2 handler, may be null
 * @param mergedResult     accumulator for per-member phase-1 results
 * @param missingResponses futures for members whose StartExecutionOperation
 *                         response arrived late; all done by the time this runs
 */
private void onSnapshotPhase1CompleteWithStartResponses(Collection<Entry<MemberInfo, Object>> responses, long executionId, long snapshotId, String snapshotMapName, int snapshotFlags, @Nullable CompletableFuture<Void> future, SnapshotPhase1Result mergedResult, List<CompletableFuture<Void>> missingResponses) {
    // Serialize all snapshot bookkeeping on the coordinator thread.
    mc.coordinationService().submitToCoordinatorThread(() -> {
        mc.lock();
        boolean isSuccess;
        SnapshotStats stats;
        try {
            if (!missingResponses.isEmpty()) {
                // NOTE(review): fires when responses *were* missing earlier; per
                // the assert below they are all done now, hence "received".
                LoggingUtil.logFine(logger, "%s all awaited responses to StartExecutionOperation received or " + "were already received", mc.jobIdString());
            }
            // Check the execution ID to check if a new execution didn't start yet.
            if (executionId != mc.executionId()) {
                LoggingUtil.logFine(logger, "%s: ignoring responses for snapshot %s phase 1: " + "the responses are from a different execution: %s. Responses: %s", mc.jobIdString(), snapshotId, idToString(executionId), responses);
                // a new execution started, ignore this response.
                return;
            }
            // Fold the late responses into the merged phase-1 result; failures
            // become part of the merged error.
            for (CompletableFuture<Void> response : missingResponses) {
                assert response.isDone() : "response not done";
                try {
                    response.get();
                } catch (ExecutionException e) {
                    mergedResult.merge(new SnapshotPhase1Result(0, 0, 0, e.getCause()));
                } catch (InterruptedException e) {
                    // Restore the interrupt flag; the loop continues with the
                    // remaining (already-done) futures.
                    Thread.currentThread().interrupt();
                }
            }
            IMap<Object, Object> snapshotMap = mc.nodeEngine().getHazelcastInstance().getMap(snapshotMapName);
            try {
                SnapshotValidationRecord validationRecord = new SnapshotValidationRecord(snapshotId, mergedResult.getNumChunks(), mergedResult.getNumBytes(), mc.jobExecutionRecord().ongoingSnapshotStartTime(), mc.jobId(), mc.jobName(), mc.jobRecord().getDagJson());
                // The decision moment for exported snapshots: after this the snapshot is valid to be restored
                // from, however it will be not listed by JetInstance.getJobStateSnapshots unless the validation
                // record is inserted into the cache below
                Object oldValue = snapshotMap.put(SnapshotValidationRecord.KEY, validationRecord);
                if (snapshotMapName.startsWith(EXPORTED_SNAPSHOTS_PREFIX)) {
                    // Exported snapshot: cache the validation record under the
                    // user-visible name (map name minus the prefix).
                    String snapshotName = snapshotMapName.substring(EXPORTED_SNAPSHOTS_PREFIX.length());
                    mc.jobRepository().cacheValidationRecord(snapshotName, validationRecord);
                }
                if (oldValue != null) {
                    // A validation record was already present — another writer
                    // used this map; the snapshot data may be corrupted.
                    logger.severe("SnapshotValidationRecord overwritten after writing to '" + snapshotMapName + "' for " + mc.jobIdString() + ": snapshot data might be corrupted");
                }
            } catch (Exception e) {
                // Treat a failure to write the validation record as a snapshot
                // failure.
                mergedResult.merge(new SnapshotPhase1Result(0, 0, 0, e));
            }
            isSuccess = mergedResult.getError() == null;
            stats = mc.jobExecutionRecord().ongoingSnapshotDone(mergedResult.getNumBytes(), mergedResult.getNumKeys(), mergedResult.getNumChunks(), mergedResult.getError());
            // the decision moment for regular snapshots: after this the snapshot is ready to be restored from
            mc.writeJobExecutionRecord(false);
            if (logger.isFineEnabled()) {
                logger.fine(String.format("Snapshot %d phase 1 for %s completed with status %s in %dms, " + "%,d bytes, %,d keys in %,d chunks, stored in '%s', proceeding to phase 2", snapshotId, mc.jobIdString(), isSuccess ? "SUCCESS" : "FAILURE", stats.duration(), stats.numBytes(), stats.numKeys(), stats.numChunks(), snapshotMapName));
            }
            if (!isSuccess) {
                logger.warning(mc.jobIdString() + " snapshot " + snapshotId + " phase 1 failed on some " + "member(s), one of the failures: " + mergedResult.getError());
                // Best-effort cleanup of the partially-written snapshot data;
                // a cleanup failure is only logged.
                try {
                    snapshotMap.clear();
                } catch (Exception e) {
                    logger.warning(mc.jobIdString() + ": failed to clear snapshot map '" + snapshotMapName + "' after a failure", e);
                }
            }
            if (!SnapshotFlags.isExport(snapshotFlags)) {
                // Regular (non-export) snapshot: drop the now-superseded data of
                // the other (ongoing-index) automatic snapshot map.
                mc.jobRepository().clearSnapshotData(mc.jobId(), mc.jobExecutionRecord().ongoingDataMapIndex());
            }
        } finally {
            mc.unlock();
        }
        // start the phase 2
        Function<ExecutionPlan, Operation> factory = plan -> new SnapshotPhase2Operation(mc.jobId(), executionId, snapshotId, isSuccess && !SnapshotFlags.isExportOnly(snapshotFlags));
        mc.invokeOnParticipants(factory, responses2 -> onSnapshotPhase2Complete(mergedResult.getError(), responses2, executionId, snapshotId, snapshotFlags, future, stats.startTime()), null, true);
    });
}
Aggregations