Use of com.hazelcast.jet.impl.execution.SnapshotRecord in project hazelcast-jet by hazelcast.
From the class SnapshotRepository, method setSnapshotStatus.
/**
 * Updates the status of the given snapshot. Returns the elapsed time for the
 * snapshot in milliseconds.
 */
long setSnapshotStatus(long jobId, long snapshotId, SnapshotStatus status) {
    IMapJet<Long, SnapshotRecord> snapshots = getSnapshotMap(jobId);
    SnapshotRecord record = compute(snapshots, snapshotId, (k, r) -> {
        r.setStatus(status);
        return r;
    });
    return System.currentTimeMillis() - record.startTime();
}
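The compute(...) call above is a helper from the Jet internals and is not shown in this excerpt. As a rough illustration only, assuming it follows the usual compare-and-swap retry pattern over a ConcurrentMap (the class name ComputeSketch and this body are hypothetical, not the actual hazelcast-jet implementation), such a helper could look like this:

import java.util.concurrent.ConcurrentMap;
import java.util.function.BiFunction;

final class ComputeSketch {
    // Re-applies the remapping function until putIfAbsent/replace/remove succeeds,
    // so concurrent writers cannot overwrite each other's updates.
    static <K, V> V compute(ConcurrentMap<K, V> map, K key,
                            BiFunction<? super K, ? super V, ? extends V> fn) {
        for (;;) {
            V oldValue = map.get(key);
            V newValue = fn.apply(key, oldValue);
            if (oldValue == null) {
                if (newValue == null) {
                    return null;                              // nothing to insert
                }
                if (map.putIfAbsent(key, newValue) == null) {
                    return newValue;                          // inserted a new entry
                }
            } else if (newValue == null) {
                if (map.remove(key, oldValue)) {
                    return null;                              // removed the old entry
                }
            } else if (map.replace(key, oldValue, newValue)) {
                return newValue;                              // replaced atomically
            }
            // lost a race with a concurrent update; retry with the fresh value
        }
    }
}

Note that setSnapshotStatus relies on the record already being present in the map: the lambda dereferences r without a null check, and the updated record is returned so the caller can compute the elapsed time from its startTime().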
Use of com.hazelcast.jet.impl.execution.SnapshotRecord in project hazelcast-jet by hazelcast.
From the class JobRestartWithSnapshotTest, method waitForNextSnapshot.
private void waitForNextSnapshot(IMapJet<Long, Object> snapshotsMap, int timeoutSeconds) {
    SnapshotRecord maxRecord = findMaxRecord(snapshotsMap);
    assertNotNull("no snapshot found", maxRecord);
    // wait until there is at least one more snapshot
    assertTrueEventually(() -> assertTrue("No more snapshots produced after restart",
            findMaxRecord(snapshotsMap).snapshotId() > maxRecord.snapshotId()), timeoutSeconds);
}
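The findMaxRecord helper is not part of this excerpt. A minimal sketch, assuming the snapshots map holds SnapshotRecord values among its entries and that "max" means the highest snapshot id (the actual test helper may apply additional filtering, for example on status), could be:

// Hypothetical sketch of findMaxRecord: returns the SnapshotRecord with the highest
// snapshotId() among the map's values, or null if the map contains none.
private SnapshotRecord findMaxRecord(IMapJet<Long, Object> snapshotsMap) {
    return snapshotsMap.values().stream()
                       .filter(v -> v instanceof SnapshotRecord)
                       .map(v -> (SnapshotRecord) v)
                       .max(java.util.Comparator.comparingLong(SnapshotRecord::snapshotId))
                       .orElse(null);
}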
Use of com.hazelcast.jet.impl.execution.SnapshotRecord in project hazelcast-jet by hazelcast.
From the class SnapshotFailureTest, method when_snapshotFails_then_jobShouldNotFail.
@Test
public void when_snapshotFails_then_jobShouldNotFail() {
    int numPartitions = 2;
    int numElements = 10;
    IMapJet<Object, Object> results = instance1.getMap("results");

    DAG dag = new DAG();
    SequencesInPartitionsMetaSupplier sup = new SequencesInPartitionsMetaSupplier(numPartitions, numElements);
    Vertex generator = dag.newVertex("generator", peekOutputP(throttle(sup, 2))).localParallelism(1);
    Vertex writeMap = dag.newVertex("writeMap", writeMapP(results.getName())).localParallelism(1);
    dag.edge(between(generator, writeMap));

    JobConfig config = new JobConfig();
    config.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE);
    config.setSnapshotIntervalMillis(100);
    Job job = instance1.newJob(dag, config);

    // Start a second job that watches the snapshots map and writes failed
    // SnapshotRecords to a list, which is checked later for a failed snapshot.
    Pipeline p = Pipeline.create();
    p.drawFrom(Sources.mapJournal(snapshotsMapName(job.getId()),
            event -> event.getNewValue() instanceof SnapshotRecord
                    && ((SnapshotRecord) event.getNewValue()).status() == SnapshotStatus.FAILED,
            EventJournalMapEvent::getNewValue, JournalInitialPosition.START_FROM_OLDEST))
     .peek()
     .drainTo(Sinks.list("failed_snapshot_records"));
    instance1.newJob(p);

    job.join();

    assertEquals("numPartitions", numPartitions, results.size());
    assertEquals("offset partition 0", numElements - 1, results.get(0));
    assertEquals("offset partition 1", numElements - 1, results.get(1));
    assertTrue("no failure occurred in store", storeFailed);
    assertFalse("no failed snapshot appeared in snapshotsMap",
            instance1.getList("failed_snapshot_records").isEmpty());
}
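As a follow-up outside the original test, the records captured by the watcher job can be inspected directly from the list sink, using only the APIs already shown above:

// Hypothetical inspection after job.join(): print each failed SnapshotRecord
// that the watcher job drained into the "failed_snapshot_records" list.
for (Object o : instance1.getList("failed_snapshot_records")) {
    SnapshotRecord record = (SnapshotRecord) o;
    System.out.println("snapshot " + record.snapshotId() + " failed with status " + record.status());
}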