Example use of com.hazelcast.jet.core.JobRestartWithSnapshotTest.SequencesInPartitionsMetaSupplier from the hazelcast-jet project by hazelcast.
Taken from the class SnapshotFailureTest, method when_snapshotFails_then_jobShouldNotFail.
@Test
public void when_snapshotFails_then_jobShouldNotFail() {
    final int partitionCount = 2;
    final int elementCount = 10;
    IMapJet<Object, Object> resultsMap = instance1.getMap("results");

    // Build a DAG: a throttled partitioned-sequence source feeding a map sink.
    SequencesInPartitionsMetaSupplier metaSupplier =
            new SequencesInPartitionsMetaSupplier(partitionCount, elementCount);
    DAG dag = new DAG();
    Vertex sourceVertex = dag.newVertex("generator", peekOutputP(throttle(metaSupplier, 2)))
                             .localParallelism(1);
    Vertex sinkVertex = dag.newVertex("writeMap", writeMapP(resultsMap.getName()))
                           .localParallelism(1);
    dag.edge(between(sourceVertex, sinkVertex));

    // Run with exactly-once guarantee and a short snapshot interval so a
    // snapshot failure is provoked quickly.
    JobConfig jobConfig = new JobConfig();
    jobConfig.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE);
    jobConfig.setSnapshotIntervalMillis(100);
    Job job = instance1.newJob(dag, jobConfig);

    // A second, observer job: watch the snapshots map journal and copy any
    // SnapshotRecord that reached FAILED status into a list we can inspect.
    Pipeline watcherPipeline = Pipeline.create();
    watcherPipeline
            .drawFrom(Sources.mapJournal(
                    snapshotsMapName(job.getId()),
                    event -> event.getNewValue() instanceof SnapshotRecord
                            && ((SnapshotRecord) event.getNewValue()).status() == SnapshotStatus.FAILED,
                    EventJournalMapEvent::getNewValue,
                    JournalInitialPosition.START_FROM_OLDEST))
            .peek()
            .drainTo(Sinks.list("failed_snapshot_records"));
    instance1.newJob(watcherPipeline);

    job.join();

    // The job must complete despite the failed snapshot: one entry per
    // partition, each holding the final offset.
    assertEquals("numPartitions", partitionCount, resultsMap.size());
    assertEquals("offset partition 0", elementCount - 1, resultsMap.get(0));
    assertEquals("offset partition 1", elementCount - 1, resultsMap.get(1));
    // Sanity-check that the snapshot store actually failed at least once...
    assertTrue("no failure occurred in store", storeFailed);
    // ...and that the failure was recorded as a FAILED SnapshotRecord.
    assertFalse("no failed snapshot appeared in snapshotsMap",
            instance1.getList("failed_snapshot_records").isEmpty());
}
Aggregations