Use of com.hazelcast.jet.core.JobRestartWithSnapshotTest.SequencesInPartitionsGeneratorP in project hazelcast by hazelcast.
From the class SnapshotFailureTest, method when_snapshotFails_then_jobShouldNotFail:
@Test
public void when_snapshotFails_then_jobShouldNotFail() {
    storeFailed = false;
    int numPartitions = 2;
    int numElements = 10;
    IMap<Object, Object> results = instance1.getMap("results");

    // DAG: a throttled two-partition sequence generator feeding a map sink
    DAG dag = new DAG();
    SupplierEx<Processor> sup = () -> new SequencesInPartitionsGeneratorP(numPartitions, numElements, false);
    Vertex generator = dag.newVertex("generator", peekOutputP(throttle(sup, 2))).localParallelism(1);
    Vertex writeMap = dag.newVertex("writeMap", writeMapP(results.getName())).localParallelism(1);
    dag.edge(between(generator, writeMap));

    // Exactly-once processing with a short snapshot interval, so snapshots
    // are attempted (and fail) repeatedly while the job runs
    JobConfig config = new JobConfig();
    config.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE);
    config.setSnapshotIntervalMillis(100);

    Job job = instance1.getJet().newJob(dag, config);
    job.join();

    // The job must complete normally and produce correct results even though
    // at least one snapshot write failed
    assertEquals("numPartitions", numPartitions, results.size());
    assertEquals("offset partition 0", numElements - 1, results.get(0));
    assertEquals("offset partition 1", numElements - 1, results.get(1));
    assertTrue("no failure occurred in store", storeFailed);
}
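Nothing inside this method makes the snapshot fail: the storeFailed flag is flipped by a failing MapStore that the test class registers, in setup code not shown in this listing, on Jet's internal snapshot data maps. A minimal sketch of what that registration might look like, modeled on the configuration visible in the second example below:

    // Assumed setup, not part of this listing: attach a MapStore that can be
    // told to fail to the maps Jet uses to store snapshot data, so writing a
    // snapshot fails and the failure is recorded for the assertion above.
    MapConfig mapConfig = new MapConfig(JobRepository.SNAPSHOT_DATA_MAP_PREFIX + "*");
    mapConfig.getMapStoreConfig()
             .setClassName(FailingMapStore.class.getName())
             .setEnabled(true);
    Config config = instance1.getConfig();
    ((DynamicConfigurationAwareConfig) config).getStaticConfig().addMapConfig(mapConfig);

Because the MapConfig name ends in a wildcard, the failing store applies only to maps whose names start with the snapshot-data prefix; regular user maps such as "results" are unaffected.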
Use of com.hazelcast.jet.core.JobRestartWithSnapshotTest.SequencesInPartitionsGeneratorP in project hazelcast by hazelcast.
From the class ManualRestartTest, method when_terminalSnapshotFails_then_previousSnapshotUsed:
@Test
public void when_terminalSnapshotFails_then_previousSnapshotUsed() {
    // Register a failing MapStore on Jet's internal snapshot data maps, so a
    // snapshot write can be made to fail on demand via FailingMapStore.fail
    MapConfig mapConfig = new MapConfig(JobRepository.SNAPSHOT_DATA_MAP_PREFIX + "*");
    mapConfig.getMapStoreConfig().setClassName(FailingMapStore.class.getName()).setEnabled(true);
    Config config = instances[0].getConfig();
    ((DynamicConfigurationAwareConfig) config).getStaticConfig().addMapConfig(mapConfig);
    FailingMapStore.fail = false;
    FailingMapStore.failed = false;

    // DAG: a throttled two-partition sequence generator feeding a list sink
    DAG dag = new DAG();
    Vertex source = dag.newVertex("source",
            throttle(() -> new SequencesInPartitionsGeneratorP(2, 10000, true), 1000));
    Vertex sink = dag.newVertex("sink", writeListP("sink"));
    dag.edge(between(source, sink));
    source.localParallelism(1);

    Job job = instances[0].getJet().newJob(dag, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(2000));

    // wait for the first successful snapshot
    JetServiceBackend jetServiceBackend = getNode(instances[0]).nodeEngine.getService(JetServiceBackend.SERVICE_NAME);
    JobRepository jobRepository = jetServiceBackend.getJobCoordinationService().jobRepository();
    assertJobStatusEventually(job, RUNNING);
    assertTrueEventually(() -> assertTrue(
            jobRepository.getJobExecutionRecord(job.getId()).dataMapIndex() >= 0));

    // When: make the terminal snapshot taken during the restart fail
    sleepMillis(100);
    FailingMapStore.fail = true;
    job.restart();
    assertTrueEventually(() -> assertTrue(FailingMapStore.failed));
    FailingMapStore.fail = false;

    job.join();

    // Count how many times each value from partition 0 appears in the sink,
    // ordered by value
    Map<Integer, Integer> actual = new ArrayList<>(instances[0].<Entry<Integer, Integer>>getList("sink"))
            .stream()
            // we'll only check partition 0
            .filter(e -> e.getKey() == 0)
            .map(Entry::getValue)
            .collect(Collectors.toMap(e -> e, e -> 1, (o, n) -> o + n, TreeMap::new));

    assertEquals("first item != 1, " + actual.toString(), (Integer) 1, actual.get(0));
    assertEquals("last item != 1, " + actual.toString(), (Integer) 1, actual.get(9999));

    // The counts should be a run of ones, then a run of twos, then ones again.
    // The twos are the items emitted between the last successful snapshot and
    // the actual termination: after the restart the job resumes from that
    // snapshot, so exactly those items are reprocessed and appear twice.
    boolean sawTwo = false;
    boolean sawOneAgain = false;
    for (Integer v : actual.values()) {
        if (v == 1) {
            if (sawTwo) {
                sawOneAgain = true;
            }
        } else if (v == 2) {
            assertFalse("got a 2 in another group", sawOneAgain);
            sawTwo = true;
        } else {
            fail("v=" + v);
        }
    }
    assertTrue("didn't see any 2s", sawTwo);
}
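The FailingMapStore referenced above is a helper from the Hazelcast test sources and its code is not part of this listing. A minimal sketch of such a class, assuming Hazelcast's MapStoreAdapter as the base (its no-op defaults mean only store() needs overriding):

    // Hypothetical reconstruction: while `fail` is set, every store() call
    // throws, which makes persisting the snapshot data fail; `failed` records
    // that a failure actually happened so the test can assert on it.
    public static class FailingMapStore extends MapStoreAdapter<Object, Object> implements Serializable {
        static volatile boolean fail;
        static volatile boolean failed;

        @Override
        public void store(Object key, Object value) {
            if (fail) {
                failed = true;
                throw new RuntimeException("deliberate failure writing snapshot data");
            }
        }
    }

Both flags are static and volatile because the map store is instantiated by Hazelcast itself, so the test can only reach it through shared state.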