Use of com.hazelcast.jet.impl.JobRepository in project hazelcast by hazelcast.
The class PostgresCdcIntegrationTest, method stressTest_exactlyOnce.
public void stressTest_exactlyOnce(boolean graceful) throws Exception {
    int updates = 1000;
    int restarts = 10;
    int snapshotIntervalMs = 100;

    Pipeline pipeline = customersPipeline(Long.MAX_VALUE);
    JobConfig config = new JobConfig()
            .setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE)
            .setSnapshotIntervalMillis(snapshotIntervalMs);

    HazelcastInstance hz = createHazelcastInstances(2)[0];
    Job job = hz.getJet().newJob(pipeline, config);
    JetTestSupport.assertJobStatusEventually(job, JobStatus.RUNNING);
    assertEqualsEventually(() -> hz.getMap("results").size(), 4);

    // Make sure the job stores a Postgres WAL offset so that it won't trigger database snapshots
    // after any restart. Multiple snapshots are a problem for this test, because it updates the
    // same row, so subsequent snapshots would return different images.
    JobRepository jr = new JobRepository(hz);
    waitForNextSnapshot(jr, job.getId(), 20, false);
    String lsnFlushedBeforeRestart = getConfirmedFlushLsn();

    // keep updating the same row in the background while the job is being restarted
    Future<?> dbChangesFuture = spawn(() -> uncheckRun(() -> {
        for (int i = 1; i <= updates; i++) {
            executeBatch("UPDATE customers SET first_name='Anne" + i + "' WHERE id=1004");
        }
    }));

    for (int i = 0; i < restarts; i++) {
        ((JobProxy) job).restart(graceful);
        assertJobStatusEventually(job, RUNNING);
        Thread.sleep(ThreadLocalRandom.current().nextInt(snapshotIntervalMs * 2));
    }
    JetTestSupport.assertJobStatusEventually(job, JobStatus.RUNNING);

    try {
        List<String> expectedPatterns = new ArrayList<>(Arrays.asList(
                "1001/00000:SYNC:Customer \\{id=1001, firstName=Sally, lastName=Thomas, "
                        + "email=sally.thomas@acme.com\\}",
                "1002/00000:SYNC:Customer \\{id=1002, firstName=George, lastName=Bailey, "
                        + "email=gbailey@foobar.com\\}",
                "1003/00000:SYNC:Customer \\{id=1003, firstName=Edward, lastName=Walker, "
                        + "email=ed@walker.com\\}",
                "1004/00000:SYNC:Customer \\{id=1004, firstName=Anne, lastName=Kretchmar, "
                        + "email=annek@noanswer.org\\}"));
        for (int i = 1; i <= updates; i++) {
            expectedPatterns.add("1004/" + format("%05d", i) + ":UPDATE:Customer \\{id=1004, firstName=Anne" + i
                    + ", lastName=Kretchmar, email=annek@noanswer.org\\}");
        }
        assertTrueEventually(() -> assertMatch(expectedPatterns, mapResultsToSortedList(hz.getMap("results"))));
        // the confirmed flush LSN must move past the pre-restart value, proving progress was committed
        assertTrueEventually(() -> assertNotEquals(lsnFlushedBeforeRestart, getConfirmedFlushLsn()));
    } finally {
        job.cancel();
        assertJobStatusEventually(job, JobStatus.FAILED);
        dbChangesFuture.get();
    }
}
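The key step in this test is waiting for the JobRepository to report a snapshot before restarting. The following is a hypothetical helper, not part of the Hazelcast test-support code, sketching what such a wait could look like using only the calls already shown in these examples (JobRepository#getJobExecutionRecord and JobExecutionRecord#snapshotId); the method name, timeout handling, and poll interval are assumptions for illustration.

import com.hazelcast.jet.impl.JobExecutionRecord;
import com.hazelcast.jet.impl.JobRepository;

// Hypothetical sketch: poll the JobRepository until a completed snapshot with an id
// greater than lastSeenSnapshotId is recorded, similar in spirit to waitForNextSnapshot()
// used in the test above.
static void awaitSnapshotNewerThan(JobRepository jr, long jobId, long lastSeenSnapshotId, long timeoutMs)
        throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        JobExecutionRecord record = jr.getJobExecutionRecord(jobId);
        // the record may still be null before the first execution record is persisted
        if (record != null && record.snapshotId() > lastSeenSnapshotId) {
            return;
        }
        Thread.sleep(50);
    }
    throw new AssertionError("no snapshot newer than " + lastSeenSnapshotId + " within " + timeoutMs + " ms");
}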
Use of com.hazelcast.jet.impl.JobRepository in project hazelcast by hazelcast.
The class JobExecutionMetricsTest, method testExecutionMetricsSuspendResumeWithSnapshot.
@Test
public void testExecutionMetricsSuspendResumeWithSnapshot() throws Exception {
    JobConfig jobConfig = new JobConfig();
    jobConfig.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE)
             .setSnapshotIntervalMillis(50);

    Job job = instance().getJet().newJob(snapshotPipeline(), jobConfig);
    JobRepository jr = new JobRepository(instance());
    waitForFirstSnapshot(jr, job.getId(), 20, false);

    JobMetricsChecker jobChecker = new JobMetricsChecker(job);
    assertTrueEventually(() -> jobChecker.assertSummedMetricValueAtLeast(EXECUTION_START_TIME, 1));

    JmxMetricsChecker jmxChecker = new JmxMetricsChecker(instance().getName(), job);
    long executionStartTime = jmxChecker.assertMetricValueAtLeast(EXECUTION_START_TIME, 1);
    jmxChecker.assertMetricValue(EXECUTION_COMPLETION_TIME, JOB_HAS_NOT_FINISHED_YET_TIME);

    job.restart();

    jmxChecker.assertMetricValue(EXECUTION_START_TIME, executionStartTime);
    jmxChecker.assertMetricValue(EXECUTION_COMPLETION_TIME, JOB_HAS_NOT_FINISHED_YET_TIME);
}
Use of com.hazelcast.jet.impl.JobRepository in project hazelcast by hazelcast.
The class JobSnapshotMetricsTest, method when_snapshotCreated_then_snapshotMetricsAreNotEmpty.
@Test
public void when_snapshotCreated_then_snapshotMetricsAreNotEmpty() throws Exception {
    JobConfig jobConfig = new JobConfig();
    jobConfig.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE)
             .setSnapshotIntervalMillis(50);

    Job job = instance().getJet().newJob(pipeline(), jobConfig);
    JobRepository jr = new JobRepository(instance());
    waitForFirstSnapshot(jr, job.getId(), 20, false);

    JobMetricsChecker checker = new JobMetricsChecker(job);
    assertTrueEventually(() -> checker.assertSummedMetricValueAtLeast(MetricNames.SNAPSHOT_KEYS, 1));
    assertSnapshotMBeans(job, SOURCE_VERTEX_NAME, 1, true);
}
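JobMetricsChecker here wraps assertions over Job#getMetrics(). As a rough illustration of what "summing" a metric means, the following hypothetical helper (not part of the test-support classes) sums all measurements of one metric name using the public JobMetrics and Measurement API; the helper name is an assumption.

import java.util.List;

import com.hazelcast.jet.Job;
import com.hazelcast.jet.core.metrics.JobMetrics;
import com.hazelcast.jet.core.metrics.Measurement;
import com.hazelcast.jet.core.metrics.MetricNames;

// Hypothetical stand-in for JobMetricsChecker#assertSummedMetricValueAtLeast: collects every
// measurement reported under the given metric name (across vertices/members) and sums them.
static long summedMetricValue(Job job, String metricName) {
    JobMetrics metrics = job.getMetrics();
    List<Measurement> measurements = metrics.get(metricName);
    return measurements.stream().mapToLong(Measurement::value).sum();
}

// usage sketch: assertTrue(summedMetricValue(job, MetricNames.SNAPSHOT_KEYS) >= 1);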
Use of com.hazelcast.jet.impl.JobRepository in project hazelcast by hazelcast.
The class MetricsTest, method test_sourceSinkTag.
@Test
public void test_sourceSinkTag() {
    DAG dag = new DAG();
    Vertex src = dag.newVertex("src", () -> new NoOutputSourceP());
    Vertex mid = dag.newVertex("mid", Processors.mapP(identity()));
    Vertex sink = dag.newVertex("sink", Processors.noopP());
    dag.edge(Edge.between(src, mid));
    dag.edge(Edge.between(mid, sink));

    Job job = instance.getJet().newJob(dag, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(100));
    assertJobStatusEventually(job, RUNNING);

    JobMetrics[] metrics = { null };
    assertTrueEventually(() -> assertNotEquals(0, (metrics[0] = job.getMetrics()).metrics().size()));
    assertSourceSinkTags(metrics[0], "src", true, true, false);
    assertSourceSinkTags(metrics[0], "mid", true, false, false);
    assertSourceSinkTags(metrics[0], "sink", true, false, true);

    // Restart after a snapshot so that the job will restart from a snapshot. Check the source/sink tags afterwards.
    waitForFirstSnapshot(new JobRepository(instance), job.getId(), 10, true);
    job.restart();
    assertJobStatusEventually(job, RUNNING);

    assertTrueEventually(() -> assertNotEquals(0, (metrics[0] = job.getMetrics()).metrics().size()));
    assertSourceSinkTags(metrics[0], "src", false, true, false);
    assertSourceSinkTags(metrics[0], "mid", false, false, false);
    assertSourceSinkTags(metrics[0], "sink", false, false, true);
}
Use of com.hazelcast.jet.impl.JobRepository in project hazelcast by hazelcast.
The class PostponedSnapshotTestBase, method startJob.
// used by subclass in jet-enterprise
@SuppressWarnings("WeakerAccess")
protected Job startJob(long snapshotInterval) {
    DAG dag = new DAG();
    Vertex highPrioritySource = dag.newVertex("highPrioritySource", () -> new SourceP(0)).localParallelism(1);
    Vertex lowPrioritySource = dag.newVertex("lowPrioritySource", () -> new SourceP(1)).localParallelism(1);
    Vertex sink = dag.newVertex("sink", writeLoggerP());
    dag.edge(between(highPrioritySource, sink).priority(-1))
       .edge(from(lowPrioritySource).to(sink, 1));

    JobConfig config = new JobConfig();
    config.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE);
    config.setSnapshotIntervalMillis(snapshotInterval);
    Job job = instance.getJet().newJob(dag, config);
    JobRepository jr = new JobRepository(instance);

    // check that a snapshot starts but stays in the ONGOING state
    if (snapshotInterval < 1000) {
        assertTrueEventually(() -> {
            JobExecutionRecord record = jr.getJobExecutionRecord(job.getId());
            assertNotNull("record is null", record);
            assertTrue(record.ongoingSnapshotId() >= 0);
        }, 5);
        assertTrueAllTheTime(() -> {
            JobExecutionRecord record = jr.getJobExecutionRecord(job.getId());
            assertTrue(record.ongoingSnapshotId() >= 0);
            assertTrue("snapshotId=" + record.snapshotId(), record.snapshotId() < 0);
        }, 2);
    } else {
        assertJobStatusEventually(job, RUNNING);
    }
    return job;
}
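The if-branch above distinguishes a snapshot that has started (ongoingSnapshotId() >= 0) from one that has completed (snapshotId() >= 0). A small hypothetical predicate, built only from the JobExecutionRecord accessors used in this example, makes that condition explicit; the method name is an assumption for illustration.

// Hypothetical sketch: true while a snapshot is in progress but none has completed yet.
static boolean snapshotStartedButNotCompleted(JobRepository jr, long jobId) {
    JobExecutionRecord record = jr.getJobExecutionRecord(jobId);
    return record != null
            && record.ongoingSnapshotId() >= 0   // a snapshot attempt has been started
            && record.snapshotId() < 0;          // but no snapshot has completed so far
}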