Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
From the class JobRestartWithSnapshotTest, method when_jobRestartedGracefully_then_noOutputDuplicated.
@Test
public void when_jobRestartedGracefully_then_noOutputDuplicated() {
    DAG dag = new DAG();
    int elementsInPartition = 100;
    SupplierEx<Processor> sup = () -> new SequencesInPartitionsGeneratorP(3, elementsInPartition, true);
    Vertex generator = dag.newVertex("generator", throttle(sup, 30)).localParallelism(1);
    Vertex sink = dag.newVertex("sink", writeListP("sink"));
    dag.edge(between(generator, sink));

    JobConfig config = new JobConfig();
    config.setProcessingGuarantee(EXACTLY_ONCE);
    // set long interval so that the first snapshot does not execute
    config.setSnapshotIntervalMillis(3600_000);
    Job job = instance1.getJet().newJob(dag, config);

    // wait for the job to start producing output
    List<Entry<Integer, Integer>> sinkList = instance1.getList("sink");
    assertTrueEventually(() -> assertTrue(sinkList.size() > 10));

    // When
    job.restart();
    job.join();

    // Then
    Set<Entry<Integer, Integer>> expected = IntStream.range(0, elementsInPartition)
            .boxed()
            .flatMap(i -> IntStream.range(0, 3).mapToObj(p -> entry(p, i)))
            .collect(Collectors.toSet());
    assertEquals(expected, new HashSet<>(sinkList));
}
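For comparison, here is a minimal sketch of the same graceful-restart pattern against the public Pipeline API. It assumes a running member obtained via Hazelcast.bootstrappedInstance(); the pipeline, source, and class name are illustrative and not part of the test above.

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.test.TestSources;

import static com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE;

public class GracefulRestartSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.bootstrappedInstance();

        // any snapshottable streaming source works the same way; TestSources is used for brevity
        Pipeline p = Pipeline.create();
        p.readFrom(TestSources.itemStream(10))
         .withoutTimestamps()
         .writeTo(Sinks.logger());

        // exactly-once processing plus a large snapshot interval, as in the test above
        JobConfig config = new JobConfig()
                .setProcessingGuarantee(EXACTLY_ONCE)
                .setSnapshotIntervalMillis(3600_000);
        Job job = hz.getJet().newJob(p, config);

        // graceful restart: the job takes a terminal snapshot and resumes from it,
        // which is why the test can expect no duplicated output
        job.restart();
    }
}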
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
From the class JobRestartWithSnapshotTest, method when_snapshotStartedBeforeExecution_then_firstSnapshotIsSuccessful.
@Test
public void when_snapshotStartedBeforeExecution_then_firstSnapshotIsSuccessful() {
    // instance1 is always the coordinator
    // delay the StartExecutionOperation so that the snapshot is started before execution starts on the worker member
    delayOperationsFrom(instance1, JetInitDataSerializerHook.FACTORY_ID,
            singletonList(JetInitDataSerializerHook.START_EXECUTION_OP));

    DAG dag = new DAG();
    dag.newVertex("p", FirstSnapshotProcessor::new).localParallelism(1);

    JobConfig config = new JobConfig();
    config.setProcessingGuarantee(EXACTLY_ONCE);
    config.setSnapshotIntervalMillis(0);
    Job job = instance1.getJet().newJob(dag, config);
    JobRepository repository = new JobRepository(instance1);

    // the first snapshot should succeed
    assertTrueEventually(() -> {
        JobExecutionRecord record = repository.getJobExecutionRecord(job.getId());
        assertNotNull("null JobExecutionRecord", record);
        assertEquals(0, record.snapshotId());
    }, 30);
}
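A stripped-down sketch of the same Core-API submission pattern, assuming a running member; the built-in noopP() supplier stands in for the test's FirstSnapshotProcessor, which is not shown on this page.

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.core.DAG;

import static com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE;
import static com.hazelcast.jet.core.processor.Processors.noopP;

public class CoreDagSubmissionSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.bootstrappedInstance();

        // single-vertex Core-API DAG, mirroring the "p" vertex in the test
        DAG dag = new DAG();
        dag.newVertex("p", noopP()).localParallelism(1);

        // snapshot interval 0, as in the test, makes the coordinator begin snapshotting right away
        JobConfig config = new JobConfig()
                .setProcessingGuarantee(EXACTLY_ONCE)
                .setSnapshotIntervalMillis(0);

        Job job = hz.getJet().newJob(dag, config);
        job.join();
    }
}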
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
From the class ExecutionLifecycleTest, method when_executionCancelledBeforeStart_then_jobFutureIsCancelledOnExecute.
@Test
public void when_executionCancelledBeforeStart_then_jobFutureIsCancelledOnExecute() {
    // not applicable to light jobs - we hack around with ExecutionContext
    assumeFalse(useLightJob);

    // Given
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(NoOutputSourceP::new, MEMBER_COUNT)));
    NodeEngineImpl nodeEngineImpl = getNodeEngineImpl(instance());
    Address localAddress = nodeEngineImpl.getThisAddress();
    ClusterServiceImpl clusterService = (ClusterServiceImpl) nodeEngineImpl.getClusterService();
    MembersView membersView = clusterService.getMembershipManager().getMembersView();
    int memberListVersion = membersView.getVersion();
    JetServiceBackend jetServiceBackend = getJetServiceBackend(instance());
    long jobId = 0;
    long executionId = 1;
    JobConfig jobConfig = new JobConfig();
    final Map<MemberInfo, ExecutionPlan> executionPlans = ExecutionPlanBuilder.createExecutionPlans(
            nodeEngineImpl, membersView.getMembers(), dag, jobId, executionId, jobConfig, NO_SNAPSHOT, false, null);
    ExecutionPlan executionPlan = executionPlans.get(membersView.getMember(localAddress));
    jetServiceBackend.getJobClassLoaderService().getOrCreateClassLoader(jobConfig, jobId, COORDINATOR);
    Set<MemberInfo> participants = new HashSet<>(membersView.getMembers());
    jetServiceBackend.getJobExecutionService().initExecution(
            jobId, executionId, localAddress, memberListVersion, participants, executionPlan);
    ExecutionContext executionContext = jetServiceBackend.getJobExecutionService().getExecutionContext(executionId);
    executionContext.terminateExecution(null);

    // When
    CompletableFuture<Void> future = executionContext.beginExecution(jetServiceBackend.getTaskletExecutionService());

    // Then
    expectedException.expect(CancellationException.class);
    future.join();
}
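The test drives cancellation through internal ExecutionContext plumbing. At the public API level the corresponding observable behaviour is, roughly, that joining a cancelled job throws CancellationException. A minimal sketch, assuming a running member and a trivial streaming pipeline chosen for illustration:

import java.util.concurrent.CancellationException;

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.test.TestSources;

public class CancelBeforeJoinSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.bootstrappedInstance();

        Pipeline p = Pipeline.create();
        p.readFrom(TestSources.itemStream(10))
         .withoutTimestamps()
         .writeTo(Sinks.logger());

        Job job = hz.getJet().newJob(p, new JobConfig());

        // cancel right after submission; the job future completes exceptionally
        // and join() surfaces that as a CancellationException
        job.cancel();
        try {
            job.join();
        } catch (CancellationException expected) {
            System.out.println("job was cancelled before completing");
        }
    }
}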
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
From the class OperationLossTest, method when_terminalSnapshotOperationLost_then_jobRestarts.
@Test
public void when_terminalSnapshotOperationLost_then_jobRestarts() {
    PacketFiltersUtil.dropOperationsFrom(instance(), JetInitDataSerializerHook.FACTORY_ID,
            singletonList(JetInitDataSerializerHook.SNAPSHOT_PHASE1_OPERATION));
    DAG dag = new DAG();
    Vertex v1 = dag.newVertex("v1", () -> new NoOutputSourceP()).localParallelism(1);
    Vertex v2 = dag.newVertex("v2", mapP(identity())).localParallelism(1);
    dag.edge(between(v1, v2).distributed());

    Job job = instance().getJet().newJob(dag, new JobConfig().setProcessingGuarantee(EXACTLY_ONCE));
    assertJobStatusEventually(job, RUNNING, 20);
    job.restart();

    // sleep so that the SnapshotOperation is sent out, but lost
    sleepSeconds(1);
    // reset filters so that the situation can resolve
    PacketFiltersUtil.resetPacketFiltersFrom(instance());

    // Then
    assertTrueEventually(() -> assertEquals(4, NoOutputSourceP.initCount.get()));
    NoOutputSourceP.proceedLatch.countDown();
    job.join();
}
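The assertion on initCount relies on a test-only counter; when watching a restart from the outside, one simple option is to poll Job.getStatus() until the job reports RUNNING again. A small sketch, with the helper name chosen here purely for illustration:

import com.hazelcast.jet.Job;
import com.hazelcast.jet.core.JobStatus;

public final class JobStatusPolling {
    // hypothetical helper: block until the given job reports RUNNING,
    // e.g. after a forced restart like the one in the test above
    static void awaitRunning(Job job) throws InterruptedException {
        while (job.getStatus() != JobStatus.RUNNING) {
            Thread.sleep(100);
        }
    }
}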
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
From the class OperationLossTest, method when_snapshotOperationLost_then_retried.
@Test
public void when_snapshotOperationLost_then_retried() {
    PacketFiltersUtil.dropOperationsFrom(instance(), JetInitDataSerializerHook.FACTORY_ID,
            singletonList(JetInitDataSerializerHook.SNAPSHOT_PHASE1_OPERATION));
    DAG dag = new DAG();
    Vertex v1 = dag.newVertex("v1", () -> new DummyStatefulP()).localParallelism(1);
    Vertex v2 = dag.newVertex("v2", mapP(identity())).localParallelism(1);
    dag.edge(between(v1, v2).distributed());

    Job job = instance().getJet().newJob(dag, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(100));
    assertJobStatusEventually(job, RUNNING);

    JobRepository jobRepository = new JobRepository(instance());
    assertTrueEventually(() -> {
        JobExecutionRecord record = jobRepository.getJobExecutionRecord(job.getId());
        assertNotNull("null JobExecutionRecord", record);
        assertEquals("ongoingSnapshotId", 0, record.ongoingSnapshotId());
    }, 20);
    sleepSeconds(1);

    // now lift the filter and check that a snapshot is done
    logger.info("Lifting the packet filter...");
    PacketFiltersUtil.resetPacketFiltersFrom(instance());
    waitForFirstSnapshot(jobRepository, job.getId(), 10, false);
    cancelAndJoin(job);
}
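Beyond the automatic snapshots exercised above, JobConfig also ties into named snapshots. A sketch of exporting one and resuming a new job from it, assuming a running member, an illustrative pipeline, and an illustrative snapshot name:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.JobStateSnapshot;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.test.TestSources;

import static com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE;

public class NamedSnapshotSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.bootstrappedInstance();

        Pipeline p = Pipeline.create();
        p.readFrom(TestSources.itemStream(10))
         .withoutTimestamps()
         .writeTo(Sinks.logger());

        JobConfig config = new JobConfig()
                .setProcessingGuarantee(EXACTLY_ONCE)
                .setSnapshotIntervalMillis(100);
        Job job = hz.getJet().newJob(p, config);

        // take a named snapshot and cancel the job in one step ...
        JobStateSnapshot snapshot = job.cancelAndExportSnapshot("my-snapshot");

        // ... then start a new job that resumes from that snapshot
        JobConfig resumedConfig = new JobConfig()
                .setProcessingGuarantee(EXACTLY_ONCE)
                .setInitialSnapshotName(snapshot.name());
        hz.getJet().newJob(p, resumedConfig);
    }
}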