Use of com.hazelcast.jet.core.TestProcessors.StuckProcessor in project hazelcast-jet by hazelcast.
The class JobTest, method when_jobIsCancelled_then_jobStatusIsCompletedEventually:
@Test
public void when_jobIsCancelled_then_jobStatusIsCompletedEventually() throws InterruptedException {
    // Given
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(StuckProcessor::new, NODE_COUNT)));

    // When
    Job job = instance1.newJob(dag);
    StuckProcessor.executionStarted.await();

    // Then
    job.cancel();
    joinAndExpectCancellation(job);

    StuckProcessor.proceedLatch.countDown();
    assertCompletedEventually(job);
}
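All of these tests coordinate with the running job through StuckProcessor's two static latches: executionStarted is counted down once the processor actually starts executing, and proceedLatch is held closed by the test until it wants the job to be able to finish. A minimal sketch of how such a processor could look (a hypothetical reconstruction for illustration, not the actual TestProcessors source):

import com.hazelcast.jet.core.AbstractProcessor;
import java.util.concurrent.CountDownLatch;

// Hypothetical sketch of a "stuck" processor: it signals that execution has
// started and then reports itself as not-done until the test opens proceedLatch.
public final class StuckProcessor extends AbstractProcessor {

    // Reassigned by tests that need a different count (see the split-brain test below).
    public static volatile CountDownLatch executionStarted = new CountDownLatch(1);
    public static volatile CountDownLatch proceedLatch = new CountDownLatch(1);

    private boolean startSignalled;

    @Override
    public boolean complete() {
        if (!startSignalled) {
            executionStarted.countDown();
            startSignalled = true;
        }
        // Stay "stuck" (return false) until the test counts down proceedLatch.
        return proceedLatch.getCount() == 0;
    }
}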
Use of com.hazelcast.jet.core.TestProcessors.StuckProcessor in project hazelcast-jet by hazelcast.
The class JobTest, method when_jobsAreCompleted_then_lastSubmittedJobIsQueriedByName:
@Test
public void when_jobsAreCompleted_then_lastSubmittedJobIsQueriedByName() {
    // Given
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(StuckProcessor::new, NODE_COUNT * 2)));
    JobConfig config = new JobConfig();
    String jobName = "job1";
    config.setName(jobName);

    // When
    Job job1 = instance1.newJob(dag, config);
    sleepAtLeastMillis(1);
    Job job2 = instance1.newJob(dag, config);

    StuckProcessor.proceedLatch.countDown();
    job1.join();
    job2.join();

    // Then
    Job trackedJob = instance1.getJob(jobName);

    assertNotNull(trackedJob);
    assertEquals(jobName, trackedJob.getName());
    assertNotEquals(job1.getId(), trackedJob.getId());
    assertEquals(job2.getId(), trackedJob.getId());
    assertEquals(COMPLETED, trackedJob.getStatus());
}
Use of com.hazelcast.jet.core.TestProcessors.StuckProcessor in project hazelcast-jet by hazelcast.
The class JobTest, method when_jobIsCancelled_then_trackedJobCanQueryJobResult:
@Test
public void when_jobIsCancelled_then_trackedJobCanQueryJobResult() throws InterruptedException {
    // Given
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(StuckProcessor::new, NODE_COUNT)));

    // When
    Job submittedJob = instance1.newJob(dag);
    StuckProcessor.executionStarted.await();

    Collection<Job> trackedJobs = instance2.getJobs();
    assertEquals(1, trackedJobs.size());
    Job trackedJob = trackedJobs.iterator().next();

    submittedJob.cancel();

    // Then
    joinAndExpectCancellation(trackedJob);

    StuckProcessor.proceedLatch.countDown();
    assertCompletedEventually(trackedJob);
}
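joinAndExpectCancellation and assertCompletedEventually are small helpers defined in JobTest and not shown on this page. A plausible sketch, assuming the Hazelcast test-support method assertTrueEventually is available and that a cancelled job eventually reports the COMPLETED status (as the test names above imply):

// Hypothetical reconstruction of the JobTest helpers used above.
private static void joinAndExpectCancellation(Job job) {
    try {
        job.join();
        fail("join() should have thrown CancellationException");
    } catch (CancellationException expected) {
        // cancellation is the expected outcome
    }
}

private static void assertCompletedEventually(Job job) {
    // Poll until the cancelled job's status settles on COMPLETED.
    assertTrueEventually(() -> assertEquals(COMPLETED, job.getStatus()));
}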
Use of com.hazelcast.jet.core.TestProcessors.StuckProcessor in project hazelcast-jet by hazelcast.
The class JobTest, method when_trackedJobCancels_then_jobCompletes:
@Test
public void when_trackedJobCancels_then_jobCompletes() {
    // Given
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(StuckProcessor::new, NODE_COUNT)));

    Job submittedJob = instance1.newJob(dag);
    Collection<Job> trackedJobs = instance2.getJobs();
    assertEquals(1, trackedJobs.size());
    Job trackedJob = trackedJobs.iterator().next();

    // When
    trackedJob.cancel();

    // Then
    joinAndExpectCancellation(trackedJob);
    joinAndExpectCancellation(submittedJob);

    StuckProcessor.proceedLatch.countDown();

    assertCompletedEventually(trackedJob);
    assertCompletedEventually(submittedJob);
}
Use of com.hazelcast.jet.core.TestProcessors.StuckProcessor in project hazelcast-jet by hazelcast.
The class SplitBrainTest, method when_jobIsSubmittedToMinoritySide_then_jobIsCancelledDuringMerge:
@Test
public void when_jobIsSubmittedToMinoritySide_then_jobIsCancelledDuringMerge() {
    int firstSubClusterSize = 3;
    int secondSubClusterSize = 2;
    int clusterSize = firstSubClusterSize + secondSubClusterSize;
    StuckProcessor.executionStarted = new CountDownLatch(secondSubClusterSize * PARALLELISM);
    Job[] jobRef = new Job[1];

    BiConsumer<JetInstance[], JetInstance[]> onSplit = (firstSubCluster, secondSubCluster) -> {
        MockPS processorSupplier = new MockPS(StuckProcessor::new, clusterSize);
        DAG dag = new DAG().vertex(new Vertex("test", processorSupplier));
        jobRef[0] = secondSubCluster[1].newJob(dag, new JobConfig().setSplitBrainProtection(true));
        assertOpenEventually(StuckProcessor.executionStarted);
    };

    Consumer<JetInstance[]> afterMerge = instances -> {
        assertEquals(secondSubClusterSize, MockPS.receivedCloseErrors.size());
        MockPS.receivedCloseErrors.forEach(t -> assertTrue(t instanceof TopologyChangedException));

        try {
            jobRef[0].getFuture().get(30, TimeUnit.SECONDS);
            fail();
        } catch (CancellationException ignored) {
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    };

    testSplitBrain(firstSubClusterSize, secondSubClusterSize, null, onSplit, afterMerge);
}
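The afterMerge block inspects MockPS.receivedCloseErrors, so MockPS is more than a plain ProcessorSupplier around StuckProcessor::new: it also records the error the engine passes to close() when an execution is torn down, which the test expects to be a TopologyChangedException on the minority side. A rough, hypothetical sketch of the relevant parts (the real test class differs in details, e.g. it uses a serializable supplier type):

import com.hazelcast.jet.core.Processor;
import com.hazelcast.jet.core.ProcessorSupplier;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Supplier;
import java.util.stream.Stream;
import static java.util.stream.Collectors.toList;

// Hypothetical sketch of MockPS: creates processors from the given supplier and
// records any Throwable passed to close(), so tests can assert on failures such
// as topology changes. NOTE: a real ProcessorSupplier must be Serializable.
public class MockPS implements ProcessorSupplier {

    public static final List<Throwable> receivedCloseErrors = new CopyOnWriteArrayList<>();

    private final Supplier<Processor> supplier;
    private final int expectedNodeCount;

    MockPS(Supplier<Processor> supplier, int expectedNodeCount) {
        this.supplier = supplier;
        this.expectedNodeCount = expectedNodeCount;
    }

    @Override
    public Collection<? extends Processor> get(int count) {
        // Create one processor instance per requested local parallelism slot.
        return Stream.generate(supplier::get).limit(count).collect(toList());
    }

    @Override
    public void close(Throwable error) {
        if (error != null) {
            receivedCloseErrors.add(error);
        }
    }
}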