Use of com.hazelcast.jet.core.TestProcessors.MockPS in project hazelcast-jet by hazelcast.
The class TopologyChangeTest, method when_nonCoordinatorLeavesDuringExecution_then_jobRestarts.
@Test
public void when_nonCoordinatorLeavesDuringExecution_then_jobRestarts() throws Throwable {
    // Given
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(StuckProcessor::new, nodeCount)));

    // When
    Job job = instances[0].newJob(dag);
    StuckProcessor.executionStarted.await();
    instances[2].getHazelcastInstance().getLifecycleService().terminate();
    StuckProcessor.proceedLatch.countDown();
    job.join();

    // upon non-coordinator member leave, remaining members restart and complete the job
    final int count = nodeCount * 2 - 1;
    assertEquals(count, MockPS.initCount.get());
    assertTrueEventually(() -> {
        assertEquals(count, MockPS.closeCount.get());
        assertEquals(nodeCount, MockPS.receivedCloseErrors.size());
        for (int i = 0; i < MockPS.receivedCloseErrors.size(); i++) {
            Throwable error = MockPS.receivedCloseErrors.get(i);
            assertTrue(error instanceof TopologyChangedException || error instanceof HazelcastInstanceNotActiveException);
        }
    });
}
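The counters asserted above (initCount, closeCount, receivedCloseErrors) are static fields of MockPS, which is not shown on this page. For orientation, here is a minimal sketch of a MockPS-style counting ProcessorSupplier, written against Jet's ProcessorSupplier contract (init, get, close). The class name CountingPS, its field layout, and the Hazelcast 5.x package imports are assumptions for illustration, not the actual TestProcessors.MockPS source.

import com.hazelcast.function.SupplierEx;
import com.hazelcast.jet.core.Processor;
import com.hazelcast.jet.core.ProcessorSupplier;

import java.util.Collection;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Illustrative sketch only; the real test class is com.hazelcast.jet.core.TestProcessors.MockPS.
public class CountingPS implements ProcessorSupplier {

    public static final AtomicInteger initCount = new AtomicInteger();
    public static final AtomicInteger closeCount = new AtomicInteger();
    public static final List<Throwable> receivedCloseErrors = new CopyOnWriteArrayList<>();

    private final SupplierEx<Processor> processorSupplier;

    public CountingPS(SupplierEx<Processor> processorSupplier, int memberCount) {
        // memberCount mirrors the call sites above; it is unused in this sketch
        this.processorSupplier = processorSupplier;
    }

    @Override
    public void init(Context context) {
        // called once per participating member per execution, which is what assertions
        // such as assertEquals(nodeCount * 2 - 1, initCount.get()) rely on
        initCount.incrementAndGet();
    }

    @Override
    public Collection<? extends Processor> get(int count) {
        return Stream.generate(processorSupplier).limit(count).collect(Collectors.toList());
    }

    @Override
    public void close(Throwable error) {
        // error is non-null when the execution failed, e.g. with TopologyChangedException
        if (error != null) {
            receivedCloseErrors.add(error);
        }
        closeCount.incrementAndGet();
    }
}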
Use of com.hazelcast.jet.core.TestProcessors.MockPS in project hazelcast-jet by hazelcast.
The class TopologyChangeTest, method when_nodeIsShuttingDownDuringInit_then_jobRestarts.
@Test
public void when_nodeIsShuttingDownDuringInit_then_jobRestarts() {
    // Given that instances[2] will have a long shutdown: its SHUTDOWN_REQUEST to instances[0]
    // is dropped, and INIT_EXECUTION_OP from instances[0] to it is rejected
    for (JetInstance instance : instances) {
        warmUpPartitions(instance.getHazelcastInstance());
    }
    dropOperationsBetween(instances[2].getHazelcastInstance(), instances[0].getHazelcastInstance(),
            PartitionDataSerializerHook.F_ID, singletonList(SHUTDOWN_REQUEST));
    rejectOperationsBetween(instances[0].getHazelcastInstance(), instances[2].getHazelcastInstance(),
            JetInitDataSerializerHook.FACTORY_ID, singletonList(INIT_EXECUTION_OP));

    // When a job participant starts its shutdown after the job is submitted
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(TestProcessors.Identity::new, nodeCount - 1)));
    Job job = instances[0].newJob(dag);
    JetService jetService = getJetService(instances[0]);
    assertTrueEventually(() -> assertFalse(jetService.getJobCoordinationService().getMasterContexts().isEmpty()));
    spawn(instances[2]::shutdown);

    // Then, it restarts until the shutting-down node is gone
    assertTrueEventually(() -> assertEquals(STARTING, job.getStatus()));
    assertTrueAllTheTime(() -> assertEquals(STARTING, job.getStatus()), 5);
    resetPacketFiltersFrom(instances[2].getHazelcastInstance());
    job.join();
}
Use of com.hazelcast.jet.core.TestProcessors.MockPS in project hazelcast by hazelcast.
The class Job_SeparateClusterTest, method stressTest_getJobStatus.
private void stressTest_getJobStatus(Supplier<HazelcastInstance> submitterSupplier) throws Exception {
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(NoOutputSourceP::new, NODE_COUNT * 2)));
    AtomicReference<Job> job = new AtomicReference<>(submitterSupplier.get().getJet().newJob(dag));

    AtomicBoolean done = new AtomicBoolean();
    List<Runnable> actions = asList(
            () -> job.get().getStatus(),
            () -> job.get().getMetrics(),
            () -> job.get().getConfig());
    List<Future> checkerFutures = new ArrayList<>();
    for (Runnable action : actions) {
        checkerFutures.add(spawn(() -> {
            while (!done.get()) {
                action.run();
            }
        }));
    }

    for (int i = 0; i < 5; i++) {
        instance1.shutdown();
        instance1 = createHazelcastInstance();
        job.set(submitterSupplier.get().getJet().getJob(job.get().getId()));
        assertJobStatusEventually(job.get(), RUNNING);

        instance2.shutdown();
        instance2 = createHazelcastInstance();
        job.set(submitterSupplier.get().getJet().getJob(job.get().getId()));
        assertJobStatusEventually(job.get(), RUNNING);

        sleepSeconds(1);
        if (checkerFutures.stream().anyMatch(Future::isDone)) {
            break;
        }
    }

    done.set(true);
    for (Future future : checkerFutures) {
        future.get();
    }
}
Use of com.hazelcast.jet.core.TestProcessors.MockPS in project hazelcast by hazelcast.
The class Job_SeparateClusterTest, method when_joinFromClientTimesOut_then_futureShouldNotBeCompletedEarly.
@Test
public void when_joinFromClientTimesOut_then_futureShouldNotBeCompletedEarly() throws InterruptedException {
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(NoOutputSourceP::new, NODE_COUNT)));
    int timeoutSecs = 1;
    ClientConfig config = new ClientConfig()
            .setProperty(ClientProperty.INVOCATION_TIMEOUT_SECONDS.getName(), Integer.toString(timeoutSecs));
    HazelcastInstance client = createHazelcastClient(config);

    // the join request is sent along with job submission
    Job job = client.getJet().newJob(dag);
    NoOutputSourceP.executionStarted.await();

    // wait for the join invocation to time out
    Thread.sleep(TimeUnit.SECONDS.toMillis(timeoutSecs));

    // When
    NoOutputSourceP.initCount.set(0);
    instance1.getLifecycleService().terminate();
    // wait for the job to be restarted on the remaining node
    assertTrueEventually(() -> assertEquals(LOCAL_PARALLELISM, NoOutputSourceP.initCount.get()));

    RuntimeException ex = new RuntimeException("Faulty job");
    NoOutputSourceP.failure.set(ex);

    // Then
    expectedException.expectMessage(Matchers.containsString(ex.getMessage()));
    job.join();
}
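Several of these tests coordinate with the running job through static latches and fields on the test processors (StuckProcessor.executionStarted / proceedLatch in the first test, NoOutputSourceP.executionStarted / initCount / failure here). As a rough illustration of that pattern, below is a minimal latch-controlled processor sketch; the name LatchProcessor and its exact field set are assumptions, not the real TestProcessors code.

import com.hazelcast.jet.core.AbstractProcessor;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

// Illustrative sketch only; the real test processors live in com.hazelcast.jet.core.TestProcessors.
public class LatchProcessor extends AbstractProcessor {

    public static volatile CountDownLatch executionStarted = new CountDownLatch(1);
    public static volatile CountDownLatch proceedLatch = new CountDownLatch(1);
    public static final AtomicInteger initCount = new AtomicInteger();
    public static final AtomicReference<RuntimeException> failure = new AtomicReference<>();

    @Override
    protected void init(Context context) {
        // lets the test assert how many processor instances were (re)initialized,
        // as in assertEquals(LOCAL_PARALLELISM, NoOutputSourceP.initCount.get())
        initCount.incrementAndGet();
    }

    @Override
    public boolean complete() {
        // signal the test that execution reached this processor
        executionStarted.countDown();
        // fail the job if the test injected an error, as with NoOutputSourceP.failure.set(ex)
        RuntimeException ex = failure.get();
        if (ex != null) {
            throw ex;
        }
        // report "not done" until the test releases the proceed latch
        return proceedLatch.getCount() == 0;
    }
}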
Use of com.hazelcast.jet.core.TestProcessors.MockPS in project hazelcast by hazelcast.
The class SplitBrainTest, method when_newMemberJoinsToCluster_then_jobQuorumSizeIsUpdated.
@Test
public void when_newMemberJoinsToCluster_then_jobQuorumSizeIsUpdated() {
    int clusterSize = 3;
    HazelcastInstance[] instances = new HazelcastInstance[clusterSize];
    for (int i = 0; i < clusterSize; i++) {
        instances[i] = createHazelcastInstance(createConfig());
    }

    NoOutputSourceP.executionStarted = new CountDownLatch(clusterSize * PARALLELISM);
    MockPS processorSupplier = new MockPS(NoOutputSourceP::new, clusterSize);
    DAG dag = new DAG().vertex(new Vertex("test", processorSupplier).localParallelism(PARALLELISM));
    Job job = instances[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
    assertOpenEventually(NoOutputSourceP.executionStarted);

    createHazelcastInstance(createConfig());

    assertTrueEventually(() -> {
        JetServiceBackend service = getJetServiceBackend(instances[0]);
        JobRepository jobRepository = service.getJobRepository();
        JobExecutionRecord record = jobRepository.getJobExecutionRecord(job.getId());
        assertEquals(3, record.getQuorumSize());
        MasterContext masterContext = service.getJobCoordinationService().getMasterContext(job.getId());
        assertEquals(3, masterContext.jobExecutionRecord().getQuorumSize());
    });

    NoOutputSourceP.proceedLatch.countDown();
}
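A note on the expected value 3, inferred from the test rather than quoted from documentation: with split-brain protection enabled, the coordinator records a quorum of at least clusterSize / 2 + 1 and only raises it as the cluster grows. The job starts on 3 members (quorum 3/2 + 1 = 2); once the fourth member joins, the expected quorum becomes 4/2 + 1 = 3, which is what both assertions check before the proceed latch releases the processors.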