Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
Class SplitBrainTest, method when_jobIsSubmittedToMinoritySide_then_jobIsCancelledDuringMerge:
@Test
public void when_jobIsSubmittedToMinoritySide_then_jobIsCancelledDuringMerge() {
    int firstSubClusterSize = 3;
    int secondSubClusterSize = 2;
    NoOutputSourceP.executionStarted = new CountDownLatch(secondSubClusterSize * PARALLELISM);
    Job[] jobRef = new Job[1];

    BiConsumer<HazelcastInstance[], HazelcastInstance[]> onSplit = (firstSubCluster, secondSubCluster) -> {
        MockPS processorSupplier = new MockPS(NoOutputSourceP::new, secondSubClusterSize);
        DAG dag = new DAG().vertex(new Vertex("test", processorSupplier));
        jobRef[0] = secondSubCluster[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
        assertOpenEventually(NoOutputSourceP.executionStarted);
    };

    Consumer<HazelcastInstance[]> afterMerge = instances -> {
        assertTrueEventually(() -> assertEquals(secondSubClusterSize, MockPS.receivedCloseErrors.size()), 20);
        MockPS.receivedCloseErrors.forEach(t -> assertTrue("received: " + t, t instanceof CancellationException));
        try {
            jobRef[0].getFuture().get(30, TimeUnit.SECONDS);
            fail();
        } catch (CancellationException ignored) {
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    };

    testSplitBrain(firstSubClusterSize, secondSubClusterSize, null, onSplit, afterMerge);
}
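The test drives the DAG API through the test-only MockPS supplier; in application code the same setSplitBrainProtection flag is simply set on the JobConfig passed to newJob. A minimal, self-contained sketch with an illustrative pipeline (the instance setup and the source/sink choices are assumptions, not taken from the test):

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.test.TestSources;

public class SplitBrainProtectedJob {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();

        Pipeline pipeline = Pipeline.create();
        pipeline.readFrom(TestSources.items(1, 2, 3))
                .writeTo(Sinks.logger());

        // With split-brain protection enabled, the job is only restarted after a topology
        // change if a majority of the members it last ran on are still present.
        JobConfig config = new JobConfig().setSplitBrainProtection(true);

        Job job = hz.getJet().newJob(pipeline, config);
        job.join();
    }
}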
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
Class SplitBrainTest, method when_newMemberJoinsToCluster_then_jobQuorumSizeIsUpdated:
@Test
public void when_newMemberJoinsToCluster_then_jobQuorumSizeIsUpdated() {
    int clusterSize = 3;
    HazelcastInstance[] instances = new HazelcastInstance[clusterSize];
    for (int i = 0; i < clusterSize; i++) {
        instances[i] = createHazelcastInstance(createConfig());
    }

    NoOutputSourceP.executionStarted = new CountDownLatch(clusterSize * PARALLELISM);
    MockPS processorSupplier = new MockPS(NoOutputSourceP::new, clusterSize);
    DAG dag = new DAG().vertex(new Vertex("test", processorSupplier).localParallelism(PARALLELISM));
    Job job = instances[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
    assertOpenEventually(NoOutputSourceP.executionStarted);

    // A fourth member joins, so the required quorum for the running job must grow to 3.
    createHazelcastInstance(createConfig());

    assertTrueEventually(() -> {
        JetServiceBackend service = getJetServiceBackend(instances[0]);
        JobRepository jobRepository = service.getJobRepository();
        JobExecutionRecord record = jobRepository.getJobExecutionRecord(job.getId());
        assertEquals(3, record.getQuorumSize());
        MasterContext masterContext = service.getJobCoordinationService().getMasterContext(job.getId());
        assertEquals(3, masterContext.jobExecutionRecord().getQuorumSize());
    });

    NoOutputSourceP.proceedLatch.countDown();
}
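The expected value of 3 follows the majority rule used when split-brain protection is enabled: the quorum is half of the cluster size, plus one. A worked sketch of that arithmetic (an illustrative helper, not Hazelcast source code):

// Illustrative only: the majority quorum for a given cluster size.
static int expectedQuorumSize(int clusterSize) {
    return clusterSize / 2 + 1; // 3 members -> 2; after a 4th member joins -> 3, as asserted above
}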
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
Class SplitBrainTest, method when_quorumIsLostOnMinority_then_jobDoesNotRestartOnMinorityAndCancelledAfterMerge:
@Test
public void when_quorumIsLostOnMinority_then_jobDoesNotRestartOnMinorityAndCancelledAfterMerge() {
    int firstSubClusterSize = 3;
    int secondSubClusterSize = 2;
    int clusterSize = firstSubClusterSize + secondSubClusterSize;
    NoOutputSourceP.executionStarted = new CountDownLatch(clusterSize * PARALLELISM);
    Job[] jobRef = new Job[1];

    Consumer<HazelcastInstance[]> beforeSplit = instances -> {
        MockPS processorSupplier = new MockPS(NoOutputSourceP::new, clusterSize);
        DAG dag = new DAG().vertex(new Vertex("test", processorSupplier));
        jobRef[0] = instances[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
        assertOpenEventually(NoOutputSourceP.executionStarted);
    };

    Future[] minorityJobFutureRef = new Future[1];

    BiConsumer<HazelcastInstance[], HazelcastInstance[]> onSplit = (firstSubCluster, secondSubCluster) -> {
        NoOutputSourceP.proceedLatch.countDown();
        // After the split the job restarts on the majority sub-cluster, so MockPS is
        // initialized once per original member plus once per majority member.
        assertTrueEventually(() -> assertEquals(clusterSize + firstSubClusterSize, MockPS.initCount.get()));

        long jobId = jobRef[0].getId();

        // Releasing the latch lets the processors finish, so the restarted job completes on the majority side.
        assertTrueEventually(() -> {
            JetServiceBackend service = getJetServiceBackend(firstSubCluster[0]);
            assertEquals(COMPLETED, service.getJobCoordinationService().getJobStatus(jobId).get());
        });

        JetServiceBackend service2 = getJetServiceBackend(secondSubCluster[0]);
        assertTrueEventually(() -> {
            MasterContext masterContext = service2.getJobCoordinationService().getMasterContext(jobId);
            assertNotNull(masterContext);
            minorityJobFutureRef[0] = masterContext.jobContext().jobCompletionFuture();
        });

        // The minority sub-cluster has lost the quorum, so the job must never start running there.
        assertTrueAllTheTime(() -> {
            assertStatusNotRunningOrStarting(service2.getJobCoordinationService().getJobStatus(jobId).get());
        }, 20);
    };

    Consumer<HazelcastInstance[]> afterMerge = instances -> {
        assertTrueEventually(() -> {
            assertEquals(clusterSize + firstSubClusterSize, MockPS.initCount.get());
            assertEquals(clusterSize + firstSubClusterSize, MockPS.closeCount.get());
        });
        assertEquals(clusterSize, MockPS.receivedCloseErrors.size());
        MockPS.receivedCloseErrors.forEach(t -> assertTrue("received " + t, t instanceof CancellationException));
        try {
            minorityJobFutureRef[0].get();
            fail();
        } catch (CancellationException expected) {
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    };

    testSplitBrain(firstSubClusterSize, secondSubClusterSize, beforeSplit, onSplit, afterMerge);
}
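Outside of the test harness, the same cancellation is visible through the public Job API rather than through MasterContext internals. A short sketch, assuming job is the handle returned by newJob(...):

// Sketch only; "job" is an assumed Job reference.
try {
    job.join(); // blocks until the job completes, fails, or is cancelled
} catch (CancellationException e) {
    // Thrown when the job was cancelled, e.g. after split-brain handling aborted it.
    System.out.println("Job ended with status: " + job.getStatus());
}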
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
Class TerminalSnapshotSynchronizationTest, method setup:
private Job setup(boolean snapshotting) {
    HazelcastInstance[] instances = createHazelcastInstances(NODE_COUNT);
    DAG dag = new DAG();
    dag.newVertex("generator", () -> new NoOutputSourceP()).localParallelism(1);
    // The day-long interval effectively disables periodic snapshots, so only terminal
    // snapshots are taken during the test.
    JobConfig config = new JobConfig()
            .setProcessingGuarantee(snapshotting ? EXACTLY_ONCE : NONE)
            .setSnapshotIntervalMillis(DAYS.toMillis(1));
    Job job = instances[0].getJet().newJob(dag, config);
    assertJobStatusEventually(job, RUNNING);
    return job;
}
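The two JobConfig calls above are the standard way to control snapshotting. A sketch with more typical, purely illustrative values:

import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.config.ProcessingGuarantee;

// Illustrative values: exactly-once processing with a 10-second snapshot interval,
// in contrast to the test's 1-day interval that suppresses periodic snapshots.
JobConfig config = new JobConfig()
        .setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE)
        .setSnapshotIntervalMillis(10_000);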
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
Class TopologyChangeTest, method when_nodeIsNotJobParticipant_then_initFails:
@Test
public void when_nodeIsNotJobParticipant_then_initFails() throws Throwable {
    final long jobId = 1;
    final long executionId = 1;
    HazelcastInstance master = instances[0];
    int memberListVersion = Accessors.getClusterService(master).getMemberListVersion();

    // Build the participant list from every member except the master that receives the operation.
    Set<MemberInfo> memberInfos = new HashSet<>();
    for (int i = 1; i < instances.length; i++) {
        memberInfos.add(new MemberInfo(getNode(instances[i]).getLocalMember()));
    }

    Version version = instances[0].getCluster().getLocalMember().getVersion().asVersion();
    JobRecord jobRecord = new JobRecord(version, jobId, null, "", new JobConfig(), Collections.emptySet(), null);
    instances[0].getMap(JOB_RECORDS_MAP_NAME).put(jobId, jobRecord);

    InitExecutionOperation op =
            new InitExecutionOperation(jobId, executionId, memberListVersion, version, memberInfos, null, false);
    Future<Object> future = Accessors.getOperationService(master)
            .createInvocationBuilder(JetServiceBackend.SERVICE_NAME, op, Accessors.getAddress(master))
            .invoke();
    try {
        future.get();
        fail();
    } catch (ExecutionException e) {
        assertInstanceOf(IllegalArgumentException.class, e.getCause());
        assertTrue("Expected: contains 'is not in participants'\nActual: '" + e.getMessage() + "'",
                e.getMessage().contains("is not in participants"));
    }
}
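Unlike the internal InitExecutionOperation plumbing above, application code normally touches JobConfig only at submission time. A brief sketch of a few commonly set options; the job name and the hz/pipeline references are illustrative assumptions carried over from the earlier sketch:

// Illustrative only: "hz" and "pipeline" are assumed to exist as in the earlier sketch.
JobConfig config = new JobConfig()
        .setName("example-job")           // invented name, for illustration
        .setAutoScaling(true)             // allow the job to rescale when members join or leave
        .setSplitBrainProtection(false);  // the default, shown for contrast with the tests above
Job job = hz.getJet().newJob(pipeline, config);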