Usage of com.hazelcast.jet.impl.JetServiceBackend in the hazelcast/hazelcast project:
class SnapshotPhase2Operation, method doRun.
@Override
protected CompletableFuture<Void> doRun() {
    JetServiceBackend service = getJetServiceBackend();
    // Resolve the execution context, failing fast if this member does not know the
    // given execution (e.g. it already completed or never started here).
    ExecutionContext ctx = service.getJobExecutionService()
            .assertExecutionContext(getCallerAddress(), jobId(), executionId, getClass().getSimpleName());
    assert !ctx.isLightJob() : "snapshot phase 2 started on a light job: " + idToString(executionId);
    // Kick off phase 2 of the snapshot and log the outcome asynchronously.
    return ctx.beginSnapshotPhase2(snapshotId, success).whenComplete((r, t) -> {
        if (t != null) {
            getLogger().warning(String.format(
                    "Snapshot %d phase 2 for %s finished with an error on member: %s",
                    snapshotId, ctx.jobNameAndExecutionId(), t), t);
        } else {
            // Fix: use %d for the numeric snapshot id, consistent with the error branch above.
            logFine(getLogger(), "Snapshot %d phase 2 for %s finished successfully on member",
                    snapshotId, ctx.jobNameAndExecutionId());
        }
    });
}
Usage of com.hazelcast.jet.impl.JetServiceBackend in the hazelcast/hazelcast project:
class GetLocalJobMetricsOperation, method run.
@Override
public void run() {
    // Look up the execution on this member; unlike assertExecutionContext, this
    // lookup returns null when the execution is unknown, so we translate that
    // into an ExecutionNotFoundException ourselves.
    ExecutionContext execCtx = getJetServiceBackend()
            .getJobExecutionService()
            .getExecutionContext(executionId);
    if (execCtx == null) {
        throw new ExecutionNotFoundException(executionId);
    }
    response = execCtx.getJobMetrics();
}
Usage of com.hazelcast.jet.impl.JetServiceBackend in the hazelcast/hazelcast project:
class TerminateExecutionOperation, method run.
@Override
public void run() {
    // Pure delegation: ask the job execution service to terminate the given
    // execution with the requested termination mode, on behalf of the caller.
    getJetServiceBackend()
            .getJobExecutionService()
            .terminateExecution(jobId(), executionId, getCallerAddress(), mode);
}
Usage of com.hazelcast.jet.impl.JetServiceBackend in the hazelcast/hazelcast project:
class SplitBrainTest, method when_newMemberJoinsToCluster_then_jobQuorumSizeIsUpdated.
@Test
public void when_newMemberJoinsToCluster_then_jobQuorumSizeIsUpdated() {
    // Start a 3-member cluster.
    final int initialSize = 3;
    HazelcastInstance[] members = new HazelcastInstance[initialSize];
    for (int idx = 0; idx < initialSize; idx++) {
        members[idx] = createHazelcastInstance(createConfig());
    }

    // Submit a quorum-protected job and wait until all its processors have started.
    NoOutputSourceP.executionStarted = new CountDownLatch(initialSize * PARALLELISM);
    DAG dag = new DAG().vertex(
            new Vertex("test", new MockPS(NoOutputSourceP::new, initialSize)).localParallelism(PARALLELISM));
    Job job = members[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
    assertOpenEventually(NoOutputSourceP.executionStarted);

    // A fourth member joins the cluster.
    createHazelcastInstance(createConfig());

    // The stored quorum size and the coordinator's in-memory view should
    // eventually both reflect the larger cluster.
    assertTrueEventually(() -> {
        JetServiceBackend backend = getJetServiceBackend(members[0]);
        JobExecutionRecord record = backend.getJobRepository().getJobExecutionRecord(job.getId());
        assertEquals(3, record.getQuorumSize());
        MasterContext masterContext = backend.getJobCoordinationService().getMasterContext(job.getId());
        assertEquals(3, masterContext.jobExecutionRecord().getQuorumSize());
    });

    // Let the processors finish.
    NoOutputSourceP.proceedLatch.countDown();
}
Usage of com.hazelcast.jet.impl.JetServiceBackend in the hazelcast/hazelcast project:
class SplitBrainTest, method when_quorumIsLostOnMinority_then_jobDoesNotRestartOnMinorityAndCancelledAfterMerge.
@Test
public void when_quorumIsLostOnMinority_then_jobDoesNotRestartOnMinorityAndCancelledAfterMerge() {
// A 5-member cluster that will split into a 3-member majority and a 2-member minority.
int firstSubClusterSize = 3;
int secondSubClusterSize = 2;
int clusterSize = firstSubClusterSize + secondSubClusterSize;
// Latch opens once every processor instance (one per member per parallelism unit) has started.
NoOutputSourceP.executionStarted = new CountDownLatch(clusterSize * PARALLELISM);
Job[] jobRef = new Job[1];
// Before the split: submit a quorum-protected job and wait for it to run on all members.
Consumer<HazelcastInstance[]> beforeSplit = instances -> {
MockPS processorSupplier = new MockPS(NoOutputSourceP::new, clusterSize);
DAG dag = new DAG().vertex(new Vertex("test", processorSupplier));
jobRef[0] = instances[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
assertOpenEventually(NoOutputSourceP.executionStarted);
};
Future[] minorityJobFutureRef = new Future[1];
// During the split: the majority keeps quorum and completes the job; the minority
// must never (re)start it because quorum is lost there.
BiConsumer<HazelcastInstance[], HazelcastInstance[]> onSplit = (firstSubCluster, secondSubCluster) -> {
NoOutputSourceP.proceedLatch.countDown();
// initCount: clusterSize inits before the split + firstSubClusterSize re-inits on the majority restart.
assertTrueEventually(() -> assertEquals(clusterSize + firstSubClusterSize, MockPS.initCount.get()));
long jobId = jobRef[0].getId();
// The majority sub-cluster should run the job to completion.
assertTrueEventually(() -> {
JetServiceBackend service = getJetServiceBackend(firstSubCluster[0]);
assertEquals(COMPLETED, service.getJobCoordinationService().getJobStatus(jobId).get());
});
JetServiceBackend service2 = getJetServiceBackend(secondSubCluster[0]);
// Capture the minority coordinator's completion future so we can assert it is
// cancelled after the merge.
assertTrueEventually(() -> {
MasterContext masterContext = service2.getJobCoordinationService().getMasterContext(jobId);
assertNotNull(masterContext);
minorityJobFutureRef[0] = masterContext.jobContext().jobCompletionFuture();
});
// For 20 seconds, verify the minority never transitions the job to RUNNING/STARTING.
assertTrueAllTheTime(() -> {
assertStatusNotRunningOrStarting(service2.getJobCoordinationService().getJobStatus(jobId).get());
}, 20);
};
// After the merge: all processors are closed, the minority's execution was cancelled,
// and its completion future reports CancellationException.
Consumer<HazelcastInstance[]> afterMerge = instances -> {
assertTrueEventually(() -> {
assertEquals(clusterSize + firstSubClusterSize, MockPS.initCount.get());
assertEquals(clusterSize + firstSubClusterSize, MockPS.closeCount.get());
});
// Only the original clusterSize processors were closed with an error (cancellation);
// the majority's restarted ones completed normally.
assertEquals(clusterSize, MockPS.receivedCloseErrors.size());
MockPS.receivedCloseErrors.forEach(t -> assertTrue("received " + t, t instanceof CancellationException));
try {
minorityJobFutureRef[0].get();
fail();
} catch (CancellationException expected) {
// expected: the minority's job future must be cancelled after the merge
} catch (Exception e) {
throw new AssertionError(e);
}
};
testSplitBrain(firstSubClusterSize, secondSubClusterSize, beforeSplit, onSplit, afterMerge);
}
Aggregations (end of usage listing).