Use of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in project hazelcast by hazelcast.
From the class TopologyChangeDuringJobSubmissionTest, method when_coordinatorLeavesDuringSubmission_then_submissionCallReturnsSuccessfully:
@Test
public void when_coordinatorLeavesDuringSubmission_then_submissionCallReturnsSuccessfully() throws Throwable {
    // Given that the job is submitted
    dropOperationsBetween(instance1, instance2, SpiDataSerializerHook.F_ID,
            singletonList(SpiDataSerializerHook.NORMAL_RESPONSE));
    Future<Job> future = spawn(() -> {
        DAG dag = new DAG().vertex(new Vertex("test", new MockPS(NoOutputSourceP::new, 1)));
        return instance2.getJet().newJob(dag);
    });
    NoOutputSourceP.executionStarted.await();

    // When the coordinator leaves before the submission response is received
    instance1.getLifecycleService().terminate();
    Job job = future.get();

    // Then the job completes successfully
    NoOutputSourceP.proceedLatch.countDown();
    job.join();
    assertEquals(2, MockPS.initCount.get());
}
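Each of these tests synchronizes with the running job through two static latches on NoOutputSourceP: executionStarted is released once the processor is actually executing, and proceedLatch is counted down by the test when the processor is allowed to complete. The sketch below is a simplified, assumed reconstruction of that handshake, not the real TestProcessors.NoOutputSourceP (which also supports failure injection and completion counting).

import java.util.concurrent.CountDownLatch;

import com.hazelcast.jet.core.AbstractProcessor;

// Simplified, assumed sketch of the latch handshake used by TestProcessors.NoOutputSourceP.
// Only the executionStarted / proceedLatch protocol relied on by the tests is shown here.
public final class NoOutputSourcePSketch extends AbstractProcessor {
    public static volatile CountDownLatch executionStarted = new CountDownLatch(1);
    public static volatile CountDownLatch proceedLatch = new CountDownLatch(1);

    @Override
    public boolean complete() {
        executionStarted.countDown();          // tell the test that execution has started
        // Emit nothing; keep returning false ("not done yet") until the test opens proceedLatch.
        return proceedLatch.getCount() == 0;
    }
}

With that protocol, the test above can await executionStarted to be sure the DAG is running before it terminates the coordinator, and then open proceedLatch so that job.join() can return.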
Use of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in project hazelcast by hazelcast.
From the class JobMetrics_NonSharedClusterTest, method when_noMetricCollectionYet_then_emptyMetrics:
@Test
public void when_noMetricCollectionYet_then_emptyMetrics() {
    Config config = smallInstanceConfig();
    config.getMetricsConfig().setCollectionFrequencySeconds(10_000);
    HazelcastInstance inst = createHazelcastInstance(config);
    DAG dag = new DAG();
    dag.newVertex("v1", (SupplierEx<Processor>) NoOutputSourceP::new).localParallelism(1);

    // The initial collection interval is 1 second, so run a first job and wait until it has metrics.
    Job job1 = inst.getJet().newJob(dag, JOB_CONFIG_WITH_METRICS);
    try {
        JetTestSupport.assertTrueEventually(() -> assertFalse(job1.getMetrics().metrics().isEmpty()), 10);
    } catch (AssertionError e) {
        // If we don't get metrics within 10 seconds, ignore it: we probably missed the first
        // collection with this job. We might also have caught a different error, so log it.
        logger.warning("Ignoring this error: " + e, e);
    }

    // Run a second job for which we know there will be no metrics collection. It should
    // report empty metrics because the next collection is 10_000 seconds away.
    Job job2 = inst.getJet().newJob(dag, JOB_CONFIG_WITH_METRICS);
    assertJobStatusEventually(job2, RUNNING);
    assertTrue(job2.getMetrics().metrics().isEmpty());
}
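JOB_CONFIG_WITH_METRICS is a constant defined elsewhere in the test class and is not shown in this excerpt. A plausible definition (an assumption, not taken from the source) is a JobConfig with job metrics enabled and retained after completion, paired with a member Config whose metrics collection interval is pushed far into the future, which is what makes the second job report empty metrics:

import com.hazelcast.config.Config;
import com.hazelcast.jet.config.JobConfig;

// Assumed definitions; the excerpt above only references the names.
static final JobConfig JOB_CONFIG_WITH_METRICS = new JobConfig()
        .setMetricsEnabled(true)                     // per-job metrics (on by default)
        .setStoreMetricsAfterJobCompletion(true);    // keep metrics available after the job finishes

static Config configWithSlowMetricsCollection() {
    Config config = new Config();
    // With a 10_000-second interval, only the initial collection (roughly 1 second after startup)
    // can ever run during the test, which is exactly what this test exploits.
    config.getMetricsConfig().setCollectionFrequencySeconds(10_000);
    return config;
}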
Use of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in project hazelcast by hazelcast.
From the class TopologyChangeTest, method when_coordinatorLeavesDuringExecution_then_clientStillGetsJobResult:
@Test
public void when_coordinatorLeavesDuringExecution_then_clientStillGetsJobResult() throws Throwable {
    // Given
    HazelcastInstance client = createHazelcastClient();
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(NoOutputSourceP::new, nodeCount)));

    // When
    Job job = client.getJet().newJob(dag);
    NoOutputSourceP.executionStarted.await();
    instances[0].getLifecycleService().terminate();
    NoOutputSourceP.proceedLatch.countDown();

    // Then
    job.join();
}
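MockPS is the other TestProcessors helper used throughout these topology-change tests: a ProcessorSupplier that records how often it is initialized and closed, which is what allows the first test to assert initCount == 2 after the job restarts on the surviving member. Below is a minimal, assumed sketch of that idea; the real class also supports injected init/close failures and per-member bookkeeping.

import java.util.Collection;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import com.hazelcast.function.SupplierEx;
import com.hazelcast.jet.core.Processor;
import com.hazelcast.jet.core.ProcessorSupplier;

// Minimal, assumed sketch of the MockPS idea: count init/close calls so tests can verify
// how many times a job was (re)started across the cluster. Not the real implementation.
public class MockPSSketch implements ProcessorSupplier {
    public static final AtomicInteger initCount = new AtomicInteger();
    public static final AtomicInteger closeCount = new AtomicInteger();

    private final SupplierEx<Processor> supplier;
    private final int nodeCount;   // presumably used by the real helper to assert close counts

    public MockPSSketch(SupplierEx<Processor> supplier, int nodeCount) {
        this.supplier = supplier;
        this.nodeCount = nodeCount;
    }

    @Override
    public void init(Context context) {
        initCount.incrementAndGet();
    }

    @Override
    public Collection<? extends Processor> get(int count) {
        return Stream.generate(supplier).limit(count).collect(Collectors.toList());
    }

    @Override
    public void close(Throwable error) {
        closeCount.incrementAndGet();
    }
}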
Use of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in project hazelcast by hazelcast.
From the class ExecutionLifecycleTest, method when_oneOfTwoJobsFails_then_theOtherContinues:
@Test
public void when_oneOfTwoJobsFails_then_theOtherContinues() throws Exception {
    // Given
    DAG dagFaulty = new DAG().vertex(new Vertex("faulty",
            new MockPMS(() -> new MockPS(() -> new MockP().setCompleteError(MOCK_ERROR), MEMBER_COUNT))));
    DAG dagGood = new DAG();
    dagGood.newVertex("good", () -> new NoOutputSourceP());

    // When
    Job jobGood = newJob(dagGood);
    NoOutputSourceP.executionStarted.await();
    runJobExpectFailure(dagFaulty, false);

    // Then
    assertTrueAllTheTime(() -> assertFalse(jobGood.getFuture().isDone()), 2);
    NoOutputSourceP.proceedLatch.countDown();
    jobGood.join();
}
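runJobExpectFailure(...) and MOCK_ERROR are helpers defined elsewhere in ExecutionLifecycleTest and are not visible in this excerpt; the meaning of the boolean argument is likewise not shown. A hypothetical stand-in with the same intent, submitting a DAG and asserting that join() fails with the expected mock error, could look like this:

import java.util.concurrent.CompletionException;

import com.hazelcast.jet.JetService;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.core.DAG;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

// Hypothetical stand-in for the runJobExpectFailure(...) helper used above.
static void runJobExpectFailureSketch(JetService jet, DAG dag, Throwable expectedCause) {
    Job job = jet.newJob(dag);
    try {
        job.join();
        fail("the job should have failed");
    } catch (CompletionException e) {
        // The processor's error reaches the caller wrapped in a CompletionException
        // (see the join() assertions in the next example); compare by message because
        // the original Throwable instance is serialized between members.
        assertTrue(e.getCause().getMessage().contains(expectedCause.getMessage()));
    }
}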
Use of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in project hazelcast by hazelcast.
From the class ExecutionLifecycleTest, method when_clientJoinBeforeAndAfterComplete_then_exceptionEquals:
@Test
public void when_clientJoinBeforeAndAfterComplete_then_exceptionEquals() {
    // not applicable to light jobs - we can't connect to light jobs after they complete
    assumeFalse(useLightJob);

    DAG dag = new DAG();
    Vertex noop = dag.newVertex("noop", (SupplierEx<Processor>) NoOutputSourceP::new).localParallelism(1);
    Vertex faulty = dag.newVertex("faulty", () -> new MockP().setCompleteError(MOCK_ERROR)).localParallelism(1);
    dag.edge(between(noop, faulty));

    Job job = newJob(client(), dag, null);
    assertJobStatusEventually(job, RUNNING);
    NoOutputSourceP.proceedLatch.countDown();

    Throwable excBeforeComplete;
    Throwable excAfterComplete;
    try {
        job.join();
        throw new AssertionError("should have failed");
    } catch (Exception e) {
        excBeforeComplete = e;
    }

    // create a new client that will join the job after completion
    HazelcastInstance client2 = factory().newHazelcastClient();
    Job job2 = client2.getJet().getJob(job.getId());
    try {
        job2.join();
        throw new AssertionError("should have failed");
    } catch (Exception e) {
        excAfterComplete = e;
    }

    logger.info("exception before completion", excBeforeComplete);
    logger.info("exception after completion", excAfterComplete);

    // Then
    assertInstanceOf(CompletionException.class, excBeforeComplete);
    assertInstanceOf(CompletionException.class, excAfterComplete);
    Throwable causeBefore = excBeforeComplete.getCause();
    Throwable causeAfter = excAfterComplete.getCause();
    assertEquals(causeBefore.getClass(), causeAfter.getClass());
    assertContains(causeAfter.getMessage(), causeBefore.getMessage());
}
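The final assertions depend on join() wrapping the job's failure the same way regardless of whether the client joined before or after the job completed. A plain CompletableFuture shows the same wrapping convention and is a reasonable mental model here; this is an illustration, not Jet's actual code path:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class JoinWrappingDemo {
    public static void main(String[] args) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        future.completeExceptionally(new RuntimeException("mock error"));
        try {
            future.join();
        } catch (CompletionException e) {
            // join() wraps the original failure, so the cause class and message are preserved
            // no matter when (or how often) a caller joins.
            System.out.println(e.getCause());   // java.lang.RuntimeException: mock error
        }
    }
}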