Example usage of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in the project hazelcast by hazelcast.
From the class MetricsTest, method test_sourceSinkTag.
@Test
public void test_sourceSinkTag() {
    // Verifies that metrics carry correct source/sink tags for each position in
    // a 3-vertex linear DAG: src (source), mid (neither), sink (sink) — both on
    // the initial execution and again after a restart from a snapshot.
    DAG dag = new DAG();
    // Use the method reference, consistent with the other tests in this file.
    Vertex src = dag.newVertex("src", NoOutputSourceP::new);
    Vertex mid = dag.newVertex("mid", Processors.mapP(identity()));
    Vertex sink = dag.newVertex("sink", Processors.noopP());
    dag.edge(Edge.between(src, mid));
    dag.edge(Edge.between(mid, sink));

    // EXACTLY_ONCE with a short snapshot interval so a snapshot exists quickly,
    // allowing the restart below to resume from it.
    Job job = instance.getJet().newJob(dag, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(100));
    assertJobStatusEventually(job, RUNNING);

    // Metrics are published asynchronously; poll until a non-empty set arrives.
    // Single-element array lets the lambda write the captured value.
    JobMetrics[] metrics = {null};
    assertTrueEventually(() -> assertNotEquals(0, (metrics[0] = job.getMetrics()).metrics().size()));
    assertSourceSinkTags(metrics[0], "src", true, true, false);
    assertSourceSinkTags(metrics[0], "mid", true, false, false);
    assertSourceSinkTags(metrics[0], "sink", true, false, true);

    // restart after a snapshot so that the job will restart from a snapshot. Check the source/sink tags afterwards.
    waitForFirstSnapshot(new JobRepository(instance), job.getId(), 10, true);
    job.restart();
    assertJobStatusEventually(job, RUNNING);
    assertTrueEventually(() -> assertNotEquals(0, (metrics[0] = job.getMetrics()).metrics().size()));
    assertSourceSinkTags(metrics[0], "src", false, true, false);
    assertSourceSinkTags(metrics[0], "mid", false, false, false);
    assertSourceSinkTags(metrics[0], "sink", false, false, true);
}
Example usage of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in the project hazelcast by hazelcast.
From the class SplitBrainTest, method when_quorumIsLostOnBothSides_then_jobRestartsAfterMerge.
@Test
public void when_quorumIsLostOnBothSides_then_jobRestartsAfterMerge() {
// Symmetric 2+2 split: neither half can reach the quorum of a 4-member
// cluster, so the job must not run on either side until the clusters merge.
int firstSubClusterSize = 2;
int secondSubClusterSize = 2;
int clusterSize = firstSubClusterSize + secondSubClusterSize;
// One latch count per processor instance across the whole cluster.
NoOutputSourceP.executionStarted = new CountDownLatch(clusterSize * PARALLELISM);
// Single-element array so the beforeSplit lambda can publish the Job handle.
Job[] jobRef = new Job[1];
Consumer<HazelcastInstance[]> beforeSplit = instances -> {
MockPS processorSupplier = new MockPS(NoOutputSourceP::new, clusterSize);
DAG dag = new DAG().vertex(new Vertex("test", processorSupplier));
// Split-brain protection enabled: this is the feature under test.
jobRef[0] = instances[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
// Wait until every processor has actually started before splitting.
assertOpenEventually(NoOutputSourceP.executionStarted);
};
BiConsumer<HazelcastInstance[], HazelcastInstance[]> onSplit = (firstSubCluster, secondSubCluster) -> {
// Let the processors finish; the job would complete if it were allowed to run.
NoOutputSourceP.proceedLatch.countDown();
long jobId = jobRef[0].getId();
// Both halves should still know about the job (each elects its own master).
assertTrueEventually(() -> {
JetServiceBackend service1 = getJetServiceBackend(firstSubCluster[0]);
JetServiceBackend service2 = getJetServiceBackend(secondSubCluster[0]);
MasterContext masterContext = service1.getJobCoordinationService().getMasterContext(jobId);
assertNotNull(masterContext);
masterContext = service2.getJobCoordinationService().getMasterContext(jobId);
assertNotNull(masterContext);
});
// For a sustained period, neither side may start executing the job —
// both halves are below quorum.
assertTrueAllTheTime(() -> {
JetServiceBackend service1 = getJetServiceBackend(firstSubCluster[0]);
JetServiceBackend service2 = getJetServiceBackend(secondSubCluster[0]);
JobStatus status1 = service1.getJobCoordinationService().getJobStatus(jobId).get();
JobStatus status2 = service2.getJobCoordinationService().getJobStatus(jobId).get();
assertStatusNotRunningOrStarting(status1);
assertStatusNotRunningOrStarting(status2);
}, 20);
};
Consumer<HazelcastInstance[]> afterMerge = instances -> {
// clusterSize * 2: each member initialized the supplier once on the original
// run and once more when the job restarted after the merge.
assertTrueEventually(() -> {
assertEquals(clusterSize * 2, MockPS.initCount.get());
assertEquals(clusterSize * 2, MockPS.closeCount.get());
});
// The first execution was torn down with a cancellation on every member.
assertEquals(clusterSize, MockPS.receivedCloseErrors.size());
MockPS.receivedCloseErrors.forEach(t -> assertTrue("received " + t, t instanceof CancellationException));
};
testSplitBrain(firstSubClusterSize, secondSubClusterSize, beforeSplit, onSplit, afterMerge);
}
Example usage of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in the project hazelcast by hazelcast.
From the class SplitBrainTest, method when_newMemberIsAddedAfterClusterSizeFallsBelowQuorumSize_then_jobRestartDoesNotSucceed.
@Test
public void when_newMemberIsAddedAfterClusterSizeFallsBelowQuorumSize_then_jobRestartDoesNotSucceed() {
    // Start a 5-member cluster and a quorum-protected job on it.
    final int memberCount = 5;
    HazelcastInstance[] members = new HazelcastInstance[memberCount];
    for (int i = 0; i < memberCount; i++) {
        members[i] = createHazelcastInstance(createConfig());
    }
    NoOutputSourceP.executionStarted = new CountDownLatch(memberCount * PARALLELISM);
    MockPS supplier = new MockPS(NoOutputSourceP::new, memberCount);
    DAG dag = new DAG().vertex(new Vertex("test", supplier).localParallelism(PARALLELISM));
    Job job = members[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(true));
    assertOpenEventually(NoOutputSourceP.executionStarted);

    // Shut down every member except the first, dropping the cluster below quorum.
    for (int i = 1; i < memberCount; i++) {
        members[i].shutdown();
    }
    NoOutputSourceP.proceedLatch.countDown();
    assertJobStatusEventually(job, NOT_RUNNING, 10);

    // Adding one member brings the cluster to 2, still below quorum:
    // the job must keep refusing to start.
    HazelcastInstance sixthMember = createHazelcastInstance(createConfig());
    assertTrueAllTheTime(() -> assertStatusNotRunningOrStarting(job.getStatus()), 5);

    // The test ends with a cluster size 2, which is below quorum
    // Start another instance so the job can restart and be cleaned up correctly
    HazelcastInstance seventhMember = createHazelcastInstance(createConfig());
    waitAllForSafeState(newArrayList(members[0], sixthMember, seventhMember));
    assertTrueEventually(() -> assertStatusRunningOrCompleted(job.getStatus()), 5);
}
Example usage of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in the project hazelcast by hazelcast.
From the class SplitBrainTest, method when_splitBrainProtectionDisabled_then_jobRunsTwiceAndAgainOnceAfterHeal.
@Test
public void when_splitBrainProtectionDisabled_then_jobRunsTwiceAndAgainOnceAfterHeal() {
    // Asymmetric 3+2 split; protection is off, so both halves keep running the job.
    int firstSubClusterSize = 3;
    int secondSubClusterSize = 2;
    int clusterSize = firstSubClusterSize + secondSubClusterSize;
    NoOutputSourceP.executionStarted = new CountDownLatch(secondSubClusterSize * PARALLELISM);
    // Holder so the lambdas below can share the submitted Job.
    Job[] jobHolder = new Job[1];

    Consumer<HazelcastInstance[]> beforeSplit = members -> {
        DAG dag = new DAG().vertex(new Vertex("test", new MockPS(NoOutputSourceP::new, clusterSize)));
        jobHolder[0] = members[0].getJet().newJob(dag, new JobConfig().setSplitBrainProtection(false));
        // One init per member on the first execution.
        assertTrueEventually(() -> assertEquals("initCount", clusterSize, MockPS.initCount.get()), 10);
        assertOpenEventually("executionStarted", NoOutputSourceP.executionStarted);
    };

    BiConsumer<HazelcastInstance[], HazelcastInstance[]> onSplit = (cluster1, cluster2) -> {
        Job jobOnFirst = cluster1[0].getJet().getJob(jobHolder[0].getId());
        Job jobOnSecond = cluster2[0].getJet().getJob(jobHolder[0].getId());
        assertNotNull("jobRef1", jobOnFirst);
        assertNotNull("jobRef2", jobOnSecond);
        assertTrueEventually(() -> assertEquals("job not running on subcluster 1", RUNNING, jobOnFirst.getStatus()));
        assertTrueEventually(() -> assertEquals("job not running on subcluster 2", RUNNING, jobOnSecond.getStatus()));
        // we need assert-eventually here because we might observe RUNNING state from an execution before the split
        assertTrueEventually(() -> assertEquals("initCount", clusterSize * 2, MockPS.initCount.get()));
    };

    Consumer<HazelcastInstance[]> afterMerge = members -> {
        // this assert will hold after the job scales up
        assertTrueEventually(() -> assertEquals(clusterSize * 3, MockPS.initCount.get()), 20);
    };

    testSplitBrain(firstSubClusterSize, secondSubClusterSize, beforeSplit, onSplit, afterMerge);
}
Example usage of com.hazelcast.jet.core.TestProcessors.NoOutputSourceP in the project hazelcast by hazelcast.
From the class SplitBrainTest, method when_splitBrainProtectionIsDisabled_then_jobCompletesOnBothSides.
@Test
public void when_splitBrainProtectionIsDisabled_then_jobCompletesOnBothSides() {
    // 2+2 split with no split-brain protection: each half runs the job to
    // completion independently.
    int firstSubClusterSize = 2;
    int secondSubClusterSize = 2;
    int clusterSize = firstSubClusterSize + secondSubClusterSize;
    NoOutputSourceP.executionStarted = new CountDownLatch(clusterSize * PARALLELISM);
    // Holder so the lambdas below can share the submitted Job.
    Job[] jobHolder = new Job[1];

    Consumer<HazelcastInstance[]> beforeSplit = members -> {
        MockPS supplier = new MockPS(NoOutputSourceP::new, clusterSize);
        DAG dag = new DAG().vertex(new Vertex("test", supplier));
        jobHolder[0] = members[0].getJet().newJob(dag);
        assertOpenEventually(NoOutputSourceP.executionStarted);
    };

    BiConsumer<HazelcastInstance[], HazelcastInstance[]> onSplit = (cluster1, cluster2) -> {
        // Release the processors; each half should now finish its own execution.
        NoOutputSourceP.proceedLatch.countDown();
        long jobId = jobHolder[0].getId();
        assertTrueEventually(() -> {
            JetServiceBackend backend1 = getJetServiceBackend(cluster1[0]);
            JetServiceBackend backend2 = getJetServiceBackend(cluster2[0]);
            assertEquals(COMPLETED, backend1.getJobCoordinationService().getJobStatus(jobId).get());
            assertEquals(COMPLETED, backend2.getJobCoordinationService().getJobStatus(jobId).get());
        });
    };

    Consumer<HazelcastInstance[]> afterMerge = members -> {
        // Two executions happened (one per half), so counts are doubled.
        assertTrueEventually(() -> {
            assertEquals("init count", clusterSize * 2, MockPS.initCount.get());
            assertEquals("close count", clusterSize * 2, MockPS.closeCount.get());
        });
        assertEquals(clusterSize, MockPS.receivedCloseErrors.size());
        MockPS.receivedCloseErrors.forEach(t -> assertTrue("received " + t, t instanceof CancellationException));
    };

    testSplitBrain(firstSubClusterSize, secondSubClusterSize, beforeSplit, onSplit, afterMerge);
}
Aggregations