Example use of com.hazelcast.jet.core.DAG in the hazelcast/hazelcast project:
class JobLifecycleMetricsTest, method multipleJobsSubmittedAndCompleted.
@Test
public void multipleJobsSubmittedAndCompleted() {
    // Submit a healthy batch job and let it run to completion.
    Job healthyJob = hzInstances[0].getJet().newJob(batchPipeline());
    healthyJob.join();
    healthyJob.cancel();
    // Stats must report: 1 submitted, 1 started, 1 finished, 1 successful, 0 failed.
    assertTrueEventually(() -> assertJobStats(1, 1, 1, 1, 0));

    // Build a DAG whose processor fails deliberately while processing.
    Throwable processorError = new AssertionError("mock error");
    DAG faultyDag = new DAG();
    Vertex src = faultyDag.newVertex("source", ListSource.supplier(singletonList(1)));
    Vertex faulty = faultyDag.newVertex("faulty",
            new MockPMS(() -> new MockPS(() -> new MockP().setProcessError(processorError), MEMBER_COUNT)));
    faultyDag.edge(between(src, faulty));

    // The faulty job's failure must surface through join().
    Job failingJob = hzInstances[0].getJet().newJob(faultyDag);
    try {
        failingJob.join();
        fail("Expected exception not thrown!");
    } catch (Exception ignored) {
        // expected: the injected processor error propagates out of join()
    }
    // Stats now: 2 submitted, 2 started, 2 finished, 1 successful, 1 failed.
    assertTrueEventually(() -> assertJobStats(2, 2, 2, 1, 1));
}
Example use of com.hazelcast.jet.core.DAG in the hazelcast/hazelcast project:
class JobMetrics_MiscTest, method when_jobNotYetRunning_then_emptyMetrics.
@Test
public void when_jobNotYetRunning_then_emptyMetrics() {
    // Latch keeps the meta-supplier blocked in init(), so the job never starts running.
    BlockingInInitMetaSupplier.latch = new CountDownLatch(1);
    DAG dag = new DAG();
    dag.newVertex("v1", new BlockingInInitMetaSupplier());
    Job job = hz().getJet().newJob(dag, JOB_CONFIG_WITH_METRICS);

    // While stuck in init, the job must expose no metrics at all.
    assertTrueAllTheTime(() -> assertEmptyJobMetrics(job, false), 2);

    // Release init; metrics should appear once execution begins.
    BlockingInInitMetaSupplier.latch.countDown();
    assertTrueEventually(() -> assertJobHasMetrics(job, false));
}
Example use of com.hazelcast.jet.core.DAG in the hazelcast/hazelcast project:
class JobMetrics_MiscTest, method when_metricsForJobDisabled_then_emptyMetrics.
@Test
public void when_metricsForJobDisabled_then_emptyMetrics() throws Throwable {
    DAG dag = new DAG();
    dag.newVertex("v1", MockP::new);
    dag.newVertex("v2", (SupplierEx<Processor>) NoOutputSourceP::new);
    // Metrics disabled for this job; storing-after-completion is on, but with
    // metrics off there must be nothing to store.
    JobConfig metricsOffConfig = new JobConfig()
            .setMetricsEnabled(false)
            .setStoreMetricsAfterJobCompletion(true);
    Job job = hz().getJet().newJob(dag, metricsOffConfig);

    // Wait until the job is actually executing.
    NoOutputSourceP.executionStarted.await();
    assertJobStatusEventually(job, JobStatus.RUNNING);

    // A running job with metrics disabled must stay metric-free.
    assertTrueAllTheTime(() -> assertEmptyJobMetrics(job, false), 2);

    // Let the source finish and the job complete.
    NoOutputSourceP.proceedLatch.countDown();
    job.join();
    assertJobStatusEventually(job, JobStatus.COMPLETED);

    // Even after completion, no stored metrics may exist.
    assertEmptyJobMetrics(job, true);
}
Example use of com.hazelcast.jet.core.DAG in the hazelcast/hazelcast project:
class AsyncTransformUsingServiceP_IntegrationTest, method stressTestInt.
private void stressTestInt(boolean restart) {
    /*
    This is a stress test of the cooperative emission using the DAG api. Only through DAG
    API we can configure edge queue sizes, which we use to cause more trouble for the
    cooperative emission.
    */
    // Extend the source map so the journal has plenty of events to emit.
    int numItems = 10_000;
    journaledMap.putAll(IntStream.range(NUM_ITEMS, numItems).boxed().collect(toMap(i -> i, i -> i)));

    DAG dag = new DAG();
    // Throttled journal source with a lag-limiting watermark policy.
    Vertex source = dag.newVertex("source", throttle(
            streamMapP(journaledMap.getName(), alwaysTrue(), EventJournalMapEvent::getNewValue,
                    START_FROM_OLDEST,
                    eventTimePolicy(i -> (long) ((Integer) i), WatermarkPolicy.limitingLag(10), 10, 0, 0)),
            5000));
    // Fan each input item out into five suffixed strings, asynchronously.
    BiFunctionEx<ExecutorService, Integer, CompletableFuture<Traverser<String>>> flatMapAsyncFn =
            transformNotPartitionedFn(i -> traverseItems(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5"));
    ProcessorSupplier processorSupplier = ordered
            ? AsyncTransformUsingServiceOrderedP.supplier(serviceFactory, DEFAULT_MAX_CONCURRENT_OPS, flatMapAsyncFn)
            : AsyncTransformUsingServiceUnorderedP.supplier(serviceFactory, DEFAULT_MAX_CONCURRENT_OPS, flatMapAsyncFn, identity());
    Vertex map = dag.newVertex("map", processorSupplier).localParallelism(2);
    Vertex sink = dag.newVertex("sink", SinkProcessors.writeListP(sinkList.getName()));

    // Use a shorter queue to not block the barrier from the source for too long due to
    // the backpressure from the slow mapper
    EdgeConfig edgeToMapperConfig = new EdgeConfig().setQueueSize(128);
    // Use a shorter queue on output from the mapper so that we experience backpressure
    // from the sink
    EdgeConfig edgeFromMapperConfig = new EdgeConfig().setQueueSize(10);
    dag.edge(between(source, map).setConfig(edgeToMapperConfig))
       .edge(between(map, sink).setConfig(edgeFromMapperConfig));

    Job job = instance().getJet().newJob(dag, jobConfig);
    // Optionally bounce the job a few times while it is running.
    for (int attempt = 0; restart && attempt < 5; attempt++) {
        assertJobStatusEventually(job, RUNNING);
        sleepMillis(100);
        job.restart();
    }
    assertResultEventually(i -> Stream.of(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5"), numItems);
}
Example use of com.hazelcast.jet.core.DAG in the hazelcast/hazelcast project:
class Processors_globalAggregationIntegrationTest, method runTest.
private void runTest(List<Long> sourceItems, Long expectedOutput) throws Exception {
    HazelcastInstance instance = createHazelcastInstance();
    AggregateOperation1<Long, ?, Long> sumOp = summingLong((Long l) -> l);

    DAG dag = new DAG();
    Vertex source = dag.newVertex("source", () -> new ListSource(sourceItems)).localParallelism(1);
    Vertex sink = dag.newVertex("sink", writeListP("sink"));
    if (singleStageProcessor) {
        // Single stage: one global aggregator receives everything via all-to-one routing.
        Vertex aggregate = dag.newVertex("aggregate", Processors.aggregateP(sumOp)).localParallelism(1);
        dag.edge(between(source, aggregate).distributed().allToOne("foo"))
           .edge(between(aggregate, sink).isolated());
    } else {
        // Two stages: local accumulation, then a single global combiner.
        Vertex accumulate = dag.newVertex("accumulate", Processors.accumulateP(sumOp));
        Vertex combine = dag.newVertex("combine", combineP(sumOp)).localParallelism(1);
        dag.edge(between(source, accumulate))
           .edge(between(accumulate, combine).distributed().allToOne("foo"))
           .edge(between(combine, sink).isolated());
    }

    instance.getJet().newJob(dag).join();
    IList<Long> sinkList = instance.getList("sink");
    assertEquals(singletonList(expectedOutput), new ArrayList<>(sinkList));
    // wait a little more and make sure, that there are no more frames
    Thread.sleep(1000);
    assertEquals(singletonList(expectedOutput), new ArrayList<>(sinkList));
    assertEquals(expectedOutput, sinkList.get(0));
}
Aggregations