Example use of com.hazelcast.jet.core.DAG in project hazelcast by hazelcast:
class JetClassLoaderTest, method when_jobCompleted_then_classLoaderShutDown.
@Test
public void when_jobCompleted_then_classLoaderShutDown() {
    // Given: a single-vertex DAG whose processor leaks a reference to its classloader.
    DAG dag = new DAG();
    dag.newVertex("v", LeakClassLoaderP::new).localParallelism(1);
    HazelcastInstance hz = createHazelcastInstance(smallInstanceWithResourceUploadConfig());

    // When: the job runs to completion.
    hz.getJet().newJob(dag).join();

    // Then: the job's classloader must have been shut down.
    assertTrue("The classloader should have been shutdown after job completion",
            LeakClassLoaderP.classLoader.isShutdown());
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast by hazelcast:
class NonSmartClientTest, method startJobAndVerifyItIsRunning.
// Submits a streaming job under a random name via the non-master client and
// returns it once it is observed in the RUNNING state.
private Job startJobAndVerifyItIsRunning() {
    String name = randomName();
    Job submitted = nonMasterClient.getJet().newJob(streamingDag(), new JobConfig().setName(name));

    // Look the job up by name (rather than by the returned handle) and wait
    // until it reports RUNNING.
    assertJobStatusEventually(nonMasterClient.getJet().getJob(name), JobStatus.RUNNING);
    return submitted;
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast by hazelcast:
class MetricsTest, method nonCooperativeProcessor.
@Test
public void nonCooperativeProcessor() {
    // Pipeline: list source -> non-cooperative map (x * 10, bumps "mapped" metric) -> list sink.
    DAG dag = new DAG();
    Vertex src = dag.newVertex("source", TestProcessors.ListSource.supplier(asList(1L, 2L, 3L)));
    Vertex mapper = dag.newVertex("map", new NonCoopTransformPSupplier((FunctionEx<Long, Long>) item -> {
        Metrics.metric("mapped").increment();
        return item * 10L;
    }));
    Vertex listSink = dag.newVertex("sink", writeListP("results"));
    dag.edge(between(src, mapper)).edge(between(mapper, listSink));

    Job job = runPipeline(dag);

    // The user-defined metric must have been incremented once per input item.
    new JobMetricsChecker(job).assertSummedMetricValue("mapped", 3L);
    // All transformed items must have reached the sink (order is irrelevant).
    assertEquals(new HashSet<>(Arrays.asList(10L, 20L, 30L)),
            new HashSet<>(instance.getList("results")));
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast by hazelcast:
class AsyncTransformUsingServiceBatchP_IntegrationTest, method stressTestInt.
// Stress-tests cooperative emission through AsyncTransformUsingServiceBatchedP,
// optionally restarting the job repeatedly while it is processing.
// @param restart when true, attempts up to 5 graceful restarts during execution
private void stressTestInt(boolean restart) {
/*
This is a stress test of the cooperative emission using the DAG api. Only through DAG
API we can configure edge queue sizes, which we use to cause more trouble for the
cooperative emission.
*/
// add more input to the source map
int numItems = 10_000;
journaledMap.putAll(IntStream.range(NUM_ITEMS, numItems).boxed().collect(toMap(i -> i, i -> i)));
DAG dag = new DAG();
// Throttled journal source so the test runs long enough for restarts to land mid-stream.
Vertex source = dag.newVertex("source", throttle(streamMapP(journaledMap.getName(), alwaysTrue(), EventJournalMapEvent::getNewValue, START_FROM_OLDEST, eventTimePolicy(i -> (long) ((Integer) i), WatermarkPolicy.limitingLag(10), 10, 0, 0)), 5000));
// Each input item fans out to 5 result strings; the flat-mapping happens inside the
// async service call's completion stage.
BiFunctionEx<ExecutorService, List<Integer>, CompletableFuture<Traverser<String>>> flatMapAsyncFn = transformNotPartitionedFn(i -> traverseItems(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5")).andThen(r -> r.thenApply(results -> traverseIterable(results).flatMap(Function.identity())));
ProcessorSupplier processorSupplier = AsyncTransformUsingServiceBatchedP.supplier(serviceFactory, DEFAULT_MAX_CONCURRENT_OPS, 128, flatMapAsyncFn);
Vertex map = dag.newVertex("map", processorSupplier).localParallelism(2);
Vertex sink = dag.newVertex("sink", SinkProcessors.writeListP(sinkList.getName()));
// Use a shorter queue to not block the barrier from the source for too long due to
// the backpressure from the slow mapper
EdgeConfig edgeToMapperConfig = new EdgeConfig().setQueueSize(128);
// Use a shorter queue on output from the mapper so that we experience backpressure
// from the sink
EdgeConfig edgeFromMapperConfig = new EdgeConfig().setQueueSize(10);
dag.edge(between(source, map).setConfig(edgeToMapperConfig)).edge(between(map, sink).setConfig(edgeFromMapperConfig));
Job job = instance().getJet().newJob(dag, jobConfig);
// Restart loop: only entered when restart == true; at most 5 restarts.
for (int i = 0; restart && i < 5; i++) {
assertNotNull(job);
// Wait until the job is actually executing (or already done) before restarting,
// otherwise restart() would race with job initialization.
assertTrueEventually(() -> {
JobStatus status = job.getStatus();
assertTrue("status=" + status, status == RUNNING || status == COMPLETED);
});
sleepMillis(100);
try {
job.restart();
} catch (IllegalStateException e) {
// The job may have completed between the status check and restart(); a
// graceful restart of a finished job is rejected — stop restarting then.
assertTrue(e.toString(), e.getMessage().startsWith("Cannot RESTART_GRACEFUL"));
break;
}
}
// Regardless of restarts, every input item must produce exactly its 5 outputs.
assertResult(i -> Stream.of(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5"), numItems);
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast by hazelcast:
class HazelcastRemoteConnectorTest, method when_streamRemoteMap.
@Test
public void when_streamRemoteMap() {
    // Pipeline: remote map's event journal -> local list sink.
    DAG dag = new DAG();
    Vertex src = dag.newVertex(SOURCE_NAME,
            streamRemoteMapP(SOURCE_NAME, clientConfig, START_FROM_OLDEST,
                    eventTimePolicy(Entry<Integer, Integer>::getValue, limitingLag(0), 1, 0, 10_000)));
    Vertex listSink = dag.newVertex(SINK_NAME, writeListP(SINK_NAME));
    dag.edge(between(src, listSink));

    Job job = localHz.getJet().newJob(dag);

    // Populate the remote map after submission, then wait for every item to
    // arrive in the local sink before cancelling the streaming job.
    populateMap(remoteHz.getMap(SOURCE_NAME));
    assertSizeEventually(ITEM_COUNT, localHz.getList(SINK_NAME));
    job.cancel();
}
Aggregations