Usage example of com.hazelcast.jet.config.JobConfig from the hazelcast project.
Source: class AsyncTransformUsingServiceP_IntegrationTest, method test_pipelineApi_mapPartitioned.
@Test
public void test_pipelineApi_mapPartitioned() {
    // Build a streaming pipeline: map journal -> keyed async service mapping -> list sink.
    Pipeline pipeline = Pipeline.create();
    pipeline.readFrom(Sources.mapJournal(journaledMap, START_FROM_OLDEST,
                    EventJournalMapEvent::getNewValue, alwaysTrue()))
            .withoutTimestamps()
            // Partition the stream into 10 key groups so the async service is exercised per-key
            .groupingKey(i -> i % 10)
            .mapUsingServiceAsync(serviceFactory, transformPartitionedFn(i -> i + "-1"))
            .setLocalParallelism(2)
            .writeTo(Sinks.list(sinkList));

    instance().getJet().newJob(pipeline, jobConfig);

    // The source is an unbounded journal, so poll until every item shows up transformed.
    assertResultEventually(i -> Stream.of(i + "-1"), NUM_ITEMS);
}
Usage example of com.hazelcast.jet.config.JobConfig from the hazelcast project.
Source: class AsyncTransformUsingServiceBatchP_IntegrationTest, method test_pipelineApi_mapNotPartitioned.
@Test
public void test_pipelineApi_mapNotPartitioned() {
    // Build a streaming pipeline: map journal -> batched async service mapping (no key) -> list sink.
    Pipeline pipeline = Pipeline.create();
    pipeline.readFrom(Sources.mapJournal(journaledMap, START_FROM_OLDEST,
                    EventJournalMapEvent::getNewValue, alwaysTrue()))
            .withoutTimestamps()
            // Batch up to 128 items per async service invocation, without any grouping key
            .mapUsingServiceAsyncBatched(serviceFactory, 128, transformNotPartitionedFn(i -> i + "-1"))
            .setLocalParallelism(2)
            .writeTo(Sinks.list(sinkList));

    instance().getJet().newJob(pipeline, jobConfig);

    assertResult(i -> Stream.of(i + "-1"), NUM_ITEMS);
}
Usage example of com.hazelcast.jet.config.JobConfig from the hazelcast project.
Source: class AsyncTransformUsingServiceBatchP_IntegrationTest, method before.
@Before
public void before() {
    // Random names keep parallel test runs from colliding on shared cluster structures.
    journaledMap = instance().getMap(randomMapName("journaledMap"));
    journaledMap.putAll(IntStream.range(0, NUM_ITEMS)
            .boxed()
            .collect(toMap(i -> i, i -> i)));
    sinkList = instance().getList(randomMapName("sinkList"));

    // Exactly-once with interval 0 snapshots as fast as possible, stressing the snapshot path.
    jobConfig = new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(0);

    // One executor shared by all processors on the member; torn down via shutdown.
    serviceFactory = sharedService(ctx -> Executors.newFixedThreadPool(8), ExecutorService::shutdown);
}
Usage example of com.hazelcast.jet.config.JobConfig from the hazelcast project.
Source: class AsyncTransformUsingServiceBatchP_IntegrationTest, method stressTestInt.
private void stressTestInt(boolean restart) {
    /*
    This is a stress test of the cooperative emission using the DAG api. Only through DAG
    API we can configure edge queue sizes, which we use to cause more trouble for the
    cooperative emission.
    */
    // Grow the source map so the job has enough work to survive several restarts.
    int totalItems = 10_000;
    journaledMap.putAll(IntStream.range(NUM_ITEMS, totalItems)
            .boxed()
            .collect(toMap(i -> i, i -> i)));

    DAG dag = new DAG();
    // Throttled journal source; the event time is the item value itself.
    Vertex sourceVertex = dag.newVertex("source",
            throttle(streamMapP(journaledMap.getName(), alwaysTrue(), EventJournalMapEvent::getNewValue,
                    START_FROM_OLDEST,
                    eventTimePolicy(i -> (long) ((Integer) i), WatermarkPolicy.limitingLag(10), 10, 0, 0)), 5000));

    // Each input item fans out into five suffixed strings; the batched results are flattened.
    BiFunctionEx<ExecutorService, List<Integer>, CompletableFuture<Traverser<String>>> batchedFlatMapFn =
            transformNotPartitionedFn(i -> traverseItems(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5"))
                    .andThen(future -> future.thenApply(
                            results -> traverseIterable(results).flatMap(Function.identity())));

    ProcessorSupplier mapSupplier =
            AsyncTransformUsingServiceBatchedP.supplier(serviceFactory, DEFAULT_MAX_CONCURRENT_OPS, 128, batchedFlatMapFn);
    Vertex mapVertex = dag.newVertex("map", mapSupplier).localParallelism(2);
    Vertex sinkVertex = dag.newVertex("sink", SinkProcessors.writeListP(sinkList.getName()));

    // Use a shorter queue to not block the barrier from the source for too long due to
    // the backpressure from the slow mapper
    EdgeConfig inEdgeConfig = new EdgeConfig().setQueueSize(128);
    // Use a shorter queue on output from the mapper so that we experience backpressure
    // from the sink
    EdgeConfig outEdgeConfig = new EdgeConfig().setQueueSize(10);
    dag.edge(between(sourceVertex, mapVertex).setConfig(inEdgeConfig))
       .edge(between(mapVertex, sinkVertex).setConfig(outEdgeConfig));

    Job job = instance().getJet().newJob(dag, jobConfig);

    // Optionally bounce the job a few times while it runs to stress snapshot/restore.
    for (int attempt = 0; restart && attempt < 5; attempt++) {
        assertNotNull(job);
        assertTrueEventually(() -> {
            JobStatus currentStatus = job.getStatus();
            assertTrue("status=" + currentStatus, currentStatus == RUNNING || currentStatus == COMPLETED);
        });
        sleepMillis(100);
        try {
            job.restart();
        } catch (IllegalStateException ise) {
            // Restart is rejected once the job finished; that ends the stress loop.
            assertTrue(ise.toString(), ise.getMessage().startsWith("Cannot RESTART_GRACEFUL"));
            break;
        }
    }

    assertResult(i -> Stream.of(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5"), totalItems);
}
Usage example of com.hazelcast.jet.config.JobConfig from the hazelcast project.
Source: class JobSubmissionSlownessRegressionTest, method regressionTestForPR1488.
@Test
public void regressionTestForPR1488() {
    logger.info(String.format("Starting test with %d threads", THREADS_COUNT));
    ExecutorService executorService = Executors.newFixedThreadPool(THREADS_COUNT);
    double measurementARateSum = 0;
    double measurementBRateSum = 0;
    DAG dag = twoVertex();
    HazelcastInstance client = createHazelcastClient();
    try {
        // Repeatedly submit THREADS_COUNT concurrent jobs and measure the round-trip rate.
        while (measurementCount < MEASUREMENT_B_CYCLE_SECTION) {
            AtomicInteger completedRoundTrips = new AtomicInteger();
            long start = System.nanoTime();
            List<Future<?>> futures = new ArrayList<>();
            for (int i = 0; i < THREADS_COUNT; i++) {
                Future<?> f = executorService.submit(() -> {
                    bench(() -> {
                        client.getJet().newJob(dag, new JobConfig()).join();
                    }, completedRoundTrips);
                });
                futures.add(f);
            }
            futures.forEach(f -> uncheckRun(f::get));
            long elapsed = System.nanoTime() - start;
            double rate = (double) completedRoundTrips.get() / (double) elapsed * SECONDS.toNanos(1);
            // Use the test logger rather than System.out, consistent with the rest of the method.
            logger.info("Rate was " + rate + " req/s");
            measurementCount++;
            if (measurementCount > HEAT_UP_CYCLE_SECTION) {
                // Warm-up cycles are over; start accumulating measurement A.
                if (measurementCount <= MEASUREMENT_A_CYCLE_SECTION) {
                    measurementARateSum += rate;
                } else if (measurementCount > WAIT_BEFORE_MEASUREMENT_B_SECTION) {
                    // After a cool-down gap, accumulate measurement B.
                    measurementBRateSum += rate;
                }
            }
        }
    } finally {
        // FIX: the pool was previously never shut down, leaking THREADS_COUNT threads
        // for the remainder of the test JVM's life.
        executorService.shutdown();
    }
    double measurementARate = measurementARateSum / MEASUREMENT_A_CYCLE_COUNT;
    double measurementBRate = measurementBRateSum / MEASUREMENT_B_CYCLE_COUNT;
    // The second measurement may be up to 20% slower than the first before we call it a regression.
    assertTrue("Job submission rate should not decrease. First rate: " + measurementARate
            + ", second rate: " + measurementBRate, measurementARate * 0.8 < measurementBRate);
}
Aggregations