Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
The class SourcesTest, method remoteMapWithUnknownValueClass_whenQueryingIsNotNecessary:
@Test
public void remoteMapWithUnknownValueClass_whenQueryingIsNotNecessary() throws Exception {
    // Given
    URL jarResource = Thread.currentThread().getContextClassLoader()
            .getResource("deployment/sample-pojo-1.0-car.jar");
    assertNotNull("jar not found", jarResource);
    ClassLoader cl = new URLClassLoader(new URL[]{jarResource});
    Class<?> carClz = cl.loadClass("com.sample.pojo.car.Car");
    Object car = carClz.getConstructor(String.class, String.class)
            .newInstance("make", "model");
    IMap<String, Object> map = remoteHz.getMap(srcName);
    // the class of the value is unknown to the remote IMDG member; it is known only to Jet
    map.put("key", car);

    // When
    BatchSource<Entry<String, Object>> source = Sources.remoteMap(srcName, clientConfig);

    // Then
    p.readFrom(source).map(en -> en.getValue().toString()).writeTo(sink);
    JobConfig jobConfig = new JobConfig();
    jobConfig.addJar(jarResource);
    hz().getJet().newJob(p, jobConfig).join();

    List<Object> expected = singletonList(car.toString());
    List<Object> actual = new ArrayList<>(sinkList);
    assertEquals(expected, actual);
}
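The pattern above hinges on JobConfig.addJar: the JAR travels with the job and is added to the job's classloader on each member, so the value class never has to be on the cluster classpath. A minimal standalone sketch of that pattern, assuming a hypothetical JAR path and map name:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.Sources;

public class AddJarSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.bootstrappedInstance();

        Pipeline p = Pipeline.create();
        p.readFrom(Sources.map("source-map"))   // hypothetical map name
         .map(en -> en.getValue().toString())
         .writeTo(Sinks.logger());

        JobConfig jobConfig = new JobConfig();
        // Ship the JAR with the job; its classes are loaded by the job's
        // classloader on every member, not from the cluster classpath.
        jobConfig.addJar("deployment/sample-pojo-1.0-car.jar"); // hypothetical path
        hz.getJet().newJob(p, jobConfig).join();
    }
}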
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
The class SourcesTest, method remoteCacheWithUnknownValueClass:
@Test
public void remoteCacheWithUnknownValueClass() throws Exception {
    // Given
    URL jarResource = Thread.currentThread().getContextClassLoader()
            .getResource("deployment/sample-pojo-1.0-car.jar");
    assertNotNull("jar not found", jarResource);
    ClassLoader cl = new URLClassLoader(new URL[]{jarResource});
    Class<?> carClz = cl.loadClass("com.sample.pojo.car.Car");
    Object car = carClz.getConstructor(String.class, String.class)
            .newInstance("make", "model");
    ICache<String, Object> cache = remoteHz.getCacheManager().getCache(srcName);
    // the class of the value is unknown to the remote IMDG member; it is known only to Jet
    cache.put("key", car);

    // When
    BatchSource<Entry<String, Object>> source = Sources.remoteCache(srcName, clientConfig);

    // Then
    p.readFrom(source).map(en -> en.getValue().toString()).writeTo(sink);
    JobConfig jobConfig = new JobConfig();
    jobConfig.addJar(jarResource);
    hz().getJet().newJob(p, jobConfig).join();

    List<Object> expected = singletonList(car.toString());
    List<Object> actual = new ArrayList<>(sinkList);
    assertEquals(expected, actual);
}
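Both tests pass a clientConfig that points Sources.remoteMap/remoteCache at the remote cluster, but its construction is defined elsewhere in the test class. A plausible sketch, with the cluster name and member address as assumptions:

import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.jet.pipeline.BatchSource;
import com.hazelcast.jet.pipeline.Sources;

import java.util.Map.Entry;

public class RemoteSourceSketch {
    public static void main(String[] args) {
        ClientConfig clientConfig = new ClientConfig();
        clientConfig.setClusterName("remote-cluster");               // assumed name
        clientConfig.getNetworkConfig().addAddress("10.0.0.5:5701"); // assumed address

        // The same client config works for both remote batch sources used above.
        BatchSource<Entry<String, Object>> mapSource =
                Sources.remoteMap("source-map", clientConfig);
        BatchSource<Entry<String, Object>> cacheSource =
                Sources.remoteCache("source-cache", clientConfig);
    }
}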
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
The class Sources_withEventJournalTest, method remoteCacheJournal_withUnknownValueClass:
@Test
public void remoteCacheJournal_withUnknownValueClass() throws Exception {
    // Given
    URL jarResource = Thread.currentThread().getContextClassLoader()
            .getResource("deployment/sample-pojo-1.0-car.jar");
    assertNotNull("jar not found", jarResource);
    ClassLoader cl = new URLClassLoader(new URL[]{jarResource});
    Class<?> carClz = cl.loadClass("com.sample.pojo.car.Car");
    Object car = carClz.getConstructor(String.class, String.class)
            .newInstance("make", "model");
    String cacheName = JOURNALED_CACHE_PREFIX + randomName();
    ICache<String, Object> cache = remoteHz.getCacheManager().getCache(cacheName);
    // the class of the value is unknown to the remote IMDG member; it is known only to Jet
    cache.put("key", car);

    // When
    StreamSource<Entry<Object, Object>> source =
            Sources.remoteCacheJournal(cacheName, clientConfig, START_FROM_OLDEST);

    // Then
    p.readFrom(source).withoutTimestamps().map(en -> en.getValue().toString()).writeTo(sink);
    JobConfig jobConfig = new JobConfig();
    jobConfig.addJar(jarResource);
    Job job = hz().getJet().newJob(p, jobConfig);

    List<Object> expected = singletonList(car.toString());
    assertTrueEventually(() -> assertEquals(expected, new ArrayList<>(sinkList)), 10);
    job.cancel();
}
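remoteCacheJournal only works when the remote cluster has the event journal enabled for the cache; the JOURNALED_CACHE_PREFIX naming suggests a wildcard config on the remote members. A minimal sketch of such a member config, with the prefix pattern and capacity as assumptions:

import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class JournaledCacheMemberSketch {
    public static void main(String[] args) {
        Config config = new Config();
        // Enable the event journal for all caches matching the prefix
        // (assumed wildcard mirroring JOURNALED_CACHE_PREFIX in the test).
        config.getCacheConfig("journaled-cache*")
              .getEventJournalConfig()
              .setEnabled(true)
              .setCapacity(10_000); // assumed capacity
        HazelcastInstance member = Hazelcast.newHazelcastInstance(config);
    }
}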
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
The class AsyncTransformUsingServiceP_IntegrationTest, method test_pipelineApi_mapNotPartitioned:
@Test
public void test_pipelineApi_mapNotPartitioned() {
    Pipeline p = Pipeline.create();
    p.readFrom(Sources.mapJournal(journaledMap, START_FROM_OLDEST,
                EventJournalMapEvent::getNewValue, alwaysTrue()))
     .withoutTimestamps()
     .mapUsingServiceAsync(serviceFactory, transformNotPartitionedFn(i -> i + "-1"))
     .setLocalParallelism(2)
     .writeTo(Sinks.list(sinkList));

    instance().getJet().newJob(p, jobConfig);
    assertResultEventually(i -> Stream.of(i + "-1"), NUM_ITEMS);
}
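The test's serviceFactory and transformNotPartitionedFn are defined elsewhere in the class. To make the mapUsingServiceAsync call concrete, here is a self-contained sketch that uses a shared executor as the service; the "-1" suffix mirrors the test, everything else is an assumption:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.ServiceFactories;
import com.hazelcast.jet.pipeline.ServiceFactory;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.test.TestSources;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncServiceSketch {
    public static void main(String[] args) {
        // A shared executor stands in for the test's serviceFactory (assumed).
        ServiceFactory<?, ExecutorService> serviceFactory =
                ServiceFactories.sharedService(
                        ctx -> Executors.newFixedThreadPool(4),
                        ExecutorService::shutdown);

        Pipeline p = Pipeline.create();
        p.readFrom(TestSources.items(1, 2, 3))
         .mapUsingServiceAsync(serviceFactory,
                 // The mapping runs asynchronously on the shared executor.
                 (executor, item) -> CompletableFuture.supplyAsync(
                         () -> item + "-1", executor))
         .writeTo(Sinks.logger());

        HazelcastInstance hz = Hazelcast.bootstrappedInstance();
        hz.getJet().newJob(p).join();
    }
}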
Use of com.hazelcast.jet.config.JobConfig in project hazelcast by hazelcast.
The class AsyncTransformUsingServiceP_IntegrationTest, method stressTestInt:
private void stressTestInt(boolean restart) {
    /*
       This is a stress test of cooperative emission, using the DAG API. Only through
       the DAG API can we configure edge queue sizes, which we use to put more pressure
       on the cooperative emission.
     */
    // add more input to the source map
    int numItems = 10_000;
    journaledMap.putAll(IntStream.range(NUM_ITEMS, numItems).boxed().collect(toMap(i -> i, i -> i)));

    DAG dag = new DAG();
    Vertex source = dag.newVertex("source", throttle(streamMapP(
            journaledMap.getName(), alwaysTrue(), EventJournalMapEvent::getNewValue,
            START_FROM_OLDEST,
            eventTimePolicy(i -> (long) ((Integer) i), WatermarkPolicy.limitingLag(10), 10, 0, 0)),
            5000));
    BiFunctionEx<ExecutorService, Integer, CompletableFuture<Traverser<String>>> flatMapAsyncFn =
            transformNotPartitionedFn(i -> traverseItems(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5"));
    ProcessorSupplier processorSupplier = ordered
            ? AsyncTransformUsingServiceOrderedP.supplier(serviceFactory, DEFAULT_MAX_CONCURRENT_OPS, flatMapAsyncFn)
            : AsyncTransformUsingServiceUnorderedP.supplier(serviceFactory, DEFAULT_MAX_CONCURRENT_OPS, flatMapAsyncFn, identity());
    Vertex map = dag.newVertex("map", processorSupplier).localParallelism(2);
    Vertex sink = dag.newVertex("sink", SinkProcessors.writeListP(sinkList.getName()));

    // Use a shorter queue so that the backpressure from the slow mapper doesn't
    // block the barrier from the source for too long
    EdgeConfig edgeToMapperConfig = new EdgeConfig().setQueueSize(128);
    // Use a shorter queue on the mapper's output so that we experience backpressure
    // from the sink
    EdgeConfig edgeFromMapperConfig = new EdgeConfig().setQueueSize(10);
    dag.edge(between(source, map).setConfig(edgeToMapperConfig))
       .edge(between(map, sink).setConfig(edgeFromMapperConfig));

    Job job = instance().getJet().newJob(dag, jobConfig);
    for (int i = 0; restart && i < 5; i++) {
        assertJobStatusEventually(job, RUNNING);
        sleepMillis(100);
        job.restart();
    }
    assertResultEventually(i -> Stream.of(i + "-1", i + "-2", i + "-3", i + "-4", i + "-5"), numItems);
}
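The distinguishing feature here is per-edge queue sizing, which the Pipeline API does not expose. A stripped-down sketch of just that mechanism, with the vertex implementations and list names as assumptions:

import com.hazelcast.jet.config.EdgeConfig;
import com.hazelcast.jet.core.DAG;
import com.hazelcast.jet.core.Vertex;
import com.hazelcast.jet.core.processor.SinkProcessors;
import com.hazelcast.jet.core.processor.SourceProcessors;

import static com.hazelcast.jet.core.Edge.between;

public class QueueSizeSketch {
    public static void main(String[] args) {
        DAG dag = new DAG();
        Vertex source = dag.newVertex("source",
                SourceProcessors.readListP("input-list"));   // assumed list names
        Vertex sink = dag.newVertex("sink",
                SinkProcessors.writeListP("output-list"));
        // A small queue increases backpressure between the two vertices,
        // which is the effect the stress test above relies on.
        dag.edge(between(source, sink)
                .setConfig(new EdgeConfig().setQueueSize(128)));
    }
}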