Use of com.hazelcast.jet.IMapJet in the project hazelcast-jet-reference-manual by Hazelcast:
class JUS, method s1.
static void s1() {
JetInstance jet = Jet.newJetInstance();
try {
// tag::s1[]
IMapJet<String, Integer> map = jet.getMap("latitudes");
map.put("London", 51);
map.put("Paris", 48);
map.put("NYC", 40);
map.put("Sydney", -34);
map.put("Sao Paulo", -23);
map.put("Jakarta", -6);
DistributedStream.fromMap(map).filter(e -> e.getValue() < 0).forEach(System.out::println);
// end::s1[]
} finally {
Jet.shutdownAll();
}
}
Use of com.hazelcast.jet.IMapJet in the project hazelcast-jet by Hazelcast:
class HazelcastConnectorTest, method when_streamMap_withFilterAndProjection.
@Test
public void when_streamMap_withFilterAndProjection() {
DAG dag = new DAG();
Vertex source = dag.newVertex("source", SourceProcessors.<Integer, Integer, Integer>streamMapP(streamSourceName, event -> event.getKey() != 0, EventJournalMapEvent::getKey, START_FROM_OLDEST, wmGenParams(i -> i, limitingLag(0), noThrottling(), 10_000)));
Vertex sink = dag.newVertex("sink", writeListP(streamSinkName));
dag.edge(between(source, sink));
Job job = jetInstance.newJob(dag);
IMapJet<Integer, Integer> sourceMap = jetInstance.getMap(streamSourceName);
range(0, ENTRY_COUNT).forEach(i -> sourceMap.put(i, i));
assertSizeEventually(ENTRY_COUNT - 1, jetInstance.getList(streamSinkName));
assertFalse(jetInstance.getList(streamSinkName).contains(0));
assertTrue(jetInstance.getList(streamSinkName).contains(1));
job.cancel();
}
Use of com.hazelcast.jet.IMapJet in the project hazelcast-jet by Hazelcast:
class SnapshotFailureTest, method when_snapshotFails_then_jobShouldNotFail.
@Test
public void when_snapshotFails_then_jobShouldNotFail() {
int numPartitions = 2;
int numElements = 10;
IMapJet<Object, Object> results = instance1.getMap("results");
DAG dag = new DAG();
SequencesInPartitionsMetaSupplier sup = new SequencesInPartitionsMetaSupplier(numPartitions, numElements);
Vertex generator = dag.newVertex("generator", peekOutputP(throttle(sup, 2))).localParallelism(1);
Vertex writeMap = dag.newVertex("writeMap", writeMapP(results.getName())).localParallelism(1);
dag.edge(between(generator, writeMap));
JobConfig config = new JobConfig();
config.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE);
config.setSnapshotIntervalMillis(100);
Job job = instance1.newJob(dag, config);
// let's start a second job that will watch the snapshots map and write failed
// SnapshotRecords to a list, which we will check for presence of failed snapshot
Pipeline p = Pipeline.create();
p.drawFrom(Sources.mapJournal(snapshotsMapName(job.getId()), event -> event.getNewValue() instanceof SnapshotRecord && ((SnapshotRecord) event.getNewValue()).status() == SnapshotStatus.FAILED, EventJournalMapEvent::getNewValue, JournalInitialPosition.START_FROM_OLDEST)).peek().drainTo(Sinks.list("failed_snapshot_records"));
instance1.newJob(p);
job.join();
assertEquals("numPartitions", numPartitions, results.size());
assertEquals("offset partition 0", numElements - 1, results.get(0));
assertEquals("offset partition 1", numElements - 1, results.get(1));
assertTrue("no failure occurred in store", storeFailed);
assertFalse("no failed snapshot appeared in snapshotsMap", instance1.getList("failed_snapshot_records").isEmpty());
}
Aggregations