Usage example of com.hazelcast.collection.IList in the project hazelcast (by hazelcast): class SourceBuilderTest, method test_nonFaultTolerantSource_processingGuaranteeOn.
@Test
public void test_nonFaultTolerantSource_processingGuaranteeOn() {
    // A source that carries state ("foo") but defines no snapshot/restore
    // functions, i.e. it is not fault-tolerant — yet the job below runs with
    // EXACTLY_ONCE and a snapshot interval configured.
    StreamSource<Integer> source = SourceBuilder
            .stream("src", procCtx -> "foo")
            .<Integer>fillBufferFn((state, buf) -> {
                buf.add(0);
                Thread.sleep(100);
            })
            .build();

    Pipeline pipeline = Pipeline.create();
    IList<Integer> sinkList = hz().getList("result-" + UuidUtil.newUnsecureUuidString());
    pipeline.readFrom(source).withoutTimestamps().writeTo(Sinks.list(sinkList));

    Job job = hz().getJet().newJob(pipeline, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(100));

    // Let at least one snapshot complete, then force a restart.
    waitForFirstSnapshot(new JobRepository(hz()), job.getId(), 10, true);
    job.restart();
    assertJobStatusEventually(job, JobStatus.RUNNING);

    // The job must keep emitting items after the restart.
    int sizeBeforeCheck = sinkList.size();
    assertTrueEventually(() -> assertTrue(sinkList.size() > sizeBeforeCheck), 5);
}
Usage example of com.hazelcast.collection.IList in the project hazelcast (by hazelcast): class JmsSourceIntegration_NonSharedClusterTest, method when_memberTerminated_then_transactionsRolledBack.
@Test
public void when_memberTerminated_then_transactionsRolledBack() throws Exception {
    HazelcastInstance member1 = createHazelcastInstance();
    HazelcastInstance member2 = createHazelcastInstance();

    // Use a higher number of messages so that each of the parallel processors gets some.
    JmsTestUtil.sendMessages(getConnectionFactory(), "queue", true, MESSAGE_COUNT);

    IList<String> results = member1.getList("sinkList");
    Pipeline pipeline = Pipeline.create();
    pipeline.readFrom(Sources.jmsQueueBuilder(JmsSourceIntegration_NonSharedClusterTest::getConnectionFactory)
                    .destinationName("queue")
                    .build(msg -> ((TextMessage) msg).getText()))
            .withoutTimestamps()
            .writeTo(Sinks.list(results));

    // Huge snapshot interval: no snapshot will complete during the test.
    member1.getJet().newJob(pipeline, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(DAYS.toMillis(1)));

    assertTrueEventually(() -> assertEquals("expected items not in sink", MESSAGE_COUNT, results.size()), 20);

    // Forcefully terminate the second member. The terminated member will NOT
    // roll back its own transactions; the transactions with processorIndex
    // beyond the new total parallelism must be rolled back by the survivors.
    // We assert that each item is emitted twice: if the rollback didn't happen,
    // the items in the non-rolled-back transactions would be stalled and
    // emitted only once, appearing only after the default Artemis transaction
    // timeout of 5 minutes.
    member2.getLifecycleService().terminate();
    assertTrueEventually(() -> assertEquals("items should be emitted twice", MESSAGE_COUNT * 2, results.size()), 30);
}
Usage example of com.hazelcast.collection.IList in the project hazelcast (by hazelcast): class HazelcastConnectorTest, method when_readMap_withPredicateAndFunction.
@Test
public void when_readMap_withPredicateAndFunction() {
    // Populate the source map with identity entries i -> i.
    IMap<Integer, Integer> srcMap = instance().getMap(sourceName);
    for (int i = 0; i < ENTRY_COUNT; i++) {
        srcMap.put(i, i);
    }

    // Read the map, filtering out key 0 and projecting each entry to its key.
    DAG dag = new DAG();
    Vertex mapSource = dag.newVertex("source", readMapP(sourceName, e -> !e.getKey().equals(0), Map.Entry::getKey));
    Vertex listSink = dag.newVertex("sink", writeListP(sinkName));
    dag.edge(between(mapSource, listSink));
    instance().getJet().newJob(dag).join();

    // Every key except 0 must end up in the sink list.
    IList<Object> sinkContents = instance().getList(sinkName);
    assertEquals(ENTRY_COUNT - 1, sinkContents.size());
    assertFalse(sinkContents.contains(0));
    assertTrue(sinkContents.contains(1));
}
Usage example of com.hazelcast.collection.IList in the project hazelcast (by hazelcast): class MBeanTest, method testList.
@Test
public void testList() throws Exception {
    // Use a wildcard instead of the raw IList type — element type is irrelevant here.
    IList<?> list = holder.getHz().getList("list");
    // Touch the proxy so the corresponding MBean gets registered.
    list.size();
    holder.assertMBeanExistEventually("IList", list.getName());
}
Usage example of com.hazelcast.collection.IList in the project hazelcast (by hazelcast): class SourceBuilder_TopologyChangeTest, method testTopologyChange.
// Shared driver for topology-change tests: runs a streaming job with a stateful
// custom source, waits for a snapshot, applies the supplied topology change and
// verifies that (a) the source state was restored from the snapshot and (b) the
// job keeps producing results afterwards.
//   secondMemberSupplier - optionally creates a second member before the job starts
//   changeTopologyFn     - performs the topology change (e.g. add or terminate a member)
//   assertMonotonicity   - when true, also asserts window starts form a gapless
//                          sequence (only holds if the job terminated gracefully)
private void testTopologyChange(Supplier<HazelcastInstance> secondMemberSupplier, Consumer<HazelcastInstance> changeTopologyFn, boolean assertMonotonicity) {
stateRestored = false;
// Source emits consecutive longs where each item's value is also its timestamp,
// throttled to at most 1 item per elapsed millisecond and at most 100 items per
// fillBuffer call.
StreamSource<Integer> source = SourceBuilder.timestampedStream("src", ctx -> new NumberGeneratorContext()).<Integer>fillBufferFn((src, buffer) -> {
long expectedCount = NANOSECONDS.toMillis(System.nanoTime() - src.startTime);
expectedCount = Math.min(expectedCount, src.current + 100);
while (src.current < expectedCount) {
buffer.add(src.current, src.current);
src.current++;
}
}).createSnapshotFn(src -> {
System.out.println("Will save " + src.current + " to snapshot");
return src;
}).restoreSnapshotFn((src, states) -> {
// Flag checked below to prove the restore path actually ran.
stateRestored = true;
assert states.size() == 1;
src.restore(states.get(0));
System.out.println("Restored " + src.current + " from snapshot");
}).build();
Config config = smallInstanceConfig();
// restart sooner after member add
config.getJetConfig().setScaleUpDelayMillis(1000);
HazelcastInstance hz = createHazelcastInstance(config);
// May be null depending on the supplier — TODO confirm contract with callers.
HazelcastInstance possibleSecondNode = secondMemberSupplier.get();
long windowSize = 100;
IList<WindowResult<Long>> result = hz.getList("result-" + UuidUtil.newUnsecureUuidString());
Pipeline p = Pipeline.create();
p.readFrom(source).withNativeTimestamps(0).window(tumbling(windowSize)).aggregate(AggregateOperations.counting()).peek().writeTo(Sinks.list(result));
Job job = hz.getJet().newJob(p, new JobConfig().setProcessingGuarantee(EXACTLY_ONCE).setSnapshotIntervalMillis(500));
assertTrueEventually(() -> assertFalse("result list is still empty", result.isEmpty()));
assertJobStatusEventually(job, JobStatus.RUNNING);
JobRepository jr = new JobRepository(hz);
// Ensure there is a snapshot to restore from before changing the topology.
waitForFirstSnapshot(jr, job.getId(), 10, false);
assertFalse(stateRestored);
changeTopologyFn.accept(possibleSecondNode);
assertTrueEventually(() -> assertTrue("restoreSnapshotFn was not called", stateRestored));
// wait until more results are added
int oldSize = result.size();
assertTrueEventually(() -> assertTrue("no more results added to the list", result.size() > oldSize));
cancelAndJoin(job);
// results should contain sequence of results, each with count=windowSize, monotonic, if job was
// allowed to terminate gracefully
Iterator<WindowResult<Long>> iterator = result.iterator();
for (int i = 0; i < result.size(); i++) {
WindowResult<Long> next = iterator.next();
assertEquals(windowSize, (long) next.result());
if (assertMonotonicity) {
assertEquals(i * windowSize, next.start());
}
}
}
Aggregations