Usage of com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE in the project hazelcast by hazelcast.
From the class AsyncTransformUsingServiceBatchP_IntegrationTest, method before:
@Before
public void before() {
    // Source map: pre-populated with NUM_ITEMS identity entries (i -> i)
    // whose event journal feeds the streaming stage under test.
    journaledMap = instance().getMap(randomMapName("journaledMap"));
    journaledMap.putAll(IntStream.range(0, NUM_ITEMS)
            .boxed()
            .collect(toMap(n -> n, n -> n)));
    sinkList = instance().getList(randomMapName("sinkList"));
    // Exactly-once guarantee with the minimum snapshot interval.
    jobConfig = new JobConfig();
    jobConfig.setProcessingGuarantee(EXACTLY_ONCE);
    jobConfig.setSnapshotIntervalMillis(0);
    // One executor shared by all processors; shut down when released.
    serviceFactory = sharedService(pctx -> Executors.newFixedThreadPool(8), ExecutorService::shutdown);
}
Usage of com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE in the project hazelcast by hazelcast.
From the class JmsSourceIntegrationTestBase, method stressTest:
/**
 * Stress-tests the JMS source: produces {@code MESSAGE_COUNT} messages in the
 * background while repeatedly restarting the job, then asserts the received
 * items against the requested guarantee.
 *
 * @param graceful     whether the repeated restarts are graceful
 * @param maxGuarantee the source's {@code maxGuarantee}; with NONE nothing is
 *                     asserted about duplicates/losses, with AT_LEAST_ONCE each
 *                     item must appear at least once, with EXACTLY_ONCE exactly once
 * @param useTopic     read from a topic with a shared durable consumer instead of a queue
 */
private void stressTest(boolean graceful, ProcessingGuarantee maxGuarantee, boolean useTopic) throws Exception {
    lastListInStressTest = null;
    final int MESSAGE_COUNT = 4_000;
    Pipeline p = Pipeline.create();
    String destName = "queue-" + counter++;
    JmsSourceBuilder sourceBuilder;
    if (useTopic) {
        sourceBuilder = Sources.jmsTopicBuilder(getConnectionFactory())
                .sharedConsumer(true)
                .consumerFn(s -> s.createSharedDurableConsumer(s.createTopic(destName), "foo-consumer"));
        // create the durable subscriber now so that it doesn't lose the initial messages
        try (Connection conn = getConnectionFactory().get().createConnection()) {
            conn.setClientID("foo-client-id");
            try (Session sess = conn.createSession(false, DUPS_OK_ACKNOWLEDGE)) {
                sess.createDurableSubscriber(sess.createTopic(destName), "foo-consumer");
            }
        }
    } else {
        sourceBuilder = Sources.jmsQueueBuilder(getConnectionFactory()).destinationName(destName);
    }
    // Collect every received item into lastListInStressTest so that the final
    // assertion can count occurrences per item.
    p.readFrom(sourceBuilder.maxGuarantee(maxGuarantee)
                            .build(msg -> Long.parseLong(((TextMessage) msg).getText())))
     .withoutTimestamps()
     .peek()
     .mapStateful(CopyOnWriteArrayList<Long>::new, (list, item) -> {
         lastListInStressTest = list;
         list.add(item);
         return null;
     })
     .writeTo(Sinks.logger());
    Job job = instance().getJet().newJob(p, new JobConfig()
            .setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE)
            .setSnapshotIntervalMillis(50));
    assertJobStatusEventually(job, RUNNING);
    // start a producer that will produce MESSAGE_COUNT messages on the background to the queue, 1000 msgs/s
    @SuppressWarnings("rawtypes") Future producerFuture = spawn(() -> {
        try (Connection connection = getConnectionFactory().get().createConnection();
             Session session = connection.createSession(false, AUTO_ACKNOWLEDGE);
             MessageProducer producer = session.createProducer(
                     useTopic ? session.createTopic(destName) : session.createQueue(destName))) {
            long startTime = System.nanoTime();
            for (int i = 0; i < MESSAGE_COUNT; i++) {
                producer.send(session.createTextMessage(String.valueOf(i)));
                // sleep so that message i is sent no earlier than i ms after start
                Thread.sleep(Math.max(0, i - NANOSECONDS.toMillis(System.nanoTime() - startTime)));
            }
        } catch (Exception e) {
            throw sneakyThrow(e);
        }
    });
    // Restart the job at random intervals while the producer is running.
    int iteration = 0;
    JobRepository jr = new JobRepository(instance());
    waitForFirstSnapshot(jr, job.getId(), 20, true);
    while (!producerFuture.isDone()) {
        Thread.sleep(ThreadLocalRandom.current().nextInt(200));
        // We also do it before the first restart to workaround https://issues.apache.org/jira/browse/ARTEMIS-2546
        if (iteration++ % 3 == 0) {
            waitForNextSnapshot(jr, job.getId(), 20, true);
        }
        ((JobProxy) job).restart(graceful);
        assertJobStatusEventually(job, RUNNING);
    }
    // call for the side-effect of throwing if the producer failed
    producerFuture.get();
    assertTrueEventually(() -> {
        // count occurrences of each item; items never received get count 0
        Map<Long, Long> counts = lastListInStressTest.stream()
                .collect(Collectors.groupingBy(Function.identity(), TreeMap::new, Collectors.counting()));
        for (long i = 0; i < MESSAGE_COUNT; i++) {
            counts.putIfAbsent(i, 0L);
        }
        String countsStr = "counts: " + counts;
        if (maxGuarantee == NONE) {
            // we don't assert anything and only wait little more and check that the job didn't fail
            sleepSeconds(1);
        } else {
            // in EXACTLY_ONCE the list must have each item exactly once
            // in AT_LEAST_ONCE the list must have each item at least once
            assertTrue(countsStr, counts.values().stream()
                    .allMatch(cnt -> maxGuarantee == EXACTLY_ONCE ? cnt == 1 : cnt > 0));
        }
        logger.info(countsStr);
    }, 30);
    // Fixed: JUnit's assertEquals takes (expected, actual) — the original had
    // the arguments swapped, which would produce a misleading failure message.
    assertEquals(RUNNING, job.getStatus());
}
Usage of com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE in the project hazelcast by hazelcast.
From the class SourceBuilderTest, method test_nonFaultTolerantSource_processingGuaranteeOn:
@Test
public void test_nonFaultTolerantSource_processingGuaranteeOn() {
    // A source with no snapshot support: emits a 0 roughly every 100 ms.
    StreamSource<Integer> source = SourceBuilder
            .stream("src", procCtx -> "foo")
            .<Integer>fillBufferFn((ctx, buffer) -> {
                buffer.add(0);
                Thread.sleep(100);
            })
            .build();
    IList<Integer> result = hz().getList("result-" + UuidUtil.newUnsecureUuidString());
    Pipeline pipeline = Pipeline.create();
    pipeline.readFrom(source).withoutTimestamps().writeTo(Sinks.list(result));
    // Run with exactly-once even though the source can't snapshot.
    Job job = hz().getJet().newJob(pipeline, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(100));
    waitForFirstSnapshot(new JobRepository(hz()), job.getId(), 10, true);
    job.restart();
    assertJobStatusEventually(job, JobStatus.RUNNING);
    // The job must keep producing output after the restart.
    int sizeBeforeCheck = result.size();
    assertTrueEventually(() -> assertTrue(result.size() > sizeBeforeCheck), 5);
}
Usage of com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE in the project hazelcast by hazelcast.
From the class JmsSourceIntegration_NonSharedClusterTest, method when_memberTerminated_then_transactionsRolledBack:
@Test
public void when_memberTerminated_then_transactionsRolledBack() throws Exception {
    HazelcastInstance member1 = createHazelcastInstance();
    HazelcastInstance member2 = createHazelcastInstance();
    // enough messages so that every parallel processor receives some of them
    JmsTestUtil.sendMessages(getConnectionFactory(), "queue", true, MESSAGE_COUNT);
    IList<String> sinkList = member1.getList("sinkList");
    Pipeline pipeline = Pipeline.create();
    pipeline.readFrom(Sources.jmsQueueBuilder(JmsSourceIntegration_NonSharedClusterTest::getConnectionFactory)
                    .destinationName("queue")
                    .build(msg -> ((TextMessage) msg).getText()))
            .withoutTimestamps()
            .writeTo(Sinks.list(sinkList));
    // very long snapshot interval: no snapshot will complete during the test
    member1.getJet().newJob(pipeline, new JobConfig()
            .setProcessingGuarantee(EXACTLY_ONCE)
            .setSnapshotIntervalMillis(DAYS.toMillis(1)));
    assertTrueEventually(() -> assertEquals("expected items not in sink", MESSAGE_COUNT, sinkList.size()), 20);
    // Forcefully shut down the second member. The terminated member will NOT
    // roll back its own transaction; the transactions whose processorIndex lies
    // beyond the new total parallelism must be rolled back for it, after which
    // every item is emitted a second time. If that rollback didn't happen, the
    // items of the orphaned transaction would be stalled and emitted only once,
    // reappearing only after Artemis' default 5-minute transaction timeout.
    member2.getLifecycleService().terminate();
    assertTrueEventually(() -> assertEquals("items should be emitted twice", MESSAGE_COUNT * 2, sinkList.size()), 30);
}
Usage of com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE in the project hazelcast by hazelcast.
From the class SinkStressTestUtil, method test_withRestarts:
/**
 * Stress-tests the given sink: runs a streaming job that emits the integers
 * 0..999 from a fault-tolerant source into {@code sink}, repeatedly restarts
 * the job while it runs, and asserts the committed sink contents.
 *
 * @param instance            member to submit the job to
 * @param logger              logger for progress messages
 * @param sink                the sink under test
 * @param graceful            whether the restarts are graceful
 * @param exactlyOnce         if true, runs with EXACTLY_ONCE and each item must
 *                            appear exactly once in the sink; otherwise runs
 *                            with AT_LEAST_ONCE and each item must appear at
 *                            least once (duplicates allowed)
 * @param actualItemsSupplier supplies the items currently committed in the sink
 */
public static void test_withRestarts(@Nonnull HazelcastInstance instance, @Nonnull ILogger logger, @Nonnull Sink<Integer> sink, boolean graceful, boolean exactlyOnce, @Nonnull SupplierEx<List<Integer>> actualItemsSupplier) {
int numItems = 1000;
Pipeline p = Pipeline.create();
// Test source: only the processor with global index 0 emits (all others start
// at Integer.MAX_VALUE and never pass the "< numItems" check). The counter is
// saved to the snapshot and restored on restart, so no items are lost.
p.readFrom(SourceBuilder.stream("src", procCtx -> new int[] { procCtx.globalProcessorIndex() == 0 ? 0 : Integer.MAX_VALUE }).<Integer>fillBufferFn((ctx, buf) -> {
if (ctx[0] < numItems) {
buf.add(ctx[0]++);
// throttle emission so restarts happen mid-stream
sleepMillis(5);
}
}).distributed(1).createSnapshotFn(ctx -> ctx[0] < Integer.MAX_VALUE ? ctx[0] : null).restoreSnapshotFn((ctx, state) -> ctx[0] = ctx[0] != Integer.MAX_VALUE ? state.get(0) : Integer.MAX_VALUE).build()).withoutTimestamps().peek().writeTo(sink);
JobConfig config = new JobConfig().setProcessingGuarantee(exactlyOnce ? EXACTLY_ONCE : AT_LEAST_ONCE).setSnapshotIntervalMillis(50);
JobProxy job = (JobProxy) instance.getJet().newJob(p, config);
long endTime = System.nanoTime() + SECONDS.toNanos(TEST_TIMEOUT_SECONDS);
int lastCount = 0;
// Expected sink content rendered one row per item: "i=1" rows when
// exactly-once (count must be 1), bare "i" rows otherwise.
String expectedRows = IntStream.range(0, numItems).mapToObj(i -> i + (exactlyOnce ? "=1" : "")).collect(joining("\n"));
// We'll restart once, then restart again after a short sleep (possibly during initialization),
// and then assert some output so that the test isn't constantly restarting without any progress
Long lastExecutionId = null;
for (; ; ) {
lastExecutionId = assertJobRunningEventually(instance, job, lastExecutionId);
job.restart(graceful);
lastExecutionId = assertJobRunningEventually(instance, job, lastExecutionId);
sleepMillis(ThreadLocalRandom.current().nextInt(400));
job.restart(graceful);
try {
List<Integer> actualItems;
Set<Integer> distinctActualItems;
// Poll the sink until it shows progress (at least 100 new distinct items
// since the previous iteration, capped at numItems) or the deadline passes.
do {
actualItems = actualItemsSupplier.get();
distinctActualItems = new HashSet<>(actualItems);
} while (distinctActualItems.size() < Math.min(numItems, 100 + lastCount) && System.nanoTime() < endTime);
lastCount = distinctActualItems.size();
logger.info("number of committed items in the sink so far: " + lastCount);
if (exactlyOnce) {
// render "item=count" rows; each count must be exactly 1 to match expectedRows
String actualItemsStr = actualItems.stream().collect(groupingBy(identity(), TreeMap::new, counting())).entrySet().stream().map(Object::toString).collect(joining("\n"));
assertEquals(expectedRows, actualItemsStr);
} else {
// at-least-once: every item must be present; duplicates are ignored
assertEquals(expectedRows, distinctActualItems.stream().map(Objects::toString).collect(joining("\n")));
}
// if content matches, break the loop. Otherwise restart and try again
break;
} catch (AssertionError e) {
// keep restarting and retrying until the overall deadline expires
if (System.nanoTime() >= endTime) {
throw e;
}
}
}
}
Aggregations