Use of com.hazelcast.jet.core.test.TestOutbox in project hazelcast by hazelcast, from class StreamEventJournalPTest, method when_lostItems_afterRestore:
@Test
public void when_lostItems_afterRestore() throws Exception {
    TestOutbox outbox = new TestOutbox(new int[] { 16 }, 16);
    final Processor p = supplier.get();
    p.init(outbox, new TestProcessorContext().setHazelcastInstance(instance));
    List<Object> output = new ArrayList<>();
    assertTrueEventually(() -> {
        assertFalse("Processor should never complete", p.complete());
        outbox.drainQueueAndReset(0, output, true);
        assertTrue("consumed different number of items than expected", output.size() == 0);
    }, 3);
    assertTrueEventually(() -> {
        assertTrue("Processor did not finish snapshot", p.saveToSnapshot());
    }, 3);
    // overflow journal
    fillJournal(CAPACITY_PER_PARTITION + 1);
    List<Entry> snapshotItems = new ArrayList<>();
    outbox.drainSnapshotQueueAndReset(snapshotItems, false);
    logger.info("Restoring journal");
    // restore from snapshot
    assertRestore(snapshotItems);
}
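Stripped of the event-journal specifics, the test above shows the basic TestOutbox workflow: hand the processor a TestOutbox and a TestProcessorContext in init(), then pull emitted items out with drainQueueAndReset(...) and snapshot entries with drainSnapshotQueueAndReset(...). A minimal, self-contained sketch of that workflow (the SketchP processor and all names below are hypothetical, not part of the Hazelcast sources):

import com.hazelcast.jet.core.AbstractProcessor;
import com.hazelcast.jet.core.test.TestOutbox;
import com.hazelcast.jet.core.test.TestProcessorContext;

import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;

public class DrainSketch {

    // Hypothetical processor: emits one regular item and one snapshot entry.
    static class SketchP extends AbstractProcessor {
        @Override
        public boolean complete() {
            return tryEmit("item-1");           // lands in the regular queue of edge 0
        }

        @Override
        public boolean saveToSnapshot() {
            return tryEmitToSnapshot("k", 1L);  // lands in the snapshot queue
        }
    }

    public static void main(String[] args) throws Exception {
        SketchP p = new SketchP();
        // one output edge with capacity 16, snapshot queue capacity 16
        TestOutbox outbox = new TestOutbox(new int[] { 16 }, 16);
        p.init(outbox, new TestProcessorContext());

        List<Object> output = new ArrayList<>();
        p.complete();
        outbox.drainQueueAndReset(0, output, true);       // output == [item-1]

        List<Entry<Object, Object>> snapshot = new ArrayList<>();
        p.saveToSnapshot();
        outbox.drainSnapshotQueueAndReset(snapshot, false);
        System.out.println(output + " / " + snapshot);
    }
}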
Use of com.hazelcast.jet.core.test.TestOutbox in project hazelcast by hazelcast, from class StreamEventJournalPTest, method when_futureSequence_thenResetOffset:
@Test
public void when_futureSequence_thenResetOffset() throws Exception {
    TestOutbox outbox = new TestOutbox(new int[] { 16 }, 16);
    StreamEventJournalP p = (StreamEventJournalP) supplier.get();
    // fill journal so that it overflows
    fillJournal(CAPACITY_PER_PARTITION + 1);
    // initial offsets will be 5, since capacity per partition is 5
    p.init(outbox, new TestProcessorContext().setHazelcastInstance(instance));
    // clear partitions before doing any read, but after initializing offsets
    map.destroy();
    // when we consume, we should not retrieve anything because we will ask for
    // offset 5, but current head is 0. This should not cause any error
    List<Object> actual = new ArrayList<>();
    // we should not receive any items, but the offset should be reset back to 0
    assertTrueFiveSeconds(() -> {
        assertFalse("Processor should never complete", p.complete());
        outbox.drainQueueAndReset(0, actual, true);
        assertTrue("consumed different number of items than expected", actual.size() == 0);
    });
    // add one item to each partition
    fillJournal(1);
    // receive the items we just added
    assertTrueEventually(() -> {
        assertFalse("Processor should never complete", p.complete());
        outbox.drainQueueAndReset(0, actual, true);
        assertTrue("consumed different number of items than expected", actual.size() == 2);
    });
}
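The assertTrueFiveSeconds and assertTrueEventually blocks in these tests share one shape: keep calling complete(), drain edge 0 of the TestOutbox into the same target list, and check the cumulative item count. A hedged helper sketch of that loop (the class, method name and the 10 ms poll interval are assumptions, not code from the test):

import com.hazelcast.jet.core.Processor;
import com.hazelcast.jet.core.test.TestOutbox;

import java.util.List;
import java.util.concurrent.TimeUnit;

public final class DrainUntil {

    private DrainUntil() {
    }

    // Polls p.complete() and drains edge 0 until 'target' holds 'expectedCount' items
    // or the timeout elapses; mirrors the assert blocks in the tests above.
    public static void drainUntil(Processor p, TestOutbox outbox, List<Object> target,
                                  int expectedCount, long timeoutSeconds) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
        while (target.size() < expectedCount) {
            if (p.complete()) {
                throw new AssertionError("Processor should never complete");
            }
            outbox.drainQueueAndReset(0, target, true);
            if (target.size() < expectedCount && System.nanoTime() > deadline) {
                throw new AssertionError("timed out: expected " + expectedCount
                        + " items, got " + target.size());
            }
            Thread.sleep(10);
        }
    }
}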
Use of com.hazelcast.jet.core.test.TestOutbox in project hazelcast by hazelcast, from class SlidingWindowP_failoverTest, method init:
private void init(ProcessingGuarantee guarantee) throws Exception {
    SlidingWindowPolicy wDef = SlidingWindowPolicy.tumblingWinPolicy(1);
    AggregateOperation1<Object, LongAccumulator, Long> aggrOp = counting();
    p = new SlidingWindowP<>(
            singletonList(entryKey()),
            singletonList((ToLongFunctionEx<Entry<?, Long>>) Entry::getValue),
            wDef, 0L, aggrOp, KeyedWindowResult::new, true);
    Outbox outbox = new TestOutbox(128);
    Context context = new TestProcessorContext().setProcessingGuarantee(guarantee);
    p.init(outbox, context);
}
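When a test does not need to drive the outbox by hand the way this init(...) helper sets up, the same package also offers TestSupport, which manages an outbox and context internally and runs the processor through input, completion and snapshot save/restore cycles. A minimal sketch using the built-in Processors.mapP as the example processor (not SlidingWindowP):

import com.hazelcast.jet.core.test.TestSupport;

import static com.hazelcast.jet.core.processor.Processors.mapP;
import static java.util.Arrays.asList;

public class TestSupportSketch {
    public static void main(String[] args) {
        // verifies output, progress and snapshot save/restore behavior in one call
        TestSupport.verifyProcessor(mapP((Integer i) -> i + 1))
                   .input(asList(1, 2, 3))
                   .expectOutput(asList(2, 3, 4));
    }
}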
Use of com.hazelcast.jet.core.test.TestOutbox in project hazelcast by hazelcast, from class WriteKafkaPTest, method when_transactionRolledBackHeuristically_then_sinkIgnoresIt:
@Test
public void when_transactionRolledBackHeuristically_then_sinkIgnoresIt() throws Exception {
    /*
       Design of the test:
       We'll create a processor, process 1 item and do phase-1 of the snapshot and then throw
       it away. Then we'll create a new processor and will try to restore the snapshot. It should
       try to commit the transaction from the previous processor, but that transaction timed out,
       which should be logged and ignored.
     */
    int txnTimeout = 2000;
    properties.setProperty("transaction.timeout.ms", String.valueOf(txnTimeout));
    Processor processor = WriteKafkaP.supplier(properties, o -> new ProducerRecord<>(topic, o), true).get();
    TestOutbox outbox = new TestOutbox(new int[0], 1024);
    TestProcessorContext procContext = new TestProcessorContext().setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE);
    processor.init(outbox, procContext);
    TestInbox inbox = new TestInbox();
    inbox.add("foo");
    processor.process(0, inbox);
    assertEquals("inbox size", 0, inbox.size());
    assertTrue(processor.saveToSnapshot());
    processor.close();
    inbox.addAll(outbox.snapshotQueue());
    // transaction.abort.timed.out.transaction.cleanup.interval.ms is set to 200, allow it to kick in
    sleepMillis(txnTimeout + 1000);
    // create the 2nd processor
    processor = WriteKafkaP.supplier(properties, o -> new ProducerRecord<>(topic, o), true).get();
    processor.init(outbox, procContext);
    processor.restoreFromSnapshot(inbox);
    processor.finishSnapshotRestore();
}
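The hand-off in the middle of this test, inbox.addAll(outbox.snapshotQueue()) followed by restoreFromSnapshot(inbox), is how a restart is simulated with the test doubles: the snapshot entries the first processor emitted are fed to a second processor as its restore input. A stripped-down sketch of just that hand-off (RestoreP is a hypothetical processor, not the Kafka sink):

import com.hazelcast.jet.core.AbstractProcessor;
import com.hazelcast.jet.core.test.TestInbox;
import com.hazelcast.jet.core.test.TestOutbox;
import com.hazelcast.jet.core.test.TestProcessorContext;

public class SnapshotHandoffSketch {

    // Hypothetical processor: saves one entry to the snapshot and logs whatever it restores.
    static class RestoreP extends AbstractProcessor {
        @Override
        public boolean saveToSnapshot() {
            return tryEmitToSnapshot("txn-id", "state");
        }

        @Override
        protected void restoreFromSnapshot(Object key, Object value) {
            getLogger().info("restored " + key + " -> " + value);
        }
    }

    public static void main(String[] args) throws Exception {
        RestoreP first = new RestoreP();
        TestOutbox outbox = new TestOutbox(new int[0], 128);   // no data edges, snapshot capacity 128
        first.init(outbox, new TestProcessorContext());
        first.saveToSnapshot();

        // simulate a restart: hand the snapshot entries to a fresh processor instance
        TestInbox inbox = new TestInbox();
        inbox.addAll(outbox.snapshotQueue());

        RestoreP second = new RestoreP();
        second.init(new TestOutbox(new int[0], 128), new TestProcessorContext());
        second.restoreFromSnapshot(inbox);
        second.finishSnapshotRestore();
    }
}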
Use of com.hazelcast.jet.core.test.TestOutbox in project hazelcast by hazelcast, from class StreamKafkaPTest, method when_snapshotSaved_then_offsetsRestored:
@Test
public void when_snapshotSaved_then_offsetsRestored() throws Exception {
    StreamKafkaP processor = createProcessor(properties(), 2, r -> entry(r.key(), r.value()), 10_000);
    TestOutbox outbox = new TestOutbox(new int[] { 10 }, 10);
    processor.init(outbox, new TestProcessorContext().setProcessingGuarantee(EXACTLY_ONCE));
    kafkaTestSupport.produce(topic1Name, 0, "0");
    assertEquals(entry(0, "0"), consumeEventually(processor, outbox));
    // create snapshot
    TestInbox snapshot = saveSnapshot(processor, outbox);
    Set<Entry<Object, Object>> snapshotItems = unwrapBroadcastKey(snapshot.queue());
    // consume one more item
    kafkaTestSupport.produce(topic1Name, 1, "1");
    assertEquals(entry(1, "1"), consumeEventually(processor, outbox));
    // create new processor and restore snapshot
    processor = createProcessor(properties(), 2, r -> entry(r.key(), r.value()), 10_000);
    outbox = new TestOutbox(new int[] { 10 }, 10);
    processor.init(outbox, new TestProcessorContext().setProcessingGuarantee(EXACTLY_ONCE));
    // restore snapshot
    processor.restoreFromSnapshot(snapshot);
    assertTrue("snapshot not fully processed", snapshot.isEmpty());
    TestInbox snapshot2 = saveSnapshot(processor, outbox);
    assertEquals("new snapshot not equal after restore", snapshotItems, unwrapBroadcastKey(snapshot2.queue()));
    // the second item should be produced one more time
    assertEquals(entry(1, "1"), consumeEventually(processor, outbox));
    assertNoMoreItems(processor, outbox);
}
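consumeEventually(...), saveSnapshot(...), unwrapBroadcastKey(...) and assertNoMoreItems(...) are private helpers of StreamKafkaPTest. To make the TestOutbox/TestInbox interaction explicit, here is a plausible sketch of the snapshot helper, an assumption rather than the actual implementation: call saveToSnapshot() until it reports completion, moving everything from the TestOutbox snapshot queue into a TestInbox.

import com.hazelcast.jet.core.Processor;
import com.hazelcast.jet.core.test.TestInbox;
import com.hazelcast.jet.core.test.TestOutbox;

final class SnapshotHelperSketch {

    private SnapshotHelperSketch() {
    }

    // Plausible shape of the saveSnapshot(...) helper used above (assumption, not the real code).
    static TestInbox saveSnapshot(Processor processor, TestOutbox outbox) {
        TestInbox inbox = new TestInbox();
        boolean done;
        do {
            done = processor.saveToSnapshot();
            // move whatever landed in the snapshot queue into the inbox
            inbox.addAll(outbox.snapshotQueue());
            outbox.snapshotQueue().clear();
        } while (!done);
        return inbox;
    }
}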