Search in sources:

Example 1 with TimestampedEntry

Use of com.hazelcast.jet.datamodel.TimestampedEntry in the hazelcast-jet project by hazelcast.

From class WindowGroupTransform_IntegrationTest, method testSliding_windowFirst_aggregate3:

@Test
public void testSliding_windowFirst_aggregate3() {
    IMap<Long, String> map = instance.getMap("source");
    // key is timestamp
    map.put(0L, "foo");
    map.put(2L, "taz");
    map.put(10L, "flush-item");
    IMap<Long, String> map1 = instance.getMap("source1");
    // key is timestamp
    map1.put(0L, "faa");
    map1.put(2L, "tuu");
    map1.put(10L, "flush-item");
    IMap<Long, String> map2 = instance.getMap("source2");
    // key is timestamp
    map2.put(0L, "fzz");
    map2.put(2L, "tcc");
    map2.put(10L, "flush-item");
    Pipeline p = Pipeline.create();
    StreamStageWithGrouping<Entry<Long, String>, Character> stage1 = p
            .drawFrom(Sources.<Long, String>mapJournal("source1", START_FROM_OLDEST))
            .addTimestamps(Entry::getKey, 0)
            .groupingKey(entry -> entry.getValue().charAt(0));
    StreamStageWithGrouping<Entry<Long, String>, Character> stage2 = p
            .drawFrom(Sources.<Long, String>mapJournal("source2", START_FROM_OLDEST))
            .addTimestamps(Entry::getKey, 0)
            .groupingKey(entry -> entry.getValue().charAt(0));
    p.drawFrom(Sources.<Long, String>mapJournal("source", START_FROM_OLDEST))
     .addTimestamps(Entry::getKey, 0)
     .window(WindowDefinition.tumbling(2))
     .groupingKey(entry -> entry.getValue().charAt(0))
     .aggregate3(stage1, stage2, toThreeBags())
     .peek()
     .drainTo(Sinks.list("sink"));
    instance.newJob(p);
    assertTrueEventually(() -> {
        assertEquals(
                listToString(asList(
                        new TimestampedEntry<>(2, 'f', ThreeBags.threeBags(
                                asList(entry(0L, "foo")), asList(entry(0L, "faa")), asList(entry(0L, "fzz")))),
                        new TimestampedEntry<>(4, 't', ThreeBags.threeBags(
                                asList(entry(2L, "taz")), asList(entry(2L, "tuu")), asList(entry(2L, "tcc")))))),
                listToString(instance.getHazelcastInstance().getList("sink")));
    }, 5);
}
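
The expected timestamps 2 and 4 in the assertion are window end timestamps: with WindowDefinition.tumbling(2), the events put at 0 land in the window ending at 2 and the events put at 2 in the window ending at 4, and that window end becomes the first constructor argument of the emitted TimestampedEntry. Below is a minimal sketch of that relationship; it only assumes the constructor and accessors already used in these tests, and the class name and window-end arithmetic are illustrative, not Jet internals.

import com.hazelcast.jet.datamodel.TimestampedEntry;

public class TumblingWindowEndSketch {
    public static void main(String[] args) {
        long windowSize = 2;                // WindowDefinition.tumbling(2)
        long eventTs = 0;                   // "foo" was put under key/timestamp 0
        // Illustrative tumbling-window arithmetic: the window covering eventTs ends here.
        long windowEnd = (eventTs / windowSize + 1) * windowSize;      // -> 2
        TimestampedEntry<Character, String> e = new TimestampedEntry<>(windowEnd, 'f', "foo");
        // Prints "2 f foo": the same timestamp/key shape asserted in the test above.
        System.out.println(e.getTimestamp() + " " + e.getKey() + " " + e.getValue());
    }
}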

Example 2 with TimestampedEntry

Use of com.hazelcast.jet.datamodel.TimestampedEntry in the hazelcast-jet project by hazelcast.

From class WindowGroupTransform_IntegrationTest, method testSliding_groupingFirst_withNonStreamingSource:

@Test
public void testSliding_groupingFirst_withNonStreamingSource() {
    IList<Entry<Long, String>> list = instance.getList("source");
    list.add(entry(0L, "foo"));
    list.add(entry(1L, "bar"));
    list.add(entry(2L, "baz"));
    list.add(entry(3L, "booze"));
    Pipeline p = Pipeline.create();
    p.drawFrom(Sources.<Entry<Long, String>>list("source"))
     .addTimestamps(Entry::getKey, 0)
     .groupingKey(entry -> entry.getValue().charAt(0))
     .window(WindowDefinition.tumbling(2))
     .aggregate(toSet())
     .drainTo(Sinks.list("sink"));
    instance.newJob(p).join();
    assertTrueEventually(() -> {
        assertEquals(
                set(new TimestampedEntry<>(2, 'f', set(entry(0L, "foo"))),
                    new TimestampedEntry<>(2, 'b', set(entry(1L, "bar"))),
                    new TimestampedEntry<>(4, 'b', set(entry(2L, "baz"), entry(3L, "booze")))),
                new HashSet<>(instance.getHazelcastInstance().getList("sink")));
    }, 5);
}
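
Since the sink list holds plain TimestampedEntry objects, the results can be read back with getTimestamp(), getKey() and getValue(), the same accessors used in the other examples. A minimal consumer sketch follows; the class and method names are hypothetical, and only the element type matches what this test drains to "sink".

import com.hazelcast.core.IList;
import com.hazelcast.jet.datamodel.TimestampedEntry;

import java.util.Map.Entry;
import java.util.Set;

public class SinkDumpSketch {
    // Hypothetical helper: print what the windowed toSet() aggregation wrote to the sink list.
    public static void dump(IList<TimestampedEntry<Character, Set<Entry<Long, String>>>> sink) {
        for (TimestampedEntry<Character, Set<Entry<Long, String>>> e : sink) {
            // e.g. "window end=4 key=b value=[2=baz, 3=booze]"
            System.out.println("window end=" + e.getTimestamp()
                    + " key=" + e.getKey() + " value=" + e.getValue());
        }
    }
}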

Example 3 with TimestampedEntry

Use of com.hazelcast.jet.datamodel.TimestampedEntry in the hazelcast-jet project by hazelcast.

From class SlidingWindowPTest, method before:

@Before
public void before() {
    SlidingWindowPolicy windowDef = slidingWinPolicy(4, 1);
    AggregateOperation1<Entry<?, Long>, LongAccumulator, Long> operation = AggregateOperation
            .withCreate(LongAccumulator::new)
            .andAccumulate((LongAccumulator acc, Entry<?, Long> item) -> acc.add(item.getValue()))
            .andCombine(LongAccumulator::add)
            .andDeduct(hasDeduct ? LongAccumulator::subtract : null)
            .andFinish(LongAccumulator::get);
    DistributedFunction<?, Long> keyFn = t -> KEY;
    DistributedToLongFunction<Entry<Long, Long>> timestampFn = Entry::getKey;
    DistributedSupplier<Processor> procSupplier = singleStageProcessor
            ? aggregateToSlidingWindowP(singletonList(keyFn), singletonList(timestampFn), TimestampKind.EVENT,
                    windowDef, operation, TimestampedEntry::new)
            : combineToSlidingWindowP(windowDef, operation, TimestampedEntry::new);
    // new supplier to save the last supplied instance
    supplier = () -> lastSuppliedProcessor = (SlidingWindowP) procSupplier.get();
}
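
The aggregate operation above is a windowed sum over LongAccumulator, and the optional deduct function (LongAccumulator::subtract) is what lets a sliding window drop the frame that slides out instead of recombining every remaining frame; that is the behaviour the hasDeduct parameter toggles in this test. A small standalone sketch of the idea, using only the LongAccumulator calls the method references above point to; the class name and frame values are made up.

import com.hazelcast.jet.accumulator.LongAccumulator;

public class SlidingSumSketch {
    public static void main(String[] args) {
        // Made-up per-frame sums for a window of 4 frames sliding by 1 (slidingWinPolicy(4, 1)).
        LongAccumulator[] frames = {acc(3), acc(1), acc(4), acc(1)};
        LongAccumulator window = new LongAccumulator();
        for (LongAccumulator f : frames) {
            window.add(f);                  // combine: LongAccumulator::add
        }
        System.out.println(window.get());   // 9 = 3 + 1 + 4 + 1

        // Slide by one frame: deduct the expiring frame, combine the new one.
        LongAccumulator newFrame = acc(5);
        window.subtract(frames[0]);          // deduct: LongAccumulator::subtract
        window.add(newFrame);
        System.out.println(window.get());    // 11 = 1 + 4 + 1 + 5
    }

    private static LongAccumulator acc(long v) {
        LongAccumulator a = new LongAccumulator();
        a.add(v);
        return a;
    }
}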

Example 4 with TimestampedEntry

Use of com.hazelcast.jet.datamodel.TimestampedEntry in the hazelcast-jet project by hazelcast.

From class SlidingWindowP_twoStageSnapshotTest, method before:

@Before
public void before() {
    SlidingWindowPolicy windowDef = slidingWinPolicy(4, 1);
    AggregateOperation1<Entry<?, Long>, LongAccumulator, Long> aggrOp = AggregateOperation
            .withCreate(LongAccumulator::new)
            .andAccumulate((LongAccumulator acc, Entry<?, Long> item) -> acc.add(item.getValue()))
            .andCombine(LongAccumulator::add)
            .andDeduct(LongAccumulator::subtract)
            .andFinish(LongAccumulator::get);
    DistributedSupplier<Processor> procSupplier1 = Processors.accumulateByFrameP(
            singletonList((DistributedFunction<? super Entry<Long, Long>, ?>) t -> KEY),
            singletonList((DistributedToLongFunction<? super Entry<Long, Long>>) Entry::getKey),
            TimestampKind.EVENT, windowDef,
            ((AggregateOperation1<? super Entry<Long, Long>, LongAccumulator, ?>) aggrOp).withFinishFn(identity()));
    DistributedSupplier<Processor> procSupplier2 = combineToSlidingWindowP(windowDef, aggrOp, TimestampedEntry::new);
    // new supplier to save the last supplied instance
    stage1Supplier = () -> lastSuppliedStage1Processor = (SlidingWindowP<?, ?, ?, ?>) procSupplier1.get();
    stage2Supplier = () -> lastSuppliedStage2Processor = (SlidingWindowP<?, ?, ?, ?>) procSupplier2.get();
}
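
The key detail in this two-stage setup is that stage 1 emits accumulators rather than finished values: its copy of aggrOp gets withFinishFn(identity()), and stage 2 (combineToSlidingWindowP) combines those accumulators per window and only then applies the real finish function, wrapping each result via TimestampedEntry::new. A minimal sketch of that hand-off, with hypothetical partial counts standing in for what two stage-1 processors would emit for the same frame:

import com.hazelcast.jet.accumulator.LongAccumulator;

public class TwoStageCombineSketch {
    public static void main(String[] args) {
        // Stage 1 (accumulateByFrameP with withFinishFn(identity())) emits accumulators:
        LongAccumulator partialFromMember1 = new LongAccumulator();
        partialFromMember1.add(7);                 // hypothetical partial sum
        LongAccumulator partialFromMember2 = new LongAccumulator();
        partialFromMember2.add(5);                 // hypothetical partial sum

        // Stage 2 (combineToSlidingWindowP) combines the partials per window ...
        LongAccumulator combined = new LongAccumulator();
        combined.add(partialFromMember1);          // andCombine(LongAccumulator::add)
        combined.add(partialFromMember2);

        // ... and only then applies the real finish function (LongAccumulator::get),
        // emitting the result wrapped by TimestampedEntry::new.
        System.out.println(combined.get());        // 12
    }
}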

Example 5 with TimestampedEntry

Use of com.hazelcast.jet.datamodel.TimestampedEntry in the hazelcast-jet project by hazelcast.

From class JobRestartWithSnapshotTest, method when_nodeDown_then_jobRestartsFromSnapshot:

public void when_nodeDown_then_jobRestartsFromSnapshot(boolean twoStage) throws Exception {
    /* Design of this test:

        It uses a random partitioned generator of source events. The events are Map.Entry(partitionId, timestamp).
        For each partition, timestamps from 0..elementsInPartition are generated.

        We start the test with two nodes and localParallelism(1) for the source. Source instances generate items
        at the same rate of 10 per second: this causes one instance to advance twice as fast as the other in
        terms of timestamp. The source processor saves partition offsets similarly to how streamKafka() and
        streamMap() do.

        After some time we shut down one instance. The job restarts from the snapshot and all partitions are
        restored to a single source processor instance. The partition offsets are very different, so the source
        is written to emit from the most-behind partition first, in order not to emit late events from the
        partitions that are further ahead.

        The local parallelism of InsertWatermarkP is also 1, to avoid the edge case where different instances
        of InsertWatermarkP initialize with their first event in different frames and start the no-gap emission
        from different watermarks, which might cause the downstream SlidingWindowP to miss some of the first
        windows.

        The sink writes to an IMap, which is an idempotent sink.

        The resulting contents of the sink map are compared to the expected value.
     */
    DAG dag = new DAG();
    SlidingWindowPolicy wDef = SlidingWindowPolicy.tumblingWinPolicy(3);
    AggregateOperation1<Object, LongAccumulator, Long> aggrOp = counting();
    IMap<List<Long>, Long> result = instance1.getMap("result");
    result.clear();
    SequencesInPartitionsMetaSupplier sup = new SequencesInPartitionsMetaSupplier(3, 180);
    Vertex generator = dag.newVertex("generator", throttle(sup, 30)).localParallelism(1);
    Vertex insWm = dag.newVertex("insWm", insertWatermarksP(wmGenParams(
            entry -> ((Entry<Integer, Integer>) entry).getValue(), limitingLag(0), emitByFrame(wDef), -1)))
            .localParallelism(1);
    Vertex map = dag.newVertex("map", mapP((TimestampedEntry e) ->
            entry(asList(e.getTimestamp(), (long) (int) e.getKey()), e.getValue())));
    Vertex writeMap = dag.newVertex("writeMap", SinkProcessors.writeMapP("result"));
    if (twoStage) {
        Vertex aggregateStage1 = dag.newVertex("aggregateStage1", Processors.accumulateByFrameP(
                singletonList((DistributedFunction<? super Object, ?>) t -> ((Entry<Integer, Integer>) t).getKey()),
                singletonList(t1 -> ((Entry<Integer, Integer>) t1).getValue()),
                TimestampKind.EVENT, wDef, aggrOp.withFinishFn(identity())));
        Vertex aggregateStage2 = dag.newVertex("aggregateStage2",
                combineToSlidingWindowP(wDef, aggrOp, TimestampedEntry::new));
        dag.edge(between(insWm, aggregateStage1).partitioned(entryKey()))
           .edge(between(aggregateStage1, aggregateStage2).distributed().partitioned(entryKey()))
           .edge(between(aggregateStage2, map));
    } else {
        Vertex aggregate = dag.newVertex("aggregate", Processors.aggregateToSlidingWindowP(
                singletonList((DistributedFunction<Object, Integer>) t -> ((Entry<Integer, Integer>) t).getKey()),
                singletonList(t1 -> ((Entry<Integer, Integer>) t1).getValue()),
                TimestampKind.EVENT, wDef, aggrOp, TimestampedEntry::new));
        dag.edge(between(insWm, aggregate).distributed().partitioned(entryKey()))
           .edge(between(aggregate, map));
    }
    dag.edge(between(generator, insWm)).edge(between(map, writeMap));
    JobConfig config = new JobConfig();
    config.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE);
    config.setSnapshotIntervalMillis(1200);
    Job job = instance1.newJob(dag, config);
    SnapshotRepository snapshotRepository = new SnapshotRepository(instance1);
    int timeout = (int) (MILLISECONDS.toSeconds(config.getSnapshotIntervalMillis()) + 2);
    // wait until we have at least one snapshot
    IMapJet<Long, Object> snapshotsMap = snapshotRepository.getSnapshotMap(job.getId());
    assertTrueEventually(() -> assertTrue("No snapshot produced",
            snapshotsMap.entrySet().stream().anyMatch(
                    en -> en.getValue() instanceof SnapshotRecord
                            && ((SnapshotRecord) en.getValue()).isSuccessful())), timeout);
    waitForNextSnapshot(snapshotsMap, timeout);
    // wait a little more to emit something, so that it will be overwritten in the sink map
    Thread.sleep(300);
    instance2.shutdown();
    // Now the job should detect member shutdown and restart from snapshot.
    // Let's wait until the next snapshot appears.
    waitForNextSnapshot(snapshotsMap, (int) (MILLISECONDS.toSeconds(config.getSnapshotIntervalMillis()) + 10));
    waitForNextSnapshot(snapshotsMap, timeout);
    job.join();
    // compute expected result
    Map<List<Long>, Long> expectedMap = new HashMap<>();
    for (long partition = 0; partition < sup.numPartitions; partition++) {
        long cnt = 0;
        for (long value = 1; value <= sup.elementsInPartition; value++) {
            cnt++;
            if (value % wDef.frameSize() == 0) {
                expectedMap.put(asList(value, partition), cnt);
                cnt = 0;
            }
        }
        if (cnt > 0) {
            expectedMap.put(asList(wDef.higherFrameTs(sup.elementsInPartition - 1), partition), cnt);
        }
    }
    // check expected result
    if (!expectedMap.equals(result)) {
        System.out.println("All expected entries: " + expectedMap.entrySet().stream().map(Object::toString).collect(joining(", ")));
        System.out.println("All actual entries: " + result.entrySet().stream().map(Object::toString).collect(joining(", ")));
        System.out.println("Non-received expected items: " + expectedMap.keySet().stream().filter(key -> !result.containsKey(key)).map(Object::toString).collect(joining(", ")));
        System.out.println("Received non-expected items: " + result.entrySet().stream().filter(entry -> !expectedMap.containsKey(entry.getKey())).map(Object::toString).collect(joining(", ")));
        System.out.println("Different keys: ");
        for (Entry<List<Long>, Long> rEntry : result.entrySet()) {
            Long expectedValue = expectedMap.get(rEntry.getKey());
            if (expectedValue != null && !expectedValue.equals(rEntry.getValue())) {
                System.out.println("key: " + rEntry.getKey() + ", expected value: " + expectedValue + ", actual value: " + rEntry.getValue());
            }
        }
        System.out.println("-- end of different keys");
        assertEquals(expectedMap, new HashMap<>(result));
    }
    assertTrue("Snapshots map not empty after job finished", snapshotsMap.isEmpty());
}
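
The expected-result loop builds, for every partition, one map entry per tumbling frame: the key is [windowEndTs, partitionId] and the value is the number of timestamps that fell into that frame (counting()), with a possibly smaller final frame keyed by wDef.higherFrameTs(...). Below is a worked sketch of the same loop with smaller, made-up numbers; the class name is hypothetical and the inline window-end arithmetic stands in for wDef.higherFrameTs().

import static java.util.Arrays.asList;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ExpectedWindowCountsSketch {
    public static void main(String[] args) {
        // Made-up sizes (frame size 3, timestamps 0..6 in a single partition) to show how the
        // expected map is built: key = [windowEndTs, partitionId], value = count in that frame.
        long frameSize = 3;
        long elementsInPartition = 7;
        long partition = 0;
        Map<List<Long>, Long> expected = new HashMap<>();
        long cnt = 0;
        for (long value = 1; value <= elementsInPartition; value++) {
            cnt++;
            if (value % frameSize == 0) {
                expected.put(asList(value, partition), cnt);   // full frame: count == frameSize
                cnt = 0;
            }
        }
        if (cnt > 0) {
            // The last, partial frame ends at the next frame boundary above the last timestamp.
            long higherFrameTs = ((elementsInPartition - 1) / frameSize + 1) * frameSize;
            expected.put(asList(higherFrameTs, partition), cnt);
        }
        // e.g. {[3, 0]=3, [6, 0]=3, [9, 0]=1} (map iteration order may vary)
        System.out.println(expected);
    }
}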
