Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class CountTriggerTest, the method testMergingWindows:
@Test
public void testMergingWindows() throws Exception {
    TriggerTestHarness<Object, TimeWindow> testHarness =
            new TriggerTestHarness<>(CountTrigger.<TimeWindow>of(3), new TimeWindow.Serializer());

    assertEquals(TriggerResult.CONTINUE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(0, 2)));
    assertEquals(TriggerResult.CONTINUE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(2, 4)));
    assertEquals(TriggerResult.CONTINUE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(4, 6)));

    // shouldn't have any timers
    assertEquals(0, testHarness.numProcessingTimeTimers());
    assertEquals(0, testHarness.numEventTimeTimers());

    assertEquals(3, testHarness.numStateEntries());
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(0, 2)));
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(2, 4)));
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(4, 6)));

    testHarness.mergeWindows(new TimeWindow(0, 4), Lists.newArrayList(new TimeWindow(0, 2), new TimeWindow(2, 4)));

    assertEquals(2, testHarness.numStateEntries());
    assertEquals(0, testHarness.numStateEntries(new TimeWindow(0, 2)));
    assertEquals(0, testHarness.numStateEntries(new TimeWindow(2, 4)));
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(0, 4)));
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(4, 6)));

    assertEquals(TriggerResult.FIRE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(0, 4)));

    assertEquals(1, testHarness.numStateEntries());
    assertEquals(0, testHarness.numStateEntries(new TimeWindow(0, 4)));
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(4, 6)));

    assertEquals(TriggerResult.CONTINUE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(4, 6)));
    assertEquals(TriggerResult.FIRE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(4, 6)));

    assertEquals(0, testHarness.numStateEntries());
}
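The FIRE on the merged window follows from CountTrigger's merging behavior: merging TimeWindow(0, 2) and TimeWindow(2, 4) also merges their count state (1 + 1 = 2), so the next element pushes the merged TimeWindow(0, 4) to the configured count of 3. For comparison, here is a minimal sketch of the non-merging case, reusing the TriggerTestHarness API shown above; the method name is hypothetical and the snippet is not part of the original test class.

@Test
public void testCountFiresAtThreshold() throws Exception {
    // illustrative companion sketch: a single window fires on its third element
    TriggerTestHarness<Object, TimeWindow> harness =
            new TriggerTestHarness<>(CountTrigger.<TimeWindow>of(3), new TimeWindow.Serializer());
    TimeWindow window = new TimeWindow(0, 2);

    assertEquals(TriggerResult.CONTINUE, harness.processElement(new StreamRecord<Object>(1), window));
    assertEquals(TriggerResult.CONTINUE, harness.processElement(new StreamRecord<Object>(1), window));
    // the third element reaches the count threshold and fires the window
    assertEquals(TriggerResult.FIRE, harness.processElement(new StreamRecord<Object>(1), window));
}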
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class CountTriggerTest, the method testClear:
/**
 * Verify that clear() does not leak across windows.
 */
@Test
public void testClear() throws Exception {
    TriggerTestHarness<Object, TimeWindow> testHarness =
            new TriggerTestHarness<>(CountTrigger.<TimeWindow>of(3), new TimeWindow.Serializer());

    assertEquals(TriggerResult.CONTINUE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(0, 2)));
    assertEquals(TriggerResult.CONTINUE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(2, 4)));

    // shouldn't have any timers
    assertEquals(0, testHarness.numProcessingTimeTimers());
    assertEquals(0, testHarness.numEventTimeTimers());

    assertEquals(2, testHarness.numStateEntries());
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(0, 2)));
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(2, 4)));

    testHarness.clearTriggerState(new TimeWindow(2, 4));

    assertEquals(1, testHarness.numStateEntries());
    assertEquals(1, testHarness.numStateEntries(new TimeWindow(0, 2)));
    assertEquals(0, testHarness.numStateEntries(new TimeWindow(2, 4)));

    testHarness.clearTriggerState(new TimeWindow(0, 2));

    assertEquals(0, testHarness.numStateEntries());
    assertEquals(0, testHarness.numStateEntries(new TimeWindow(0, 2)));
    assertEquals(0, testHarness.numStateEntries(new TimeWindow(2, 4)));
}
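Once clearTriggerState has removed a window's count state, a later element for that window starts counting from zero again. A minimal continuation sketch, assuming the same harness instance and API as above (not part of the original test):

// the cleared window behaves like a fresh one: the count restarts at 1
assertEquals(TriggerResult.CONTINUE, testHarness.processElement(new StreamRecord<Object>(1), new TimeWindow(0, 2)));
assertEquals(1, testHarness.numStateEntries());
assertEquals(1, testHarness.numStateEntries(new TimeWindow(0, 2)));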
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class EvictingWindowOperatorTest, the method testDeltaEvictorEvictBefore:
/**
 * Tests DeltaEvictor with evictBefore behavior.
 */
@Test
public void testDeltaEvictorEvictBefore() throws Exception {
    AtomicInteger closeCalled = new AtomicInteger(0);
    final int TRIGGER_COUNT = 2;
    final boolean EVICT_AFTER = false;
    final int THRESHOLD = 2;

    TypeInformation<Tuple2<String, Integer>> inputType = TypeInfoParser.parse("Tuple2<String, Integer>");

    @SuppressWarnings({ "unchecked", "rawtypes" })
    TypeSerializer<StreamRecord<Tuple2<String, Integer>>> streamRecordSerializer =
            (TypeSerializer<StreamRecord<Tuple2<String, Integer>>>) new StreamElementSerializer(inputType.createSerializer(new ExecutionConfig()));

    ListStateDescriptor<StreamRecord<Tuple2<String, Integer>>> stateDesc =
            new ListStateDescriptor<>("window-contents", streamRecordSerializer);

    EvictingWindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, GlobalWindow> operator =
            new EvictingWindowOperator<>(
                    GlobalWindows.create(),
                    new GlobalWindow.Serializer(),
                    new TupleKeySelector(),
                    BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                    stateDesc,
                    new InternalIterableWindowFunction<>(new RichSumReducer<GlobalWindow>(closeCalled)),
                    CountTrigger.of(TRIGGER_COUNT),
                    DeltaEvictor.of(THRESHOLD, new DeltaFunction<Tuple2<String, Integer>>() {

                        @Override
                        public double getDelta(Tuple2<String, Integer> oldDataPoint, Tuple2<String, Integer> newDataPoint) {
                            return newDataPoint.f1 - oldDataPoint.f1;
                        }
                    }, EVICT_AFTER),
                    0,
                    null);

    OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector(), BasicTypeInfo.STRING_TYPE_INFO);

    long initialTime = 0L;
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

    testHarness.open();

    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), initialTime + 3000));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 4), initialTime + 3999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), initialTime + 20));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), initialTime));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 5), initialTime + 999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 5), initialTime + 1998));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 6), initialTime + 1999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), initialTime + 1000));

    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 4), Long.MAX_VALUE));
    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 11), Long.MAX_VALUE));
    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 2), Long.MAX_VALUE));

    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new ResultSortComparator());

    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 3), initialTime + 10999));
    testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 10), initialTime + 1000));

    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 8), Long.MAX_VALUE));
    expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 10), Long.MAX_VALUE));

    TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new ResultSortComparator());

    testHarness.close();

    Assert.assertEquals("Close was not called.", 1, closeCalled.get());
}
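With EVICT_AFTER set to false the evictor runs before the window function. For the first key2 firing, the buffered element (key2, 1) has a delta of 3 to the newest element (key2, 4), which exceeds the threshold of 2, so it is evicted and only 4 is summed. The evictor can also be built standalone; the following is a sketch reusing the same classes referenced above, with illustrative variable names.

DeltaFunction<Tuple2<String, Integer>> delta = new DeltaFunction<Tuple2<String, Integer>>() {

    @Override
    public double getDelta(Tuple2<String, Integer> oldDataPoint, Tuple2<String, Integer> newDataPoint) {
        return newDataPoint.f1 - oldDataPoint.f1;
    }
};

// evict before the window function is applied (matches EVICT_AFTER = false above)
DeltaEvictor<Tuple2<String, Integer>, GlobalWindow> evictBefore =
        DeltaEvictor.<Tuple2<String, Integer>, GlobalWindow>of(2, delta, false);

// evict after the window function has been applied
DeltaEvictor<Tuple2<String, Integer>, GlobalWindow> evictAfter =
        DeltaEvictor.<Tuple2<String, Integer>, GlobalWindow>of(2, delta, true);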
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class CEPOperatorTest, the method testKeyedAdvancingTimeWithoutElements:
/**
 * Tests that the internal time of a CEP operator advances only when watermarks are received. See FLINK-5033.
 */
@Test
public void testKeyedAdvancingTimeWithoutElements() throws Exception {
    final KeySelector<Event, Integer> keySelector = new TestKeySelector();

    final Event startEvent = new Event(42, "start", 1.0);
    final long watermarkTimestamp1 = 5L;
    final long watermarkTimestamp2 = 13L;

    final Map<String, Event> expectedSequence = new HashMap<>(2);
    expectedSequence.put("start", startEvent);

    OneInputStreamOperatorTestHarness<Event, Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    new TimeoutKeyedCEPPatternOperator<>(
                            Event.createTypeSerializer(),
                            false,
                            keySelector,
                            IntSerializer.INSTANCE,
                            new NFAFactory(true),
                            true),
                    keySelector,
                    BasicTypeInfo.INT_TYPE_INFO);

    try {
        harness.setup(new KryoSerializer<>(
                (Class<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>>) (Object) Either.class,
                new ExecutionConfig()));
        harness.open();

        harness.processElement(new StreamRecord<>(startEvent, 3L));
        harness.processWatermark(new Watermark(watermarkTimestamp1));
        harness.processWatermark(new Watermark(watermarkTimestamp2));

        Queue<Object> result = harness.getOutput();

        assertEquals(3L, result.size());

        Object watermark1 = result.poll();
        assertTrue(watermark1 instanceof Watermark);
        assertEquals(watermarkTimestamp1, ((Watermark) watermark1).getTimestamp());

        Object resultObject = result.poll();
        assertTrue(resultObject instanceof StreamRecord);
        StreamRecord<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>> streamRecord =
                (StreamRecord<Either<Tuple2<Map<String, Event>, Long>, Map<String, Event>>>) resultObject;
        assertTrue(streamRecord.getValue() instanceof Either.Left);

        Either.Left<Tuple2<Map<String, Event>, Long>, Map<String, Event>> left =
                (Either.Left<Tuple2<Map<String, Event>, Long>, Map<String, Event>>) streamRecord.getValue();
        Tuple2<Map<String, Event>, Long> leftResult = left.left();

        assertEquals(watermarkTimestamp2, (long) leftResult.f1);
        assertEquals(expectedSequence, leftResult.f0);

        Object watermark2 = result.poll();
        assertTrue(watermark2 instanceof Watermark);
        assertEquals(watermarkTimestamp2, ((Watermark) watermark2).getTimestamp());
    } finally {
        harness.close();
    }
}
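A StreamRecord carries an event-time timestamp alongside its value, while a Watermark carries only a timestamp; it is the watermarks, not the element timestamps, that advance the operator's event time in the test above. A minimal sketch of that distinction, reusing the constructors and getters already exercised (values are illustrative, not part of the original test):

StreamRecord<Event> element = new StreamRecord<>(startEvent, 3L); // value plus event-time timestamp
Watermark watermark = new Watermark(5L);                          // advances event time to 5

assertEquals(3L, element.getTimestamp());
assertEquals(startEvent, element.getValue());
assertEquals(5L, watermark.getTimestamp());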
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class BoltWrapperTest, the method testMultipleOutputStreams:
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testMultipleOutputStreams() throws Exception {
    final boolean rawOutType1 = super.r.nextBoolean();
    final boolean rawOutType2 = super.r.nextBoolean();

    final StreamRecord record = mock(StreamRecord.class);
    when(record.getValue()).thenReturn(2).thenReturn(3);

    final Output output = mock(Output.class);

    final TestBolt bolt = new TestBolt();
    final HashSet<String> raw = new HashSet<String>();
    if (rawOutType1) {
        raw.add("stream1");
    }
    if (rawOutType2) {
        raw.add("stream2");
    }

    final BoltWrapper wrapper = new BoltWrapper(bolt, null, raw);
    wrapper.setup(createMockStreamTask(), new StreamConfig(new Configuration()), output);
    wrapper.open();

    final SplitStreamType splitRecord = new SplitStreamType<Integer>();
    if (rawOutType1) {
        splitRecord.streamId = "stream1";
        splitRecord.value = 2;
    } else {
        splitRecord.streamId = "stream1";
        splitRecord.value = new Tuple1<Integer>(2);
    }
    wrapper.processElement(record);
    verify(output).collect(new StreamRecord<SplitStreamType>(splitRecord));

    if (rawOutType2) {
        splitRecord.streamId = "stream2";
        splitRecord.value = 3;
    } else {
        splitRecord.streamId = "stream2";
        splitRecord.value = new Tuple1<Integer>(3);
    }
    wrapper.processElement(record);
    verify(output, times(2)).collect(new StreamRecord<SplitStreamType>(splitRecord));
}