Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class NFATest, method testSimpleNFA:
@Test
public void testSimpleNFA() {
    // NFA over Event with window length 0 (no pruning) and timeout handling disabled
    NFA<Event> nfa = new NFA<>(Event.createTypeSerializer(), 0, false);

    List<StreamRecord<Event>> streamEvents = new ArrayList<>();
    streamEvents.add(new StreamRecord<>(new Event(1, "start", 1.0), 1L));
    streamEvents.add(new StreamRecord<>(new Event(2, "bar", 2.0), 2L));
    streamEvents.add(new StreamRecord<>(new Event(3, "start", 3.0), 3L));
    streamEvents.add(new StreamRecord<>(new Event(4, "end", 4.0), 4L));

    // states: anonymous starting state -> "start" -> "end" (final)
    State<Event> startingState = new State<>("", State.StateType.Start);
    State<Event> startState = new State<>("start", State.StateType.Normal);
    State<Event> endState = new State<>("end", State.StateType.Final);

    // take an event named "start" to move from the starting state to "start"
    StateTransition<Event> starting2Start = new StateTransition<>(
            StateTransitionAction.TAKE, startState, new FilterFunction<Event>() {

        private static final long serialVersionUID = -4869589195918650396L;

        @Override
        public boolean filter(Event value) throws Exception {
            return value.getName().equals("start");
        }
    });

    // take an event named "end" to move from "start" to the final state
    StateTransition<Event> start2End = new StateTransition<>(
            StateTransitionAction.TAKE, endState, new FilterFunction<Event>() {

        private static final long serialVersionUID = 2979804163709590673L;

        @Override
        public boolean filter(Event value) throws Exception {
            return value.getName().equals("end");
        }
    });

    // ignore any other event while waiting in "start"
    StateTransition<Event> start2Start = new StateTransition<>(
            StateTransitionAction.IGNORE, startState, null);

    startingState.addStateTransition(starting2Start);
    startState.addStateTransition(start2End);
    startState.addStateTransition(start2Start);

    nfa.addState(startingState);
    nfa.addState(startState);
    nfa.addState(endState);

    // both "start" events (1 and 3) should be completed by the single "end" event (4)
    Set<Map<String, Event>> expectedPatterns = new HashSet<>();

    Map<String, Event> firstPattern = new HashMap<>();
    firstPattern.put("start", new Event(1, "start", 1.0));
    firstPattern.put("end", new Event(4, "end", 4.0));

    Map<String, Event> secondPattern = new HashMap<>();
    secondPattern.put("start", new Event(3, "start", 3.0));
    secondPattern.put("end", new Event(4, "end", 4.0));

    expectedPatterns.add(firstPattern);
    expectedPatterns.add(secondPattern);

    Collection<Map<String, Event>> actualPatterns = runNFA(nfa, streamEvents);

    assertEquals(expectedPatterns, actualPatterns);
}
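For comparison, application code would normally not wire up the NFA states by hand; the same "start" followed-by "end" pattern would be declared through Flink CEP's Pattern API and compiled to an NFA internally. A hedged sketch, assuming the Flink 1.2-era API in which where() takes a FilterFunction (later versions use SimpleCondition instead):

import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.cep.pattern.Pattern;

Pattern<Event, ?> pattern = Pattern.<Event>begin("start")
        .where(new FilterFunction<Event>() {
            @Override
            public boolean filter(Event value) {
                return value.getName().equals("start");
            }
        })
        // followedBy tolerates unrelated events in between,
        // mirroring the IGNORE self-loop on the "start" state above
        .followedBy("end")
        .where(new FilterFunction<Event>() {
            @Override
            public boolean filter(Event value) {
                return value.getName().equals("end");
            }
        });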
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class NFATest, method testTimeoutWindowPruningWindowBorders:
/**
 * Tests that pruning shared buffer elements and computation states use the same window border
 * semantics (left side inclusive and right side exclusive).
 */
@Test
public void testTimeoutWindowPruningWindowBorders() {
    // NFA with a window length of 2 time units
    NFA<Event> nfa = createStartEndNFA(2);

    List<StreamRecord<Event>> streamEvents = new ArrayList<>();
    streamEvents.add(new StreamRecord<>(new Event(1, "start", 1.0), 1L));
    streamEvents.add(new StreamRecord<>(new Event(2, "start", 2.0), 2L));
    streamEvents.add(new StreamRecord<>(new Event(3, "foobar", 3.0), 3L));
    streamEvents.add(new StreamRecord<>(new Event(4, "end", 4.0), 3L));

    // only the second "start" is close enough to the "end" event to match
    Set<Map<String, Event>> expectedPatterns = new HashSet<>();

    Map<String, Event> secondPattern = new HashMap<>();
    secondPattern.put("start", new Event(2, "start", 2.0));
    secondPattern.put("end", new Event(4, "end", 4.0));

    expectedPatterns.add(secondPattern);

    Collection<Map<String, Event>> actualPatterns = runNFA(nfa, streamEvents);

    assertEquals(expectedPatterns, actualPatterns);
}
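The border semantics play out as follows: with a window length of 2, a match that starts at timestamp t may only contain events with timestamps in [t, t + 2). The run opened by the first "start" (t = 1) is therefore pruned at timestamp 3, since 3 - 1 >= 2, while the run opened at t = 2 still admits the "end" event at timestamp 3 (3 - 2 < 2), leaving exactly the one expected pattern.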
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class AbstractKeyedCEPPatternOperator, method restoreState:
////////////////////// Backwards Compatibility //////////////////////
@Override
public void restoreState(FSDataInputStream in) throws Exception {
    // this is the flag indicating if we have udf
    // state to restore (not needed here)
    in.read();

    DataInputViewStreamWrapper inputView = new DataInputViewStreamWrapper(in);

    InternalWatermarkCallbackService<KEY> watermarkCallbackService = getInternalWatermarkCallbackService();

    if (migratingFromOldKeyedOperator) {
        int numberEntries = inputView.readInt();
        for (int i = 0; i < numberEntries; i++) {
            watermarkCallbackService.registerKeyForWatermarkCallback(keySerializer.deserialize(inputView));
        }
    } else {
        final ObjectInputStream ois = new ObjectInputStream(in);

        // retrieve the NFA
        @SuppressWarnings("unchecked")
        NFA<IN> nfa = (NFA<IN>) ois.readObject();

        // retrieve the elements that were pending in the priority queue
        MultiplexingStreamRecordSerializer<IN> recordSerializer = new MultiplexingStreamRecordSerializer<>(inputSerializer);
        PriorityQueue<StreamRecord<IN>> priorityQueue = priorityQueueFactory.createPriorityQueue();

        int entries = ois.readInt();
        for (int i = 0; i < entries; i++) {
            StreamElement streamElement = recordSerializer.deserialize(inputView);
            priorityQueue.offer(streamElement.<IN>asRecord());
        }

        // finally register the retrieved state with the new keyed state.
        setCurrentKey((byte) 0);

        nfaOperatorState.update(nfa);
        priorityQueueOperatorState.update(priorityQueue);

        if (!isProcessingTime) {
            // this is relevant only for event/ingestion time
            // need to work around type restrictions
            InternalWatermarkCallbackService rawWatermarkCallbackService =
                    (InternalWatermarkCallbackService) watermarkCallbackService;

            rawWatermarkCallbackService.registerKeyForWatermarkCallback((byte) 0);
        }

        ois.close();
    }
}
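Both restore paths read keys and pending records through a serializer's binary format rather than plain Java serialization. A minimal, self-contained round-trip sketch of that mechanism (Flink's IntSerializer stands in for the operator's keySerializer; the class name SerializerRoundTrip is made up for illustration):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

public class SerializerRoundTrip {

    public static void main(String[] args) throws Exception {
        // write a key the way a snapshot would
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        IntSerializer.INSTANCE.serialize(42, new DataOutputViewStreamWrapper(bos));

        // read it back the way restoreState() does
        DataInputViewStreamWrapper inputView =
                new DataInputViewStreamWrapper(new ByteArrayInputStream(bos.toByteArray()));
        int key = IntSerializer.INSTANCE.deserialize(inputView);

        System.out.println(key); // prints 42
    }
}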
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class AggregatingAlignedProcessingTimeWindowOperatorTest, method checkpointRestoreWithPendingWindowTumbling:
@Test
public void checkpointRestoreWithPendingWindowTumbling() {
    try {
        final int windowSize = 200;

        // tumbling window that triggers every 200 milliseconds (size == slide)
        AggregatingProcessingTimeWindowOperator<Integer, Tuple2<Integer, Integer>> op =
                new AggregatingProcessingTimeWindowOperator<>(
                        sumFunction, fieldOneSelector,
                        IntSerializer.INSTANCE, tupleSerializer,
                        windowSize, windowSize);

        OneInputStreamOperatorTestHarness<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>> testHarness =
                new OneInputStreamOperatorTestHarness<>(op);

        testHarness.setProcessingTime(0);

        testHarness.setup();
        testHarness.open();

        // inject some elements
        final int numElementsFirst = 700;
        final int numElements = 1000;

        for (int i = 0; i < numElementsFirst; i++) {
            StreamRecord<Tuple2<Integer, Integer>> next = new StreamRecord<>(new Tuple2<>(i, i));
            testHarness.processElement(next);
        }

        // draw a snapshot
        List<Tuple2<Integer, Integer>> resultAtSnapshot = extractFromStreamRecords(testHarness.getOutput());
        int beforeSnapShot = resultAtSnapshot.size();
        StreamStateHandle state = testHarness.snapshotLegacy(1L, System.currentTimeMillis());
        int afterSnapShot = testHarness.getOutput().size();
        assertEquals("operator performed computation during snapshot", beforeSnapShot, afterSnapShot);
        assertTrue(resultAtSnapshot.size() <= numElementsFirst);

        // inject some more elements; these must not show up in the restored state
        for (int i = numElementsFirst; i < numElements; i++) {
            StreamRecord<Tuple2<Integer, Integer>> next = new StreamRecord<>(new Tuple2<>(i, i));
            testHarness.processElement(next);
        }

        testHarness.close();
        op.dispose();

        // re-create the operator and restore the state
        op = new AggregatingProcessingTimeWindowOperator<>(
                sumFunction, fieldOneSelector,
                IntSerializer.INSTANCE, tupleSerializer,
                windowSize, windowSize);

        testHarness = new OneInputStreamOperatorTestHarness<>(op);

        testHarness.setup();
        testHarness.restore(state);
        testHarness.open();

        // inject the remaining elements
        for (int i = numElementsFirst; i < numElements; i++) {
            StreamRecord<Tuple2<Integer, Integer>> next = new StreamRecord<>(new Tuple2<>(i, i));
            testHarness.processElement(next);
        }

        testHarness.setProcessingTime(200);

        // get and verify the result
        List<Tuple2<Integer, Integer>> finalResult = new ArrayList<>(resultAtSnapshot);
        List<Tuple2<Integer, Integer>> partialFinalResult = extractFromStreamRecords(testHarness.getOutput());
        finalResult.addAll(partialFinalResult);
        assertEquals(numElements, finalResult.size());

        Collections.sort(finalResult, tupleComparator);
        for (int i = 0; i < numElements; i++) {
            assertEquals(i, finalResult.get(i).f0.intValue());
            assertEquals(i, finalResult.get(i).f1.intValue());
        }

        testHarness.close();
        op.dispose();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
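The verification logic relies on the tumbling semantics: since size equals slide, every element is assigned to exactly one window, so the results emitted before the snapshot plus the results emitted after the restore must cover each of the numElements inputs exactly once. After sorting, position i of the combined list must hold the tuple (i, i), which is what the final loop asserts.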
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
From the class AggregatingAlignedProcessingTimeWindowOperatorTest, method checkpointRestoreWithPendingWindowSliding:
@Test
public void checkpointRestoreWithPendingWindowSliding() {
    try {
        final int factor = 4;
        final int windowSlide = 50;
        final int windowSize = factor * windowSlide;

        // sliding window (200 msecs) that slides every 50 msecs
        AggregatingProcessingTimeWindowOperator<Integer, Tuple2<Integer, Integer>> op =
                new AggregatingProcessingTimeWindowOperator<>(
                        sumFunction, fieldOneSelector,
                        IntSerializer.INSTANCE, tupleSerializer,
                        windowSize, windowSlide);

        OneInputStreamOperatorTestHarness<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>> testHarness =
                new OneInputStreamOperatorTestHarness<>(op);

        testHarness.setProcessingTime(0);

        testHarness.setup();
        testHarness.open();

        // inject some elements
        final int numElements = 1000;
        final int numElementsFirst = 700;

        for (int i = 0; i < numElementsFirst; i++) {
            StreamRecord<Tuple2<Integer, Integer>> next = new StreamRecord<>(new Tuple2<>(i, i));
            testHarness.processElement(next);
        }

        // draw a snapshot
        List<Tuple2<Integer, Integer>> resultAtSnapshot = extractFromStreamRecords(testHarness.getOutput());
        int beforeSnapShot = resultAtSnapshot.size();
        StreamStateHandle state = testHarness.snapshotLegacy(1L, System.currentTimeMillis());
        int afterSnapShot = testHarness.getOutput().size();
        assertEquals("operator performed computation during snapshot", beforeSnapShot, afterSnapShot);
        assertTrue(resultAtSnapshot.size() <= factor * numElementsFirst);

        // inject the remaining elements - these should not influence the snapshot
        for (int i = numElementsFirst; i < numElements; i++) {
            StreamRecord<Tuple2<Integer, Integer>> next = new StreamRecord<>(new Tuple2<>(i, i));
            testHarness.processElement(next);
        }

        testHarness.close();
        op.dispose();

        // re-create the operator and restore the state
        op = new AggregatingProcessingTimeWindowOperator<>(
                sumFunction, fieldOneSelector,
                IntSerializer.INSTANCE, tupleSerializer,
                windowSize, windowSlide);

        testHarness = new OneInputStreamOperatorTestHarness<>(op);

        testHarness.setup();
        testHarness.restore(state);
        testHarness.open();

        // inject the remaining elements again
        for (int i = numElementsFirst; i < numElements; i++) {
            StreamRecord<Tuple2<Integer, Integer>> next = new StreamRecord<>(new Tuple2<>(i, i));
            testHarness.processElement(next);
        }

        // advance processing time slide by slide so that all pending windows fire
        testHarness.setProcessingTime(50);
        testHarness.setProcessingTime(100);
        testHarness.setProcessingTime(150);
        testHarness.setProcessingTime(200);
        testHarness.setProcessingTime(250);
        testHarness.setProcessingTime(300);
        testHarness.setProcessingTime(350);
        testHarness.setProcessingTime(400);

        // get and verify the result
        List<Tuple2<Integer, Integer>> finalResult = new ArrayList<>(resultAtSnapshot);
        List<Tuple2<Integer, Integer>> partialFinalResult = extractFromStreamRecords(testHarness.getOutput());
        finalResult.addAll(partialFinalResult);
        assertEquals(numElements * factor, finalResult.size());

        Collections.sort(finalResult, tupleComparator);
        for (int i = 0; i < factor * numElements; i++) {
            assertEquals(i / factor, finalResult.get(i).f0.intValue());
            assertEquals(i / factor, finalResult.get(i).f1.intValue());
        }

        testHarness.close();
        op.dispose();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
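The sliding variant changes the arithmetic: with windowSize = factor * windowSlide = 4 * 50 ms, every element belongs to factor = 4 overlapping windows, so the combined output must contain numElements * factor = 4000 entries. After sorting, positions 4i through 4i + 3 all hold element i, which is why the assertion compares finalResult.get(i).f0 against i / factor rather than i.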