Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.
In class CEPMigrationTest, method writeAfterBranchingPatternSnapshot:
/**
* Manually run this to write binary snapshot data.
*/
@Ignore
@Test
public void writeAfterBranchingPatternSnapshot() throws Exception {
    KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {

        private static final long serialVersionUID = -4873366487571254798L;

        @Override
        public Integer getKey(Event value) throws Exception {
            return value.getId();
        }
    };
    final Event startEvent = new Event(42, "start", 1.0);
    final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
    final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
                    keySelector,
                    BasicTypeInfo.INT_TYPE_INFO);
    try {
        harness.setup();
        harness.open();
        harness.processElement(new StreamRecord<Event>(startEvent, 1));
        harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
        harness.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
        // two candidate middle events at the same pattern step create branching partial matches
        harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
        harness.processElement(new StreamRecord<Event>(middleEvent2, 3));
        harness.processWatermark(new Watermark(5));
        // take a snapshot and persist it to a file for the restore tests
        OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
        OperatorSnapshotUtil.writeStateHandle(
                snapshot,
                "src/test/resources/cep-migration-after-branching-flink"
                        + flinkGenerateSavepointVersion
                        + "-snapshot");
    } finally {
        harness.close();
    }
}
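The NFAFactory passed to getKeyedCepOperator is defined elsewhere in CEPMigrationTest and is not part of this excerpt. As a rough sketch of the kind of branching pattern it might compile (the condition predicates and the volume threshold here are illustrative assumptions, not the test's actual ones), note that both middleEvent1 and middleEvent2 can satisfy the middle condition, which is what produces the branching partial matches the snapshot captures:

import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.streaming.api.windowing.time.Time;

// Illustrative pattern: "start", followed by any matching SubEvent "middle",
// followed by any "end", all within 10 ms of event time.
Pattern<Event, ?> pattern = Pattern.<Event>begin("start")
        .where(new SimpleCondition<Event>() {
            @Override
            public boolean filter(Event value) {
                return value.getName().equals("start");
            }
        })
        .followedByAny("middle")
        .subtype(SubEvent.class)
        .where(new SimpleCondition<SubEvent>() {
            @Override
            public boolean filter(SubEvent value) {
                return value.getVolume() > 5.0; // would match both foo1 and foo2 (volume 10.0)
            }
        })
        .followedByAny("end")
        .where(new SimpleCondition<Event>() {
            @Override
            public boolean filter(Event value) {
                return value.getName().equals("end");
            }
        })
        .within(Time.milliseconds(10L));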
Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.
In class CEPMigrationTest, method testAndOrSubtypeConditionsAfterMigration:
@Test
public void testAndOrSubtypeConditionsAfterMigration() throws Exception {
    KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {

        private static final long serialVersionUID = -4873366487571254798L;

        @Override
        public Integer getKey(Event value) throws Exception {
            return value.getId();
        }
    };
    final Event startEvent1 = new SubEvent(42, "start", 1.0, 6.0);
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAComplexConditionsFactory()),
                    keySelector,
                    BasicTypeInfo.INT_TYPE_INFO);
    try {
        harness.setup();
        // restore from the binary snapshot written by writeAndOrSubtypConditionsPatternAfterMigrationSnapshot
        harness.initializeState(
                OperatorSnapshotUtil.getResourceFilename(
                        "cep-migration-conditions-flink" + migrateVersion + "-snapshot"));
        harness.open();
        final Event endEvent = new SubEvent(42, "end", 1.0, 2.0);
        harness.processElement(new StreamRecord<>(endEvent, 9));
        harness.processWatermark(new Watermark(20));
        ConcurrentLinkedQueue<Object> result = harness.getOutput();
        // expect exactly two outputs: the watermark and the matched pattern
        assertEquals(2, result.size());
        Object resultObject = result.poll();
        assertTrue(resultObject instanceof StreamRecord);
        StreamRecord<?> resultRecord = (StreamRecord<?>) resultObject;
        assertTrue(resultRecord.getValue() instanceof Map);
        @SuppressWarnings("unchecked")
        Map<String, List<Event>> patternMap = (Map<String, List<Event>>) resultRecord.getValue();
        // both events are assigned to the "start" pattern
        assertEquals(startEvent1, patternMap.get("start").get(0));
        assertEquals(endEvent, patternMap.get("start").get(1));
    } finally {
        harness.close();
    }
}
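The restore above exercises combined subtype, AND, and OR conditions compiled by NFAComplexConditionsFactory, which is also defined outside this excerpt. The fact that both the start and end events land in the "start" list suggests a looping (oneOrMore) state. A minimal sketch of a pattern combining those condition types, with purely illustrative predicates:

import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.streaming.api.windowing.time.Time;

// Illustrative only: subtype() ANDs a class check onto the condition, where()
// ANDs a predicate, or() adds an ORed alternative, until() bounds the loop.
Pattern<Event, ?> pattern = Pattern.<Event>begin("start")
        .subtype(SubEvent.class)
        .where(new SimpleCondition<SubEvent>() {
            @Override
            public boolean filter(SubEvent value) {
                return value.getName().equals("start");
            }
        })
        .or(new SimpleCondition<SubEvent>() {
            @Override
            public boolean filter(SubEvent value) {
                return value.getName().equals("end");
            }
        })
        .oneOrMore()
        .until(new SimpleCondition<SubEvent>() {
            @Override
            public boolean filter(SubEvent value) {
                return value.getVolume() < 1.0;
            }
        })
        .within(Time.milliseconds(10L));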
Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.
In class CEPMigrationTest, method writeStartingNewPatternAfterMigrationSnapshot:
/**
* Manually run this to write binary snapshot data.
*/
@Ignore
@Test
public void writeStartingNewPatternAfterMigrationSnapshot() throws Exception {
    KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {

        private static final long serialVersionUID = -4873366487571254798L;

        @Override
        public Integer getKey(Event value) throws Exception {
            return value.getId();
        }
    };
    final Event startEvent1 = new Event(42, "start", 1.0);
    final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
                    keySelector,
                    BasicTypeInfo.INT_TYPE_INFO);
    try {
        harness.setup();
        harness.open();
        harness.processElement(new StreamRecord<Event>(startEvent1, 1));
        harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
        harness.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
        harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
        harness.processWatermark(new Watermark(5));
        // do snapshot and save to file
        OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
        OperatorSnapshotUtil.writeStateHandle(
                snapshot,
                "src/test/resources/cep-migration-starting-new-pattern-flink"
                        + flinkGenerateSavepointVersion
                        + "-snapshot");
    } finally {
        harness.close();
    }
}
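A restore-side counterpart for this snapshot is not shown here, but it would mirror the shape of testAndOrSubtypeConditionsAfterMigration above: initialize the harness from the written file, then continue feeding events. A hedged sketch, reusing the harness setup from the write method (the event values and timestamps below are illustrative):

harness.setup();
// restore the state written by writeStartingNewPatternAfterMigrationSnapshot
harness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
                "cep-migration-starting-new-pattern-flink" + migrateVersion + "-snapshot"));
harness.open();
// a second "start" event begins a new pending match alongside the restored one
harness.processElement(new StreamRecord<Event>(new Event(42, "start", 2.0), 4));
harness.processElement(new StreamRecord<Event>(new Event(42, "end", 1.0), 6));
harness.processWatermark(new Watermark(20));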
Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.
In class CEPMigrationTest, method writeAndOrSubtypConditionsPatternAfterMigrationSnapshot:
/**
* Manually run this to write binary snapshot data.
*/
@Ignore
@Test
public void writeAndOrSubtypConditionsPatternAfterMigrationSnapshot() throws Exception {
    KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {

        private static final long serialVersionUID = -4873366487571254798L;

        @Override
        public Integer getKey(Event value) throws Exception {
            return value.getId();
        }
    };
    final Event startEvent1 = new SubEvent(42, "start", 1.0, 6.0);
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAComplexConditionsFactory()),
                    keySelector,
                    BasicTypeInfo.INT_TYPE_INFO);
    try {
        harness.setup();
        harness.open();
        harness.processElement(new StreamRecord<>(startEvent1, 5));
        harness.processWatermark(new Watermark(6));
        // do snapshot and save to file
        OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
        OperatorSnapshotUtil.writeStateHandle(
                snapshot,
                "src/test/resources/cep-migration-conditions-flink"
                        + flinkGenerateSavepointVersion
                        + "-snapshot");
    } finally {
        harness.close();
    }
}
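Neither flinkGenerateSavepointVersion (used by the write* methods) nor migrateVersion (used by the restore tests) appears in these excerpts; both are fields of CEPMigrationTest. A plausible reconstruction using JUnit's Parameterized runner, assuming Flink's MigrationVersion test enum (the concrete version values here are illustrative, not the project's actual list):

import java.util.Arrays;
import java.util.Collection;
import org.apache.flink.testutils.migration.MigrationVersion;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class CEPMigrationTest {

    // version for which the @Ignore'd write* methods generate new snapshots (illustrative value)
    private static final MigrationVersion flinkGenerateSavepointVersion = MigrationVersion.v1_12;

    // version whose checked-in snapshot a restore test reads
    private final MigrationVersion migrateVersion;

    @Parameterized.Parameters(name = "Migration Savepoint: {0}")
    public static Collection<MigrationVersion> parameters() {
        return Arrays.asList(MigrationVersion.v1_10, MigrationVersion.v1_11, MigrationVersion.v1_12);
    }

    public CEPMigrationTest(MigrationVersion migrateVersion) {
        this.migrateVersion = migrateVersion;
    }

    // ... test methods as above ...
}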
Use of org.apache.flink.streaming.api.watermark.Watermark in project flink by apache.
In class KafkaShuffleITCase, method testWatermarkBroadcasting:
/**
 * Tests value and watermark serialization and deserialization with time characteristic:
 * EventTime.
 *
 * <p>Producer Parallelism = 2; Kafka Partition # = 3 (matching the constants below).
 */
@Test
public void testWatermarkBroadcasting() throws Exception {
    final int numberOfPartitions = 3;
    final int producerParallelism = 2;
    final int numElementsPerProducer = 1000;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    Map<Integer, Collection<ConsumerRecord<byte[], byte[]>>> results =
            testKafkaShuffleProducer(
                    topic("test_watermark_broadcast", EventTime),
                    env,
                    numberOfPartitions,
                    producerParallelism,
                    numElementsPerProducer,
                    EventTime);
    TypeSerializer<Tuple3<Integer, Long, Integer>> typeSerializer = createTypeSerializer(env);
    KafkaShuffleElementDeserializer deserializer = new KafkaShuffleElementDeserializer<>(typeSerializer);
    // records in a single partition are kept in order
    for (int p = 0; p < numberOfPartitions; p++) {
        Collection<ConsumerRecord<byte[], byte[]>> records = results.get(p);
        Map<Integer, List<KafkaShuffleWatermark>> watermarks = new HashMap<>();
        for (ConsumerRecord<byte[], byte[]> consumerRecord : records) {
            assertNull(consumerRecord.key());
            KafkaShuffleElement element = deserializer.deserialize(consumerRecord);
            if (element.isRecord()) {
                KafkaShuffleRecord<Tuple3<Integer, Long, Integer>> record = element.asRecord();
                assertEquals(record.getValue().f1.longValue(), INIT_TIMESTAMP + record.getValue().f0);
                assertEquals(record.getTimestamp().longValue(), record.getValue().f1.longValue());
            } else if (element.isWatermark()) {
                KafkaShuffleWatermark watermark = element.asWatermark();
                // group watermarks by the producer subtask that emitted them
                watermarks.computeIfAbsent(watermark.getSubtask(), k -> new ArrayList<>()).add(watermark);
            } else {
                fail("KafkaShuffleElement must be either a record or a watermark");
            }
        }
        // watermarks from the same producer subtask must stay in order
        for (List<KafkaShuffleWatermark> subTaskWatermarks : watermarks.values()) {
            int index = 0;
            // one watermark per element, plus the final end-of-event-time watermark
            assertEquals(numElementsPerProducer + 1, subTaskWatermarks.size());
            for (KafkaShuffleWatermark watermark : subTaskWatermarks) {
                if (index == numElementsPerProducer) {
                    // the last element is the watermark that signifies end-of-event-time
                    assertEquals(watermark.getWatermark(), Watermark.MAX_WATERMARK.getTimestamp());
                } else {
                    assertEquals(watermark.getWatermark(), INIT_TIMESTAMP + index++);
                }
            }
        }
    }
}
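The end-of-event-time assertion relies on Watermark.MAX_WATERMARK from org.apache.flink.streaming.api.watermark.Watermark. For reference, the class is a thin wrapper around a long timestamp:

import org.apache.flink.streaming.api.watermark.Watermark;
import static org.junit.Assert.assertEquals;

Watermark w = new Watermark(5);
assertEquals(5L, w.getTimestamp());
// MAX_WATERMARK signals end-of-event-time; its timestamp is Long.MAX_VALUE
assertEquals(Long.MAX_VALUE, Watermark.MAX_WATERMARK.getTimestamp());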