Use of org.apache.flink.runtime.operators.lifecycle.TestJobWithDescription in project flink by apache.
The class TestOperatorLifecycleValidator, method checkOperatorsLifecycle.
static void checkOperatorsLifecycle(
        TestJobWithDescription testJob, TestOperatorLifecycleValidator... validators) {
    Map<Tuple2<String, Integer>, List<TestEvent>> eventsByOperator = new HashMap<>();
    for (TestEvent ev : testJob.eventQueue.getAll()) {
        eventsByOperator
                .computeIfAbsent(
                        Tuple2.of(ev.operatorId, ev.subtaskIndex), ign -> new ArrayList<>())
                .add(ev);
    }
    eventsByOperator.forEach(
            (operatorIdAndIndex, operatorEvents) -> {
                String id = operatorIdAndIndex.f0;
                if (testJob.operatorsWithLifecycleTracking.contains(id)) {
                    for (TestOperatorLifecycleValidator validator : validators) {
                        validator.validateOperatorLifecycle(
                                testJob, id, operatorIdAndIndex.f1, operatorEvents);
                    }
                }
            });
}
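The validators passed here are typically the DrainingValidator and FinishingValidator shown below. A minimal sketch of how a test might combine them once the job has run (the surrounding test harness is assumed and not shown on this page):
// Sketch only: assumes the job already executed and populated testJob.eventQueue.
static void validateLifecycle(TestJobWithDescription testJob) {
    // Each validator is applied to every tracked operator subtask's event list.
    TestOperatorLifecycleValidator.checkOperatorsLifecycle(
            testJob, new DrainingValidator(), new FinishingValidator());
}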
Use of org.apache.flink.runtime.operators.lifecycle.TestJobWithDescription in project flink by apache.
The class FinishingValidator, method validateOperatorLifecycle.
@Override
public void validateOperatorLifecycle(
        TestJobWithDescription job,
        String operatorId,
        int subtaskIndex,
        List<TestEvent> operatorEvents) {
    boolean opFinished = false;
    Set<Long> finalCheckpointCandidates = new HashSet<>();
    for (TestEvent ev : operatorEvents) {
        if (ev instanceof OperatorFinishedEvent) {
            opFinished = true;
        } else if (ev instanceof CheckpointStartedEvent) {
            if (opFinished) {
                finalCheckpointCandidates.add(((CheckpointStartedEvent) ev).checkpointID);
            }
        } else if (ev instanceof CheckpointCompletedEvent) {
            if (finalCheckpointCandidates.contains(((CheckpointCompletedEvent) ev).checkpointID)) {
                return;
            }
        } else if (opFinished) {
            fail(format("Unexpected event after operator %s[%d] finished: %s",
                    operatorId, subtaskIndex, ev));
        }
    }
    assertTrue(
            format("Operator %s[%d] wasn't finished (events: %s)",
                    operatorId, subtaskIndex, operatorEvents),
            opFinished);
    fail(format(
            "Operator %s[%d] was finished but didn't finish the checkpoint after that; "
                    + "checkpoints started after finish: %s (events (excluding watermarks): %s)",
            operatorId,
            subtaskIndex,
            finalCheckpointCandidates,
            operatorEvents.stream()
                    .filter(ev -> !(ev instanceof WatermarkReceivedEvent))
                    .collect(toList())));
}
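In essence, the validator accepts an event stream only if an OperatorFinishedEvent is eventually followed by a checkpoint that both starts and completes. A self-contained sketch of that state machine, using plain strings in place of the TestEvent subclasses (illustrative only, not Flink API; the real validator matches checkpoint IDs, a boolean suffices here):
import java.util.List;

static boolean finishedThenCheckpointed(List<String> events) {
    boolean finished = false;
    boolean finalCheckpointStarted = false;
    for (String e : events) {
        if (e.equals("FINISH")) {
            finished = true;
        } else if (e.equals("CP_START") && finished) {
            finalCheckpointStarted = true;
        } else if (e.equals("CP_COMPLETE") && finalCheckpointStarted) {
            return true; // a checkpoint started after finish() has completed
        }
    }
    return false;
}
// finishedThenCheckpointed(Arrays.asList("CP_START", "FINISH", "CP_START", "CP_COMPLETE")) -> true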
Use of org.apache.flink.runtime.operators.lifecycle.TestJobWithDescription in project flink by apache.
The class DrainingValidator, method validateOperatorLifecycle.
@Override
public void validateOperatorLifecycle(
        TestJobWithDescription job,
        String operatorId,
        int subtaskIndex,
        List<TestEvent> operatorEvents) {
    Map<Integer, List<TestEvent>> byAttempt = new HashMap<>();
    Set<Integer> normallyFinishedAttempts = new HashSet<>();
    int lastAttempt = Integer.MIN_VALUE;
    for (TestEvent e : operatorEvents) {
        byAttempt.computeIfAbsent(e.attemptNumber, ign -> new ArrayList<>()).add(e);
        if (isFinishAck(e)) {
            normallyFinishedAttempts.add(e.attemptNumber);
        }
        lastAttempt = Math.max(lastAttempt, e.attemptNumber);
    }
    for (Map.Entry<Integer, List<TestEvent>> entry : byAttempt.entrySet()) {
        // Only validate the last attempt and attempts that finished normally;
        // skip attempts where this or another task failed.
        if (lastAttempt == entry.getKey() || normallyFinishedAttempts.contains(entry.getKey())) {
            validateSubtaskAttempt(job, operatorId, subtaskIndex, entry.getValue());
        }
    }
}
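The helpers isFinishAck and validateSubtaskAttempt are not shown on this page; the grouping itself relies only on TestEvent.attemptNumber. A toy version of the selection rule above (not Flink API), to make the skip condition explicit:
// Validate an attempt iff it is the last one or it finished normally.
static boolean shouldValidate(int attempt, int lastAttempt, Set<Integer> normallyFinished) {
    return attempt == lastAttempt || normallyFinished.contains(attempt);
}
// shouldValidate(0, 1, emptySet()) -> false: attempt 0 failed and was superseded
// shouldValidate(1, 1, emptySet()) -> true:  the final attempt is always checked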
Use of org.apache.flink.runtime.operators.lifecycle.TestJobWithDescription in project flink by apache.
The class TestJobDataFlowValidator, method checkDataFlow.
public static void checkDataFlow(TestJobWithDescription testJob, boolean withDrain) {
    Map<String, Map<Integer, OperatorFinishedEvent>> finishEvents = new HashMap<>();
    for (TestEvent ev : testJob.eventQueue.getAll()) {
        if (ev instanceof OperatorFinishedEvent) {
            finishEvents
                    .computeIfAbsent(ev.operatorId, ign -> new HashMap<>())
                    .put(ev.subtaskIndex, ((OperatorFinishedEvent) ev));
        }
    }
    for (JobVertex upstream : testJob.jobGraph.getVertices()) {
        for (IntermediateDataSet produced : upstream.getProducedDataSets()) {
            JobEdge edge = produced.getConsumer();
            Optional<String> upstreamIDOptional = getTrackedOperatorID(upstream, true, testJob);
            Optional<String> downstreamIDOptional =
                    getTrackedOperatorID(edge.getTarget(), false, testJob);
            if (upstreamIDOptional.isPresent() && downstreamIDOptional.isPresent()) {
                final String upstreamID = upstreamIDOptional.get();
                final String downstreamID = downstreamIDOptional.get();
                if (testJob.sources.contains(upstreamID)) {
                    // TODO: if we add tests for FLIP-27 sources we might need to adjust
                    // this condition
                    LOG.debug(
                            "Legacy sources do not have the finish() method and thus do not"
                                    + " emit FinishEvent");
                } else {
                    checkDataFlow(upstreamID, downstreamID, edge, finishEvents, withDrain);
                }
            } else {
                LOG.debug("Ignoring edge (untracked operator): {}", edge);
            }
        }
    }
}
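A sketch of how a test might invoke this validator; that the job was stopped with drain (so every tracked operator emitted an OperatorFinishedEvent before shutting down) is an assumption here, and the wrapper method name is illustrative:
// Sketch only: testJob is assumed to have been executed and drained already.
static void validateDataFlow(TestJobWithDescription testJob) {
    TestJobDataFlowValidator.checkDataFlow(testJob, /* withDrain */ true);
}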
Use of org.apache.flink.runtime.operators.lifecycle.TestJobWithDescription in project flink by apache.
The class IncrementalStateReuseAfterFailureITCase, method createJob.
private TestJobWithDescription createJob() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(200);
    // reliably fails Changelog with FLINK-25395, but might affect any incremental backend
    env.enableChangelogStateBackend(true);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1));
    // simplify debugging
    env.setMaxParallelism(1);
    // simplify debugging
    env.setParallelism(1);
    TestEventQueue evQueue = TestEventQueue.createShared(sharedObjects);
    TestCommandDispatcher cmdQueue = TestCommandDispatcher.createShared(sharedObjects);
    DataStream<TestDataElement> src =
            env.addSource(new TestEventSource(UID_SRC, evQueue, cmdQueue)).setUidHash(UID_SRC);
    SingleOutputStreamOperator<TestDataElement> transform1 =
            src.keyBy(x -> x)
                    .transform("transform-1", TypeInformation.of(TestDataElement.class),
                            new OneInputTestStreamOperatorFactory(UID_OP1, evQueue, cmdQueue))
                    .setUidHash(UID_OP1);
    // chain two keyed operators, so that one is checkpointed and the other one fails
    SingleOutputStreamOperator<TestDataElement> transform2 =
            DataStreamUtils.reinterpretAsKeyedStream(transform1, x -> x)
                    .transform("transform-2", TypeInformation.of(TestDataElement.class),
                            new OneInputTestStreamOperatorFactory(UID_OP2, evQueue, cmdQueue))
                    .setUidHash(UID_OP2);
    transform2.addSink(new DiscardingSink<>());
    return new TestJobWithDescription(
            env.getStreamGraph().getJobGraph(),
            emptySet(), emptySet(), emptySet(), emptyMap(),
            evQueue, cmdQueue);
}
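The ITCase's actual test body is not included on this page; a hypothetical outline of how the returned description might be driven (method name and steps are illustrative, not the real test):
@Test
public void testStateReuseAfterFailure() throws Exception {
    TestJobWithDescription job = createJob();
    // 1. submit job.jobGraph to the test cluster;
    // 2. after a checkpoint, use the command dispatcher to fail one of the
    //    chained operators (triggering the single fixed-delay restart);
    // 3. wait for recovery plus another successful checkpoint, then cancel
    //    and inspect the events collected in job.eventQueue.
}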