
Example 1 with RecordOrEventCollectingResultPartitionWriter

Use of org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter in project flink by apache.

From the class SubtaskCheckpointCoordinatorTest, the method testBroadcastCancelCheckpointMarkerOnAbortingFromCoordinator:

@Test
public void testBroadcastCancelCheckpointMarkerOnAbortingFromCoordinator() throws Exception {
    OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    OneInputStreamTask::new,
                    1,
                    1,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    StreamConfig streamConfig = testHarness.getStreamConfig();
    streamConfig.setStreamOperator(new MapOperator());
    testHarness.invoke();
    testHarness.waitForTaskRunning();
    MockEnvironment mockEnvironment = MockEnvironment.builder().build();
    try (SubtaskCheckpointCoordinator subtaskCheckpointCoordinator = new MockSubtaskCheckpointCoordinatorBuilder().setEnvironment(mockEnvironment).build()) {
        ArrayList<Object> recordOrEvents = new ArrayList<>();
        StreamElementSerializer<String> stringStreamElementSerializer = new StreamElementSerializer<>(StringSerializer.INSTANCE);
        ResultPartitionWriter resultPartitionWriter = new RecordOrEventCollectingResultPartitionWriter<>(recordOrEvents, stringStreamElementSerializer);
        mockEnvironment.addOutputs(Collections.singletonList(resultPartitionWriter));
        OneInputStreamTask<String, String> task = testHarness.getTask();
        OperatorChain<String, OneInputStreamOperator<String, String>> operatorChain =
                new RegularOperatorChain<>(
                        task,
                        StreamTask.createRecordWriterDelegate(streamConfig, mockEnvironment));
        long checkpointId = 42L;
        // notify checkpoint aborted before execution.
        subtaskCheckpointCoordinator.notifyCheckpointAborted(checkpointId, operatorChain, () -> true);
        subtaskCheckpointCoordinator.checkpointState(
                new CheckpointMetaData(checkpointId, System.currentTimeMillis()),
                CheckpointOptions.forCheckpointWithDefaultLocation(),
                new CheckpointMetricsBuilder(),
                operatorChain,
                false,
                () -> false);
        assertEquals(1, recordOrEvents.size());
        Object recordOrEvent = recordOrEvents.get(0);
        // ensure CancelCheckpointMarker is broadcast downstream.
        assertTrue(recordOrEvent instanceof CancelCheckpointMarker);
        assertEquals(checkpointId, ((CancelCheckpointMarker) recordOrEvent).getCheckpointId());
        testHarness.endInput();
        testHarness.waitForTaskCompletion();
    }
}
Also used:

CheckpointMetricsBuilder (org.apache.flink.runtime.checkpoint.CheckpointMetricsBuilder)
CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData)
RecordOrEventCollectingResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter)
ResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter)
CancelCheckpointMarker (org.apache.flink.runtime.io.network.api.CancelCheckpointMarker)
ArrayList (java.util.ArrayList)
StreamConfig (org.apache.flink.streaming.api.graph.StreamConfig)
OneInputStreamOperator (org.apache.flink.streaming.api.operators.OneInputStreamOperator)
MockEnvironment (org.apache.flink.runtime.operators.testutils.MockEnvironment)
StreamElementSerializer (org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer)
Test (org.junit.Test)
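For reference outside the harness, the key capture mechanism in this test is that RecordOrEventCollectingResultPartitionWriter funnels everything written to the partition into the supplied collection. Below is a minimal, hedged sketch of that pattern in isolation; the class CollectingWriterSketch is hypothetical, the constructor and broadcastEvent(event, isPriorityEvent) call mirror the usage in the test above, and it assumes the writer records broadcast events without any further setup.

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.runtime.io.network.api.CancelCheckpointMarker;
import org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;
import org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer;

public class CollectingWriterSketch {

    public static void main(String[] args) throws Exception {
        // Backing collection that the writer fills with every record or event it receives.
        List<Object> collected = new ArrayList<>();

        // Same constructor shape as in the test above: the output collection plus the
        // serializer used to deserialize records passing through the writer.
        ResultPartitionWriter writer =
                new RecordOrEventCollectingResultPartitionWriter<>(
                        collected, new StreamElementSerializer<>(StringSerializer.INSTANCE));

        // Broadcasting an event (a CancelCheckpointMarker, as in the test) is assumed to land
        // it in the collection, where type and checkpoint id can then be asserted.
        writer.broadcastEvent(new CancelCheckpointMarker(42L), false);

        CancelCheckpointMarker marker = (CancelCheckpointMarker) collected.get(0);
        System.out.println("captured checkpoint id: " + marker.getCheckpointId());
    }
}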

Example 2 with RecordOrEventCollectingResultPartitionWriter

Use of org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter in project flink by apache.

From the class MultipleInputStreamTaskChainedSourcesCheckpointingTest, the method testSkipExecutionsIfFinishedOnRestoreWithSourceChained:

@Test
public void testSkipExecutionsIfFinishedOnRestoreWithSourceChained() throws Exception {
    OperatorID firstSourceOperatorId = new OperatorID();
    OperatorID secondSourceOperatorId = new OperatorID();
    OperatorID nonSourceOperatorId = new OperatorID();
    List<Object> output = new ArrayList<>();
    try (StreamTaskMailboxTestHarness<String> testHarness =
            new StreamTaskMailboxTestHarnessBuilder<>(
                            MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                    .modifyStreamConfig(config -> config.setCheckpointingEnabled(true))
                    .modifyExecutionConfig(applyObjectReuse(objectReuse))
                    .addInput(BasicTypeInfo.INT_TYPE_INFO)
                    .addAdditionalOutput(
                            new RecordOrEventCollectingResultPartitionWriter<StreamElement>(
                                    output, new StreamElementSerializer<>(IntSerializer.INSTANCE)) {

                                @Override
                                public void notifyEndOfData(StopMode mode) throws IOException {
                                    broadcastEvent(new EndOfData(mode), false);
                                }
                            })
                    .addSourceInput(
                            firstSourceOperatorId,
                            new SourceOperatorFactory<>(
                                    new SourceOperatorStreamTaskTest.LifeCycleMonitorSource(
                                            Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                    WatermarkStrategy.noWatermarks()),
                            BasicTypeInfo.INT_TYPE_INFO)
                    .addSourceInput(
                            secondSourceOperatorId,
                            new SourceOperatorFactory<>(
                                    new SourceOperatorStreamTaskTest.LifeCycleMonitorSource(
                                            Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                    WatermarkStrategy.noWatermarks()),
                            BasicTypeInfo.INT_TYPE_INFO)
                    .setTaskStateSnapshot(1, TaskStateSnapshot.FINISHED_ON_RESTORE)
                    .setupOperatorChain(
                            nonSourceOperatorId,
                            new LifeCycleMonitorMultipleInputOperatorFactory())
                    .chain(new TestFinishedOnRestoreStreamOperator(), StringSerializer.INSTANCE)
                    .finish()
                    .build()) {
        testHarness.processElement(Watermark.MAX_WATERMARK);
        assertThat(output, is(empty()));
        testHarness.waitForTaskCompletion();
        assertThat(output, contains(Watermark.MAX_WATERMARK, new EndOfData(StopMode.DRAIN)));
        for (StreamOperatorWrapper<?, ?> wrapper : testHarness.getStreamTask().operatorChain.getAllOperators()) {
            if (wrapper.getStreamOperator() instanceof SourceOperator<?, ?>) {
                SourceOperatorStreamTaskTest.LifeCycleMonitorSourceReader sourceReader =
                        (SourceOperatorStreamTaskTest.LifeCycleMonitorSourceReader)
                                ((SourceOperator<?, ?>) wrapper.getStreamOperator()).getSourceReader();
                sourceReader.getLifeCycleMonitor().assertCallTimes(0, LifeCyclePhase.values());
            }
        }
    }
}
Also used:

ArrayList (java.util.ArrayList)
StopMode (org.apache.flink.runtime.io.network.api.StopMode)
EndOfData (org.apache.flink.runtime.io.network.api.EndOfData)
OperatorID (org.apache.flink.runtime.jobgraph.OperatorID)
SourceOperator (org.apache.flink.streaming.api.operators.SourceOperator)
SourceOperatorFactory (org.apache.flink.streaming.api.operators.SourceOperatorFactory)
RecordOrEventCollectingResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter)
Test (org.junit.Test)
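The anonymous subclass passed to addAdditionalOutput exists only so the end-of-data notification becomes visible in the collected output as an EndOfData event. As a hedged sketch, the same idea can be written as a reusable named class; the name EndOfDataRecordingWriter is made up for illustration, and the base constructor is assumed to take the output collection and a TypeSerializer, matching both uses shown on this page.

import java.io.IOException;
import java.util.Collection;

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.runtime.io.network.api.EndOfData;
import org.apache.flink.runtime.io.network.api.StopMode;
import org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter;

// Collecting writer that also records the end-of-data notification as an EndOfData event,
// so a test can assert on the exact ordering of records, watermarks, and EndOfData.
public class EndOfDataRecordingWriter<T> extends RecordOrEventCollectingResultPartitionWriter<T> {

    public EndOfDataRecordingWriter(Collection<Object> output, TypeSerializer<T> serializer) {
        super(output, serializer);
    }

    @Override
    public void notifyEndOfData(StopMode mode) throws IOException {
        // Forward the notification as a regular (non-priority) event into the output collection.
        broadcastEvent(new EndOfData(mode), false);
    }
}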

Aggregations

ArrayList (java.util.ArrayList): 2
RecordOrEventCollectingResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter): 2
Test (org.junit.Test): 2
CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData): 1
CheckpointMetricsBuilder (org.apache.flink.runtime.checkpoint.CheckpointMetricsBuilder): 1
CancelCheckpointMarker (org.apache.flink.runtime.io.network.api.CancelCheckpointMarker): 1
EndOfData (org.apache.flink.runtime.io.network.api.EndOfData): 1
StopMode (org.apache.flink.runtime.io.network.api.StopMode): 1
ResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter): 1
OperatorID (org.apache.flink.runtime.jobgraph.OperatorID): 1
MockEnvironment (org.apache.flink.runtime.operators.testutils.MockEnvironment): 1
StreamConfig (org.apache.flink.streaming.api.graph.StreamConfig): 1
OneInputStreamOperator (org.apache.flink.streaming.api.operators.OneInputStreamOperator): 1
SourceOperator (org.apache.flink.streaming.api.operators.SourceOperator): 1
SourceOperatorFactory (org.apache.flink.streaming.api.operators.SourceOperatorFactory): 1
StreamElementSerializer (org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer): 1