Example 1 with EndOfData

Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.

The class SortMergeResultPartition, method notifyEndOfData.

@Override
public void notifyEndOfData(StopMode mode) throws IOException {
    if (!hasNotifiedEndOfUserRecords) {
        broadcastEvent(new EndOfData(mode), false);
        hasNotifiedEndOfUserRecords = true;
    }
}
Also used : EndOfData(org.apache.flink.runtime.io.network.api.EndOfData)
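Downstream, the EndOfData event travels as a serialized event buffer. A minimal round-trip sketch of that, assuming the EventSerializer import path from the Flink codebase (the class name here is illustrative; the other imports and the equality on StopMode appear in the examples below):

import java.io.IOException;

import org.apache.flink.runtime.event.AbstractEvent;
import org.apache.flink.runtime.io.network.api.EndOfData;
import org.apache.flink.runtime.io.network.api.StopMode;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;
import org.apache.flink.runtime.io.network.buffer.Buffer;

public final class EndOfDataRoundTrip {
    public static void main(String[] args) throws IOException {
        EndOfData event = new EndOfData(StopMode.DRAIN);
        // Events are shipped as special (non-data) buffers; 'false' means no priority.
        Buffer buffer = EventSerializer.toBuffer(event, false);
        AbstractEvent decoded =
                EventSerializer.fromBuffer(buffer, EndOfDataRoundTrip.class.getClassLoader());
        // The same equality the test in Example 2 relies on: EndOfData compares by StopMode.
        System.out.println(event.equals(decoded)); // expected: true
        System.out.println(buffer.isBuffer());     // expected: false, it is an event buffer
    }
}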

Example 2 with EndOfData

Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.

The class ResultPartitionTest, method testWaitForAllRecordProcessed.

@Test
public void testWaitForAllRecordProcessed() throws IOException {
    // Creates a result partition with 2 channels.
    BufferWritingResultPartition bufferWritingResultPartition = createResultPartition(ResultPartitionType.PIPELINED_BOUNDED);
    bufferWritingResultPartition.notifyEndOfData(StopMode.DRAIN);
    CompletableFuture<Void> allRecordsProcessedFuture = bufferWritingResultPartition.getAllDataProcessedFuture();
    assertFalse(allRecordsProcessedFuture.isDone());
    for (ResultSubpartition resultSubpartition : bufferWritingResultPartition.subpartitions) {
        assertEquals(1, resultSubpartition.getTotalNumberOfBuffersUnsafe());
        Buffer nextBuffer = ((PipelinedSubpartition) resultSubpartition).pollBuffer().buffer();
        assertFalse(nextBuffer.isBuffer());
        assertEquals(new EndOfData(StopMode.DRAIN), EventSerializer.fromBuffer(nextBuffer, getClass().getClassLoader()));
    }
    for (int i = 0; i < bufferWritingResultPartition.subpartitions.length; ++i) {
        ((PipelinedSubpartition) bufferWritingResultPartition.subpartitions[i]).acknowledgeAllDataProcessed();
        if (i < bufferWritingResultPartition.subpartitions.length - 1) {
            assertFalse(allRecordsProcessedFuture.isDone());
        } else {
            assertTrue(allRecordsProcessedFuture.isDone());
            assertFalse(allRecordsProcessedFuture.isCompletedExceptionally());
        }
    }
}
Also used : ByteBuffer(java.nio.ByteBuffer) Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) EndOfData(org.apache.flink.runtime.io.network.api.EndOfData) Test(org.junit.Test)
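As the test shows, the future returned by getAllDataProcessedFuture() completes only after every subpartition has acknowledged the EndOfData event. A minimal producer-side sketch of that handshake (the helper name and the timeout are illustrative; it assumes notifyEndOfData(StopMode) and getAllDataProcessedFuture() are declared on ResultPartitionWriter, which the @Override in Example 1 suggests):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.runtime.io.network.api.StopMode;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;

public final class EndOfDataAwait {
    static void finishAndAwait(ResultPartitionWriter writer) throws Exception {
        // Broadcasts EndOfData(DRAIN) to every subpartition; Example 1 shows
        // partitions guard against notifying twice.
        writer.notifyEndOfData(StopMode.DRAIN);
        CompletableFuture<Void> allProcessed = writer.getAllDataProcessedFuture();
        // Completes once every downstream channel has acknowledged the event.
        allProcessed.get(30, TimeUnit.SECONDS); // illustrative timeout
    }
}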

Example 3 with EndOfData

Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.

The class MultipleInputStreamTaskTest, method testSkipExecutionsIfFinishedOnRestore.

@Test
public void testSkipExecutionsIfFinishedOnRestore() throws Exception {
    OperatorID nonSourceOperatorId = new OperatorID();
    try (StreamTaskMailboxTestHarness<String> testHarness =
            new StreamTaskMailboxTestHarnessBuilder<>(
                            MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                    .setCollectNetworkEvents()
                    .modifyStreamConfig(config -> config.setCheckpointingEnabled(true))
                    .modifyExecutionConfig(applyObjectReuse(objectReuse))
                    .addInput(BasicTypeInfo.INT_TYPE_INFO)
                    .addInput(BasicTypeInfo.INT_TYPE_INFO)
                    .addInput(BasicTypeInfo.INT_TYPE_INFO)
                    .setTaskStateSnapshot(1, TaskStateSnapshot.FINISHED_ON_RESTORE)
                    .setupOperatorChain(
                            nonSourceOperatorId, new LifeCycleMonitorMultipleInputOperatorFactory())
                    .chain(new TestFinishedOnRestoreStreamOperator(), StringSerializer.INSTANCE)
                    .finish()
                    .build()) {
        testHarness.processElement(Watermark.MAX_WATERMARK, 0);
        testHarness.processElement(Watermark.MAX_WATERMARK, 1);
        testHarness.processElement(Watermark.MAX_WATERMARK, 2);
        testHarness.waitForTaskCompletion();
        assertThat(testHarness.getOutput(), contains(Watermark.MAX_WATERMARK, new EndOfData(StopMode.DRAIN)));
    }
}
Also used : EndOfData(org.apache.flink.runtime.io.network.api.EndOfData) TaskIOMetricGroup(org.apache.flink.runtime.metrics.groups.TaskIOMetricGroup) Arrays(java.util.Arrays) TestCheckpointResponder(org.apache.flink.runtime.taskmanager.TestCheckpointResponder) SharedObjects(org.apache.flink.testutils.junit.SharedObjects) NoMoreSplitsEvent(org.apache.flink.runtime.source.event.NoMoreSplitsEvent) AbstractStreamOperatorFactory(org.apache.flink.streaming.api.operators.AbstractStreamOperatorFactory) Duration(java.time.Duration) Map(java.util.Map) WatermarkStatus(org.apache.flink.streaming.runtime.watermarkstatus.WatermarkStatus) TypeSerializer(org.apache.flink.api.common.typeutils.TypeSerializer) BoundedOneInput(org.apache.flink.streaming.api.operators.BoundedOneInput) Serializable(java.io.Serializable) StopMode(org.apache.flink.runtime.io.network.api.StopMode) MetricNames(org.apache.flink.runtime.metrics.MetricNames) CheckpointBarrier(org.apache.flink.runtime.io.network.api.CheckpointBarrier) Matchers.contains(org.hamcrest.Matchers.contains) WatermarkMetricOperator(org.apache.flink.streaming.runtime.tasks.OneInputStreamTaskTest.WatermarkMetricOperator) OneInputStreamOperator(org.apache.flink.streaming.api.operators.OneInputStreamOperator) Boundedness(org.apache.flink.api.connector.source.Boundedness) Counter(org.apache.flink.metrics.Counter) RunWith(org.junit.runner.RunWith) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) TimestampAssigner(org.apache.flink.api.common.eventtime.TimestampAssigner) DataOutputView(org.apache.flink.core.memory.DataOutputView) AbstractInput(org.apache.flink.streaming.api.operators.AbstractInput) ArrayList(java.util.ArrayList) CompletingCheckpointResponder(org.apache.flink.streaming.util.CompletingCheckpointResponder) InternalOperatorMetricGroup(org.apache.flink.runtime.metrics.groups.InternalOperatorMetricGroup) Gauge(org.apache.flink.metrics.Gauge) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) TestHarnessUtil(org.apache.flink.streaming.util.TestHarnessUtil) Before(org.junit.Before) CheckpointStorageLocationReference(org.apache.flink.runtime.state.CheckpointStorageLocationReference) SourceReader(org.apache.flink.api.connector.source.SourceReader) Parameter(org.junit.runners.Parameterized.Parameter) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) InterceptingTaskMetricGroup(org.apache.flink.runtime.metrics.util.InterceptingTaskMetricGroup) AddSplitEvent(org.apache.flink.runtime.source.event.AddSplitEvent) StreamMultipleInputProcessor(org.apache.flink.streaming.runtime.io.StreamMultipleInputProcessor) AbstractStreamOperator(org.apache.flink.streaming.api.operators.AbstractStreamOperator) StreamOperator(org.apache.flink.streaming.api.operators.StreamOperator) JobID(org.apache.flink.api.common.JobID) UnregisteredMetricGroups(org.apache.flink.runtime.metrics.groups.UnregisteredMetricGroups) StreamTaskFinalCheckpointsTest.processMailTillCheckpointSucceeds(org.apache.flink.streaming.runtime.tasks.StreamTaskFinalCheckpointsTest.processMailTillCheckpointSucceeds) Assert(org.junit.Assert) ArrayDeque(java.util.ArrayDeque) Assert.assertEquals(org.junit.Assert.assertEquals) Input(org.apache.flink.streaming.api.operators.Input) LifeCycleMonitorMultipleInputOperatorFactory(org.apache.flink.streaming.runtime.tasks.MultipleInputStreamTaskChainedSourcesCheckpointingTest.LifeCycleMonitorMultipleInputOperatorFactory) 
WatermarkGenerator(org.apache.flink.api.common.eventtime.WatermarkGenerator) SavepointType(org.apache.flink.runtime.checkpoint.SavepointType) StringSerializer(org.apache.flink.api.common.typeutils.base.StringSerializer) StreamTaskFinalCheckpointsTest.triggerCheckpoint(org.apache.flink.streaming.runtime.tasks.StreamTaskFinalCheckpointsTest.triggerCheckpoint) BasicTypeInfo(org.apache.flink.api.common.typeinfo.BasicTypeInfo) ResultPartition(org.apache.flink.runtime.io.network.partition.ResultPartition) IntSerializer(org.apache.flink.api.common.typeutils.base.IntSerializer) TypeSerializerSnapshot(org.apache.flink.api.common.typeutils.TypeSerializerSnapshot) Parameterized(org.junit.runners.Parameterized) SourceReaderContext(org.apache.flink.api.connector.source.SourceReaderContext) TaskStateSnapshot(org.apache.flink.runtime.checkpoint.TaskStateSnapshot) CheckpointType(org.apache.flink.runtime.checkpoint.CheckpointType) InterceptingOperatorMetricGroup(org.apache.flink.runtime.metrics.util.InterceptingOperatorMetricGroup) BoundedMultiInput(org.apache.flink.streaming.api.operators.BoundedMultiInput) MockSourceReader(org.apache.flink.api.connector.source.mocks.MockSourceReader) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) WatermarkStrategy(org.apache.flink.api.common.eventtime.WatermarkStrategy) MockSourceSplitSerializer(org.apache.flink.api.connector.source.mocks.MockSourceSplitSerializer) CheckpointOptions(org.apache.flink.runtime.checkpoint.CheckpointOptions) MultipleInputStreamOperator(org.apache.flink.streaming.api.operators.MultipleInputStreamOperator) List(java.util.List) SerializedValue(org.apache.flink.util.SerializedValue) Preconditions.checkArgument(org.apache.flink.util.Preconditions.checkArgument) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) CancelCheckpointMarker(org.apache.flink.runtime.io.network.api.CancelCheckpointMarker) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) CheckpointResponder(org.apache.flink.runtime.taskmanager.CheckpointResponder) CheckpointMetrics(org.apache.flink.runtime.checkpoint.CheckpointMetrics) StreamConfig(org.apache.flink.streaming.api.graph.StreamConfig) SavepointFormatType(org.apache.flink.core.execution.SavepointFormatType) CheckpointMetaData(org.apache.flink.runtime.checkpoint.CheckpointMetaData) Parameters(org.junit.runners.Parameterized.Parameters) CoreMatchers.not(org.hamcrest.CoreMatchers.not) EndOfPartitionEvent(org.apache.flink.runtime.io.network.api.EndOfPartitionEvent) AbstractStreamOperatorV2(org.apache.flink.streaming.api.operators.AbstractStreamOperatorV2) Watermark(org.apache.flink.streaming.api.watermark.Watermark) ResultPartitionType(org.apache.flink.runtime.io.network.partition.ResultPartitionType) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) Metric(org.apache.flink.metrics.Metric) SourceOperatorFactory(org.apache.flink.streaming.api.operators.SourceOperatorFactory) MockSourceSplit(org.apache.flink.api.connector.source.mocks.MockSourceSplit) TaskMetricGroup(org.apache.flink.runtime.metrics.groups.TaskMetricGroup) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) MockSource(org.apache.flink.api.connector.source.mocks.MockSource) OperatorMetricGroup(org.apache.flink.metrics.groups.OperatorMetricGroup) DataInputView(org.apache.flink.core.memory.DataInputView) SharedReference(org.apache.flink.testutils.junit.SharedReference) 
StreamOperatorParameters(org.apache.flink.streaming.api.operators.StreamOperatorParameters) WatermarkOutput(org.apache.flink.api.common.eventtime.WatermarkOutput) IsMapContaining(org.hamcrest.collection.IsMapContaining) Consumer(java.util.function.Consumer) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) Rule(org.junit.Rule) PartitionTestUtils(org.apache.flink.runtime.io.network.partition.PartitionTestUtils) LatencyMarker(org.apache.flink.streaming.runtime.streamrecord.LatencyMarker) Collections(java.util.Collections) EndOfData(org.apache.flink.runtime.io.network.api.EndOfData) LifeCycleMonitorMultipleInputOperatorFactory(org.apache.flink.streaming.runtime.tasks.MultipleInputStreamTaskChainedSourcesCheckpointingTest.LifeCycleMonitorMultipleInputOperatorFactory) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) Test(org.junit.Test)

Example 4 with EndOfData

Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.

The class SourceTaskTerminationTest, method stopWithSavepointStreamTaskTestHelper.

private void stopWithSavepointStreamTaskTestHelper(final boolean shouldTerminate) throws Exception {
    final long syncSavepointId = 34L;
    try (StreamTaskMailboxTestHarness<Long> srcTaskTestHarness = getSourceStreamTaskTestHarness()) {
        final StreamTask<Long, ?> srcTask = srcTaskTestHarness.getStreamTask();
        srcTaskTestHarness.processAll();
        // step by step let the source thread emit elements
        emitAndVerifyWatermarkAndElement(srcTaskTestHarness, 1L);
        emitAndVerifyWatermarkAndElement(srcTaskTestHarness, 2L);
        srcTaskTestHarness.processUntil(srcTask.triggerCheckpointAsync(new CheckpointMetaData(31L, 900), CheckpointOptions.forCheckpointWithDefaultLocation())::isDone);
        verifyCheckpointBarrier(srcTaskTestHarness.getOutput(), 31L);
        emitAndVerifyWatermarkAndElement(srcTaskTestHarness, 3L);
        srcTaskTestHarness.processUntil(
                srcTask.triggerCheckpointAsync(
                        new CheckpointMetaData(syncSavepointId, 900),
                        new CheckpointOptions(
                                shouldTerminate
                                        ? SavepointType.terminate(SavepointFormatType.CANONICAL)
                                        : SavepointType.suspend(SavepointFormatType.CANONICAL),
                                CheckpointStorageLocationReference.getDefault()))::isDone);
        if (shouldTerminate) {
            // if we are in TERMINATE mode, we expect the source task
            // to emit MAX_WM before the SYNC_SAVEPOINT barrier.
            verifyWatermark(srcTaskTestHarness.getOutput(), Watermark.MAX_WATERMARK);
        }
        verifyEvent(srcTaskTestHarness.getOutput(), new EndOfData(shouldTerminate ? StopMode.DRAIN : StopMode.NO_DRAIN));
        verifyCheckpointBarrier(srcTaskTestHarness.getOutput(), syncSavepointId);
        waitForSynchronousSavepointIdToBeSet(srcTask);
        assertTrue(srcTask.getSynchronousSavepointId().isPresent());
        srcTaskTestHarness.processUntil(srcTask.notifyCheckpointCompleteAsync(syncSavepointId)::isDone);
        srcTaskTestHarness.waitForTaskCompletion();
    }
}
Also used : EndOfData(org.apache.flink.runtime.io.network.api.EndOfData) CheckpointOptions(org.apache.flink.runtime.checkpoint.CheckpointOptions) CheckpointMetaData(org.apache.flink.runtime.checkpoint.CheckpointMetaData)
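The DRAIN/NO_DRAIN distinction follows directly from the savepoint type: a terminate savepoint drains the pipeline (MAX_WATERMARK, then EndOfData(DRAIN)), while a suspend savepoint emits EndOfData(NO_DRAIN) without the final watermark. A hedged sketch of just the options construction used above (class and method names are illustrative; the calls themselves appear verbatim in the test):

import org.apache.flink.core.execution.SavepointFormatType;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.checkpoint.SavepointType;
import org.apache.flink.runtime.state.CheckpointStorageLocationReference;

public final class StopWithSavepointOptions {
    // Builds the CheckpointOptions the test passes to triggerCheckpointAsync().
    static CheckpointOptions forStop(boolean terminate) {
        return new CheckpointOptions(
                terminate
                        ? SavepointType.terminate(SavepointFormatType.CANONICAL)  // drain: flush all data first
                        : SavepointType.suspend(SavepointFormatType.CANONICAL),   // suspend: stop without draining
                CheckpointStorageLocationReference.getDefault());
    }
}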

Example 5 with EndOfData

Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.

The class StreamTestSingleInputGate, method setupInputChannels.

private TestInputChannel[] setupInputChannels() {
    TestInputChannel[] inputChannels = new TestInputChannel[numInputChannels];
    for (int i = 0; i < numInputChannels; i++) {
        final int channelIndex = i;
        final DataOutputSerializer dataOutputSerializer = new DataOutputSerializer(128);
        final SerializationDelegate<StreamElement> delegate = new SerializationDelegate<>(new StreamElementSerializer<T>(serializer));
        inputQueues[channelIndex] = new ConcurrentLinkedQueue<>();
        inputChannels[channelIndex] = new TestInputChannel(inputGate, i);
        final BufferAndAvailabilityProvider answer = () -> {
            ConcurrentLinkedQueue<InputValue<Object>> inputQueue = inputQueues[channelIndex];
            InputValue<Object> input;
            Buffer.DataType nextType;
            synchronized (inputQueue) {
                input = inputQueue.poll();
                nextType = !inputQueue.isEmpty() ? Buffer.DataType.DATA_BUFFER : Buffer.DataType.NONE;
            }
            if (input != null && input.isStreamEnd()) {
                inputChannels[channelIndex].setReleased();
                return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false), nextType, 0, 0));
            } else if (input != null && input.isDataEnd()) {
                return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(new EndOfData(StopMode.DRAIN), false), nextType, 0, 0));
            } else if (input != null && input.isStreamRecord()) {
                StreamElement inputElement = input.getStreamRecord();
                delegate.setInstance(inputElement);
                ByteBuffer serializedRecord = RecordWriter.serializeRecord(dataOutputSerializer, delegate);
                BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
                BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
                bufferBuilder.appendAndCommit(serializedRecord);
                bufferBuilder.finish();
                bufferBuilder.close();
                // Build the readable buffer from the consumer; its size was fixed by finish() above.
                return Optional.of(new BufferAndAvailability(bufferConsumer.build(), nextType, 0, 0));
            } else if (input != null && input.isEvent()) {
                AbstractEvent event = input.getEvent();
                if (event instanceof EndOfPartitionEvent) {
                    inputChannels[channelIndex].setReleased();
                }
                return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(event, false), nextType, 0, 0));
            } else {
                return Optional.empty();
            }
        };
        inputChannels[channelIndex].addBufferAndAvailability(answer);
    }
    return inputChannels;
}
Also used : DataOutputSerializer(org.apache.flink.core.memory.DataOutputSerializer) EndOfPartitionEvent(org.apache.flink.runtime.io.network.api.EndOfPartitionEvent) BufferBuilderTestUtils.createBufferBuilder(org.apache.flink.runtime.io.network.buffer.BufferBuilderTestUtils.createBufferBuilder) BufferBuilder(org.apache.flink.runtime.io.network.buffer.BufferBuilder) StreamElement(org.apache.flink.streaming.runtime.streamrecord.StreamElement) SerializationDelegate(org.apache.flink.runtime.plugable.SerializationDelegate) AbstractEvent(org.apache.flink.runtime.event.AbstractEvent) ByteBuffer(java.nio.ByteBuffer) EndOfData(org.apache.flink.runtime.io.network.api.EndOfData) BufferAndAvailabilityProvider(org.apache.flink.runtime.io.network.partition.consumer.TestInputChannel.BufferAndAvailabilityProvider) BufferConsumer(org.apache.flink.runtime.io.network.buffer.BufferConsumer) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) BufferAndAvailability(org.apache.flink.runtime.io.network.partition.consumer.InputChannel.BufferAndAvailability)
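On the reading side, whether a buffer carries serialized records or a serialized event is decided by Buffer#isBuffer(), exactly as the assertions in Example 2 rely on. A small consumer-side sketch (class and method names are illustrative; readableBytes() is assumed from the Buffer interface, the other calls appear in the examples above):

import java.io.IOException;

import org.apache.flink.runtime.event.AbstractEvent;
import org.apache.flink.runtime.io.network.api.EndOfData;
import org.apache.flink.runtime.io.network.api.EndOfPartitionEvent;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;
import org.apache.flink.runtime.io.network.buffer.Buffer;

final class BufferDispatch {
    // Mirrors what a reader does with buffers produced by the provider above.
    static String describe(Buffer buffer, ClassLoader classLoader) throws IOException {
        if (buffer.isBuffer()) {
            return "data buffer, " + buffer.readableBytes() + " bytes of serialized records";
        }
        AbstractEvent event = EventSerializer.fromBuffer(buffer, classLoader);
        if (event instanceof EndOfData) {
            return "end of user records";  // produced for isDataEnd() inputs above
        } else if (event instanceof EndOfPartitionEvent) {
            return "end of partition";     // produced for isStreamEnd() inputs above
        }
        return "other event: " + event.getClass().getSimpleName();
    }
}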

Aggregations

EndOfData (org.apache.flink.runtime.io.network.api.EndOfData): 21 usages
Test (org.junit.Test): 12 usages
CheckpointOptions (org.apache.flink.runtime.checkpoint.CheckpointOptions): 6 usages
CheckpointBarrier (org.apache.flink.runtime.io.network.api.CheckpointBarrier): 6 usages
ResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter): 6 usages
ResultPartition (org.apache.flink.runtime.io.network.partition.ResultPartition): 6 usages
CompletingCheckpointResponder (org.apache.flink.streaming.util.CompletingCheckpointResponder): 6 usages
ArrayList (java.util.ArrayList): 5 usages
CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData): 5 usages
SourceOperatorFactory (org.apache.flink.streaming.api.operators.SourceOperatorFactory): 5 usages
IOException (java.io.IOException): 4 usages
EndOfPartitionEvent (org.apache.flink.runtime.io.network.api.EndOfPartitionEvent): 4 usages
StopMode (org.apache.flink.runtime.io.network.api.StopMode): 4 usages
OperatorID (org.apache.flink.runtime.jobgraph.OperatorID): 4 usages
Duration (java.time.Duration): 3 usages
ArrayDeque (java.util.ArrayDeque): 3 usages
Collections (java.util.Collections): 3 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 3 usages
JobID (org.apache.flink.api.common.JobID): 3 usages
BasicTypeInfo (org.apache.flink.api.common.typeinfo.BasicTypeInfo): 3 usages