
Example 21 with ResultPartitionWriter

Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

Class OutputBlockedInvokable, method invoke.

@Override
public void invoke() throws Exception {
    final IntValue value = new IntValue(1234);
    final ResultPartitionWriter resultPartitionWriter = getEnvironment().getWriter(0);
    final RecordWriter<IntValue> writer = new RecordWriterBuilder<IntValue>().build(resultPartitionWriter);
    // Emit the same value forever; the task blocks as soon as all network buffers are exhausted.
    while (true) {
        writer.emit(value);
    }
}
Also used : ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) IntValue(org.apache.flink.types.IntValue)
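
The loop above deliberately never flushes or closes the writer, since OutputBlockedInvokable exists to block once the network buffers are exhausted. For contrast, a bounded variant might look like the sketch below; it is not taken from the Flink sources and only reuses the RecordWriterBuilder and RecordWriter calls shown above, plus flushAll() and close(), which are assumed to be available on RecordWriter.

import org.apache.flink.runtime.io.network.api.writer.RecordWriter;
import org.apache.flink.runtime.io.network.api.writer.RecordWriterBuilder;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;
import org.apache.flink.types.IntValue;

final class BoundedEmitSketch {

    // Emits a fixed number of records and then releases the writer,
    // instead of looping forever like OutputBlockedInvokable.
    static void emitSome(ResultPartitionWriter partitionWriter, int count) throws Exception {
        final RecordWriter<IntValue> writer =
                new RecordWriterBuilder<IntValue>().build(partitionWriter);
        try {
            for (int i = 0; i < count; i++) {
                writer.emit(new IntValue(i)); // may still block briefly while buffers are full
            }
            writer.flushAll(); // push partially filled buffers downstream
        } finally {
            writer.close();
        }
    }
}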

Example 22 with ResultPartitionWriter

Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

Class StreamTask, method injectChannelStateWriterIntoChannels.

private void injectChannelStateWriterIntoChannels() {
    final Environment env = getEnvironment();
    final ChannelStateWriter channelStateWriter = subtaskCheckpointCoordinator.getChannelStateWriter();
    // Hand the channel state writer to every input gate, and to every output writer
    // that opts in by implementing ChannelStateHolder.
    for (final InputGate gate : env.getAllInputGates()) {
        gate.setChannelStateWriter(channelStateWriter);
    }
    for (ResultPartitionWriter writer : env.getAllWriters()) {
        if (writer instanceof ChannelStateHolder) {
            ((ChannelStateHolder) writer).setChannelStateWriter(channelStateWriter);
        }
    }
}
Also used : ChannelStateWriter(org.apache.flink.runtime.checkpoint.channel.ChannelStateWriter) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) Environment(org.apache.flink.runtime.execution.Environment) ChannelStateHolder(org.apache.flink.runtime.io.network.partition.ChannelStateHolder) InputGate(org.apache.flink.runtime.io.network.partition.consumer.InputGate) IndexedInputGate(org.apache.flink.runtime.io.network.partition.consumer.IndexedInputGate)
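
Only writers that implement ChannelStateHolder receive the ChannelStateWriter. The hypothetical class below (made up for illustration; only the ChannelStateHolder contract comes from the example above) shows how a component opts in to that injection.

import org.apache.flink.runtime.checkpoint.channel.ChannelStateWriter;
import org.apache.flink.runtime.io.network.partition.ChannelStateHolder;

final class ChannelStateAwareComponent implements ChannelStateHolder {

    private ChannelStateWriter channelStateWriter;

    @Override
    public void setChannelStateWriter(ChannelStateWriter channelStateWriter) {
        // Called once by StreamTask#injectChannelStateWriterIntoChannels before processing starts.
        if (this.channelStateWriter != null) {
            throw new IllegalStateException("ChannelStateWriter was already set");
        }
        this.channelStateWriter = channelStateWriter;
    }
}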

Example 23 with ResultPartitionWriter

Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

Class StreamTask, method endData.

protected void endData(StopMode mode) throws Exception {
    if (mode == StopMode.DRAIN) {
        advanceToEndOfEventTime();
    }
    // finish all operators in the chain in a cascading way
    operatorChain.finishOperators(actionExecutor, mode);
    this.finishedOperators = true;
    for (ResultPartitionWriter partitionWriter : getEnvironment().getAllWriters()) {
        partitionWriter.notifyEndOfData(mode);
    }
    this.endOfDataReceived = true;
}
Also used : ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter)
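
A test or custom task mimicking this shutdown sequence would notify and then finish each writer explicitly, roughly as sketched below. This is not Flink code; it assumes notifyEndOfData(StopMode), finish(), and close() are available on ResultPartitionWriter, as the examples on this page suggest.

import org.apache.flink.runtime.io.network.api.StopMode;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;

final class EndOfDataSketch {

    static void finishWriters(ResultPartitionWriter[] writers, StopMode mode) throws Exception {
        for (ResultPartitionWriter writer : writers) {
            writer.notifyEndOfData(mode); // tells downstream tasks that no further records will arrive
            writer.finish();              // marks the partition itself as finished
            writer.close();
        }
    }
}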

Example 24 with ResultPartitionWriter

Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

Class SubtaskCheckpointCoordinatorTest, method testBroadcastCancelCheckpointMarkerOnAbortingFromCoordinator.

@Test
public void testBroadcastCancelCheckpointMarkerOnAbortingFromCoordinator() throws Exception {
    OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(OneInputStreamTask::new, 1, 1, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    StreamConfig streamConfig = testHarness.getStreamConfig();
    streamConfig.setStreamOperator(new MapOperator());
    testHarness.invoke();
    testHarness.waitForTaskRunning();
    MockEnvironment mockEnvironment = MockEnvironment.builder().build();
    try (SubtaskCheckpointCoordinator subtaskCheckpointCoordinator = new MockSubtaskCheckpointCoordinatorBuilder().setEnvironment(mockEnvironment).build()) {
        ArrayList<Object> recordOrEvents = new ArrayList<>();
        StreamElementSerializer<String> stringStreamElementSerializer = new StreamElementSerializer<>(StringSerializer.INSTANCE);
        ResultPartitionWriter resultPartitionWriter = new RecordOrEventCollectingResultPartitionWriter<>(recordOrEvents, stringStreamElementSerializer);
        mockEnvironment.addOutputs(Collections.singletonList(resultPartitionWriter));
        OneInputStreamTask<String, String> task = testHarness.getTask();
        OperatorChain<String, OneInputStreamOperator<String, String>> operatorChain = new RegularOperatorChain<>(task, StreamTask.createRecordWriterDelegate(streamConfig, mockEnvironment));
        long checkpointId = 42L;
        // notify checkpoint aborted before execution.
        subtaskCheckpointCoordinator.notifyCheckpointAborted(checkpointId, operatorChain, () -> true);
        subtaskCheckpointCoordinator.checkpointState(new CheckpointMetaData(checkpointId, System.currentTimeMillis()), CheckpointOptions.forCheckpointWithDefaultLocation(), new CheckpointMetricsBuilder(), operatorChain, false, () -> false);
        assertEquals(1, recordOrEvents.size());
        Object recordOrEvent = recordOrEvents.get(0);
        // ensure CancelCheckpointMarker is broadcast downstream.
        assertTrue(recordOrEvent instanceof CancelCheckpointMarker);
        assertEquals(checkpointId, ((CancelCheckpointMarker) recordOrEvent).getCheckpointId());
        testHarness.endInput();
        testHarness.waitForTaskCompletion();
    }
}
Also used : CheckpointMetricsBuilder(org.apache.flink.runtime.checkpoint.CheckpointMetricsBuilder) RecordOrEventCollectingResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) ArrayList(java.util.ArrayList) CancelCheckpointMarker(org.apache.flink.runtime.io.network.api.CancelCheckpointMarker) StreamConfig(org.apache.flink.streaming.api.graph.StreamConfig) CheckpointMetaData(org.apache.flink.runtime.checkpoint.CheckpointMetaData) OneInputStreamOperator(org.apache.flink.streaming.api.operators.OneInputStreamOperator) MockEnvironment(org.apache.flink.runtime.operators.testutils.MockEnvironment) StreamElementSerializer(org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer) Test(org.junit.Test)
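
The assertions above rely on RecordOrEventCollectingResultPartitionWriter placing deserialized records and raw events into the same list. A small helper like the one below (hypothetical, for illustration only) shows one way to inspect such a mixed list.

import java.util.List;

import org.apache.flink.runtime.io.network.api.CancelCheckpointMarker;

final class CollectedOutputSketch {

    // Counts how many CancelCheckpointMarker events ended up in the collected output.
    static long countCancelMarkers(List<Object> recordOrEvents) {
        return recordOrEvents.stream()
                .filter(element -> element instanceof CancelCheckpointMarker)
                .count();
    }
}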

Example 25 with ResultPartitionWriter

Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

Class StreamTaskFinalCheckpointsTest, method testWaitingForFinalCheckpoint.

@Test
public void testWaitingForFinalCheckpoint() throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }
        int lastCheckpointId = 6;
        CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
        try (StreamTaskMailboxTestHarness<String> testHarness = createTestHarness(partitionWriters, checkpointResponder, false)) {
            // Tests triggering checkpoint when all the inputs are alive.
            CompletableFuture<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2);
            processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
            assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());
            // Tests triggering checkpoint after some inputs have received EndOfPartition.
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 0);
            checkpointFuture = triggerCheckpoint(testHarness, 4);
            processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
            assertEquals(4, testHarness.getTaskStateManager().getReportedCheckpointId());
            // Tests triggering a checkpoint after all the inputs have received
            // EndOfPartition.
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 1);
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 2);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 1);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 2);
            checkpointFuture = triggerCheckpoint(testHarness, lastCheckpointId);
            // Notifies the result partition that all records are processed after the
            // last checkpoint is triggered.
            checkpointFuture.thenAccept((ignored) -> {
                for (ResultPartition resultPartition : partitionWriters) {
                    resultPartition.onSubpartitionAllDataProcessed(0);
                }
            });
            // Checkpoint 6 should now be triggered and complete successfully.
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(6, testHarness.getTaskStateManager().getReportedCheckpointId());
            assertEquals(6, testHarness.getTaskStateManager().getNotifiedCompletedCheckpointId());
            // Each result partition should have emitted 3 barriers and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(4, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
Also used : EndOfData(org.apache.flink.runtime.io.network.api.EndOfData) CompletingCheckpointResponder(org.apache.flink.streaming.util.CompletingCheckpointResponder) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) ResultPartition(org.apache.flink.runtime.io.network.partition.ResultPartition) PipelinedResultPartition(org.apache.flink.runtime.io.network.partition.PipelinedResultPartition) Test(org.junit.Test)
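
Creating and tearing down pipelined partitions is a recurring pattern in these tests. A compact sketch of the setup half, using the same test utilities as above, could look like this; the PartitionTestUtils import path is an assumption based on the Flink test sources.

import org.apache.flink.runtime.io.network.partition.PartitionTestUtils;
import org.apache.flink.runtime.io.network.partition.ResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;

final class PartitionLifecycleSketch {

    static ResultPartition[] createPipelinedPartitions(int count) throws Exception {
        ResultPartition[] partitions = new ResultPartition[count];
        for (int i = 0; i < count; i++) {
            partitions[i] = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitions[i].setup(); // allocates buffer pools; pair with close() in a finally block
        }
        return partitions;
    }
}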

Aggregations

ResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter): 37
ResultPartition (org.apache.flink.runtime.io.network.partition.ResultPartition): 12
JobID (org.apache.flink.api.common.JobID): 11
IOException (java.io.IOException): 10
Test (org.junit.Test): 10
CompletingCheckpointResponder (org.apache.flink.streaming.util.CompletingCheckpointResponder): 8
FlinkRuntimeException (org.apache.flink.util.FlinkRuntimeException): 8
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID): 7
EndOfData (org.apache.flink.runtime.io.network.api.EndOfData): 7
CompletableFuture (java.util.concurrent.CompletableFuture): 6
CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData): 6
CancelTaskException (org.apache.flink.runtime.execution.CancelTaskException): 6
StreamRecord (org.apache.flink.streaming.runtime.streamrecord.StreamRecord): 6
ArrayList (java.util.ArrayList): 5
Future (java.util.concurrent.Future): 5
CheckpointMetrics (org.apache.flink.runtime.checkpoint.CheckpointMetrics): 5
CheckpointOptions (org.apache.flink.runtime.checkpoint.CheckpointOptions): 5
SavepointType (org.apache.flink.runtime.checkpoint.SavepointType): 5
TaskStateSnapshot (org.apache.flink.runtime.checkpoint.TaskStateSnapshot): 5
StopMode (org.apache.flink.runtime.io.network.api.StopMode): 5