
Example 31 with ResultPartitionWriter

use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

the class ResultPartitionTest method testNotifyPartitionDataAvailable.

private void testNotifyPartitionDataAvailable(FutureConsumerWithException<ResultPartitionWriter, Exception> notificationCall) throws Exception {
    JobID jobId = new JobID();
    TaskActions taskActions = new NoOpTaskActions();
    {
        // Pipelined, send message => notify
        TestResultPartitionConsumableNotifier notifier = new TestResultPartitionConsumableNotifier();
        ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(ResultPartitionType.PIPELINED, taskActions, jobId, notifier);
        notificationCall.accept(consumableNotifyingPartitionWriter);
        notifier.check(jobId, consumableNotifyingPartitionWriter.getPartitionId(), taskActions, 1);
    }
    {
        // Blocking, send message => don't notify
        TestResultPartitionConsumableNotifier notifier = new TestResultPartitionConsumableNotifier();
        ResultPartitionWriter partition = createConsumableNotifyingResultPartitionWriter(ResultPartitionType.BLOCKING, taskActions, jobId, notifier);
        notificationCall.accept(partition);
        notifier.check(null, null, null, 0);
    }
}
Also used : ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) NoOpTaskActions(org.apache.flink.runtime.taskmanager.NoOpTaskActions) TaskActions(org.apache.flink.runtime.taskmanager.TaskActions) JobID(org.apache.flink.api.common.JobID)
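
The helper above is parameterized over the write operation that should trigger the consumable notification; the concrete test methods are not reproduced on this page. A hypothetical caller (illustrative only, with an assumed bufferSize test constant) might pass record-writing lambdas such as:

// Hypothetical invocations of the helper above; the actual @Test methods are not shown here.
// emitRecord/broadcastRecord are buffer-writing operations that should trigger the
// consumable notification for PIPELINED partitions (bufferSize is an assumed test constant).
testNotifyPartitionDataAvailable(writer -> writer.emitRecord(ByteBuffer.allocate(bufferSize), 0));
testNotifyPartitionDataAvailable(writer -> writer.broadcastRecord(ByteBuffer.allocate(bufferSize)));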

Example 32 with ResultPartitionWriter

use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

the class StreamNetworkPointToPointBenchmark method setUp.

/**
 * Initializes the throughput benchmark with the given parameters.
 *
 * @param flushTimeout output flushing interval of the {@link
 *     org.apache.flink.runtime.io.network.api.writer.RecordWriter}'s output flusher thread
 */
public void setUp(long flushTimeout, Configuration config) throws Exception {
    environment = new StreamNetworkBenchmarkEnvironment<>();
    environment.setUp(1, 1, false, -1, -1, config);
    ResultPartitionWriter resultPartitionWriter = environment.createResultPartitionWriter(0);
    recordWriter = new RecordWriterBuilder().setTimeout(flushTimeout).build(resultPartitionWriter);
    receiver = environment.createReceiver();
}
Also used : RecordWriterBuilder(org.apache.flink.runtime.io.network.api.writer.RecordWriterBuilder) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter)
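
The driver loop that actually exercises this point-to-point setup is not part of setUp(). A minimal sketch, assuming LongValue records and the recordWriter/receiver fields created above, could look like this:

// Minimal sketch, not the benchmark's real driver: push a batch of LongValue records
// through the RecordWriter built in setUp() and flush any partially filled buffers.
LongValue value = new LongValue();
for (long i = 0; i < 1_000; i++) {
    value.setValue(i);
    recordWriter.emit(value);
}
recordWriter.flushAll();
// The receiver created by environment.createReceiver() consumes the records on the other side.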

Example 33 with ResultPartitionWriter

use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

the class StreamNetworkThroughputBenchmark method setUp.

/**
 * Initializes the throughput benchmark with the given parameters.
 *
 * @param recordWriters number of senders, i.e. {@link
 *     org.apache.flink.runtime.io.network.api.writer.RecordWriter} instances
 * @param channels number of outgoing channels / receivers
 */
public void setUp(int recordWriters, int channels, int flushTimeout, boolean broadcastMode, boolean localMode, int senderBufferPoolSize, int receiverBufferPoolSize, Configuration config) throws Exception {
    environment = new StreamNetworkBenchmarkEnvironment<>();
    environment.setUp(recordWriters, channels, localMode, senderBufferPoolSize, receiverBufferPoolSize, config);
    writerThreads = new LongRecordWriterThread[recordWriters];
    for (int writer = 0; writer < recordWriters; writer++) {
        ResultPartitionWriter resultPartitionWriter = environment.createResultPartitionWriter(writer);
        RecordWriterBuilder recordWriterBuilder = new RecordWriterBuilder().setTimeout(flushTimeout);
        setChannelSelector(recordWriterBuilder, broadcastMode);
        writerThreads[writer] = new LongRecordWriterThread(recordWriterBuilder.build(resultPartitionWriter), broadcastMode);
        writerThreads[writer].start();
    }
    receiver = environment.createReceiver();
}
Also used : RecordWriterBuilder(org.apache.flink.runtime.io.network.api.writer.RecordWriterBuilder) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter)
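
setChannelSelector is a helper on the benchmark class whose body is not shown on this page. A hypothetical sketch of what it might do, switching the writer into broadcast mode by installing a ChannelSelector whose isBroadcast() returns true, is shown below; the actual implementation may differ:

// Hypothetical shape of the setChannelSelector helper used above (illustrative only).
private static void setChannelSelector(RecordWriterBuilder recordWriterBuilder, boolean broadcastMode) {
    if (broadcastMode) {
        recordWriterBuilder.setChannelSelector(new ChannelSelector<IOReadableWritable>() {
            @Override
            public void setup(int numberOfChannels) {}

            @Override
            public int selectChannel(IOReadableWritable record) {
                // A broadcast selector never picks a single target channel.
                throw new UnsupportedOperationException("Broadcast selector has no single target channel");
            }

            @Override
            public boolean isBroadcast() {
                return true;
            }
        });
    }
    // Otherwise the builder's default (round-robin) channel selector is kept.
}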

Example 34 with ResultPartitionWriter

use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

the class StreamNetworkBenchmarkEnvironment method createResultPartitionWriter.

public ResultPartitionWriter createResultPartitionWriter(int partitionIndex) throws Exception {
    ResultPartitionWriter resultPartitionWriter =
            new ResultPartitionBuilder()
                    .setResultPartitionId(partitionIds[partitionIndex])
                    .setResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
                    .setNumberOfSubpartitions(channels)
                    .setResultPartitionManager(senderEnv.getResultPartitionManager())
                    .setupBufferPoolFactoryFromNettyShuffleEnvironment(senderEnv)
                    .build();
    resultPartitionWriter.setup();
    return resultPartitionWriter;
}
Also used : ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) ResultPartitionBuilder(org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder)
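
The returned writer is already set up, so a caller only needs to write to it, finish it, and eventually close it. An illustrative fragment (not part of the benchmark environment itself):

// Illustrative only: write one serialized record to subpartition 0 of the writer
// created above, then mark the partition as finished and release its buffers.
ResultPartitionWriter writer = environment.createResultPartitionWriter(0);
writer.emitRecord(ByteBuffer.wrap(new byte[128]), 0);
writer.finish();
writer.close();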

Example 35 with ResultPartitionWriter

use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.

the class MultipleInputStreamTaskChainedSourcesCheckpointingTest method testTriggerCheckpointWithFinishedChannelsAndSourceChain.

private void testTriggerCheckpointWithFinishedChannelsAndSourceChain(CheckpointOptions checkpointOptions) throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }
        CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
        try (StreamTaskMailboxTestHarness<String> testHarness =
                new StreamTaskMailboxTestHarnessBuilder<>(
                                MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                        .modifyStreamConfig(config -> {
                            config.setCheckpointingEnabled(true);
                            config.setUnalignedCheckpointsEnabled(
                                    checkpointOptions.isUnalignedCheckpoint()
                                            || checkpointOptions.isTimeoutable());
                        })
                        .modifyExecutionConfig(applyObjectReuse(objectReuse))
                        .setCheckpointResponder(checkpointResponder)
                        .addInput(BasicTypeInfo.INT_TYPE_INFO)
                        .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                        .addSourceInput(
                                new SourceOperatorFactory<>(
                                        new MultipleInputStreamTaskTest.LifeCycleTrackingMockSource(
                                                Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                        WatermarkStrategy.noWatermarks()),
                                BasicTypeInfo.INT_TYPE_INFO)
                        .addSourceInput(
                                new SourceOperatorFactory<>(
                                        new MultipleInputStreamTaskTest.LifeCycleTrackingMockSource(
                                                Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                        WatermarkStrategy.noWatermarks()),
                                BasicTypeInfo.INT_TYPE_INFO)
                        .addAdditionalOutput(partitionWriters)
                        .setupOperatorChain(new MapToStringMultipleInputOperatorFactory(4))
                        .finishForSingletonOperatorChain(StringSerializer.INSTANCE)
                        .build()) {
            checkpointResponder.setHandlers(testHarness.streamTask::notifyCheckpointCompleteAsync, testHarness.streamTask::notifyCheckpointAbortAsync);
            testHarness.getStreamTask().getCheckpointBarrierHandler().get();
            CompletableFuture<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2, checkpointOptions);
            testHarness.processAll();
            // Checkpoint 2 would only be aligned after all the EndOfPartitionEvents have been received.
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 1, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 1, 0);
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());
            // Tests triggering checkpoint after all the inputs have received EndOfPartition.
            checkpointFuture = triggerCheckpoint(testHarness, 4, checkpointOptions);
            // Notifies the result partition that all records are processed after the
            // last checkpoint is triggered.
            checkpointFuture.thenAccept((ignored) -> {
                for (ResultPartition resultPartition : partitionWriters) {
                    resultPartition.onSubpartitionAllDataProcessed(0);
                }
            });
            // Checkpoint 4 would then be triggered successfully.
            testHarness.processAll();
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(4, testHarness.getTaskStateManager().getReportedCheckpointId());
            // Each result partition should have emitted 2 barriers and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(3, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
Also used : CompletingCheckpointResponder(org.apache.flink.streaming.util.CompletingCheckpointResponder) RecordOrEventCollectingResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.RecordOrEventCollectingResultPartitionWriter) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) SourceOperatorFactory(org.apache.flink.streaming.api.operators.SourceOperatorFactory) StreamTaskFinalCheckpointsTest.triggerCheckpoint(org.apache.flink.streaming.runtime.tasks.StreamTaskFinalCheckpointsTest.triggerCheckpoint) ResultPartition(org.apache.flink.runtime.io.network.partition.ResultPartition) EndOfData(org.apache.flink.runtime.io.network.api.EndOfData) MapToStringMultipleInputOperatorFactory(org.apache.flink.streaming.runtime.tasks.MultipleInputStreamTaskTest.MapToStringMultipleInputOperatorFactory)
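
The private helper above is driven by test methods that are not included on this page; they pass aligned, unaligned, and timeoutable CheckpointOptions variants. A minimal, illustrative invocation with default aligned options would be:

// Illustrative only: drive the helper with default aligned checkpoint options.
// The real tests also cover unaligned and timeoutable variants.
testTriggerCheckpointWithFinishedChannelsAndSourceChain(
        CheckpointOptions.forCheckpointWithDefaultLocation());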

Aggregations

ResultPartitionWriter (org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) 37
ResultPartition (org.apache.flink.runtime.io.network.partition.ResultPartition) 12
JobID (org.apache.flink.api.common.JobID) 11
IOException (java.io.IOException) 10
Test (org.junit.Test) 10
CompletingCheckpointResponder (org.apache.flink.streaming.util.CompletingCheckpointResponder) 8
FlinkRuntimeException (org.apache.flink.util.FlinkRuntimeException) 8
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID) 7
EndOfData (org.apache.flink.runtime.io.network.api.EndOfData) 7
CompletableFuture (java.util.concurrent.CompletableFuture) 6
CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData) 6
CancelTaskException (org.apache.flink.runtime.execution.CancelTaskException) 6
StreamRecord (org.apache.flink.streaming.runtime.streamrecord.StreamRecord) 6
ArrayList (java.util.ArrayList) 5
Future (java.util.concurrent.Future) 5
CheckpointMetrics (org.apache.flink.runtime.checkpoint.CheckpointMetrics) 5
CheckpointOptions (org.apache.flink.runtime.checkpoint.CheckpointOptions) 5
SavepointType (org.apache.flink.runtime.checkpoint.SavepointType) 5
TaskStateSnapshot (org.apache.flink.runtime.checkpoint.TaskStateSnapshot) 5
StopMode (org.apache.flink.runtime.io.network.api.StopMode) 5