use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
the class MultipleInputStreamTaskChainedSourcesCheckpointingTest method testTriggerCheckpointWithFinishedChannelsAndSourceChain.
private void testTriggerCheckpointWithFinishedChannelsAndSourceChain(
        CheckpointOptions checkpointOptions) throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] =
                    PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }
        CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
        try (StreamTaskMailboxTestHarness<String> testHarness =
                new StreamTaskMailboxTestHarnessBuilder<>(
                                MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                        .modifyStreamConfig(
                                config -> {
                                    config.setCheckpointingEnabled(true);
                                    config.setUnalignedCheckpointsEnabled(
                                            checkpointOptions.isUnalignedCheckpoint()
                                                    || checkpointOptions.isTimeoutable());
                                })
                        .modifyExecutionConfig(applyObjectReuse(objectReuse))
                        .setCheckpointResponder(checkpointResponder)
                        .addInput(BasicTypeInfo.INT_TYPE_INFO)
                        .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                        .addSourceInput(
                                new SourceOperatorFactory<>(
                                        new MultipleInputStreamTaskTest.LifeCycleTrackingMockSource(
                                                Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                        WatermarkStrategy.noWatermarks()),
                                BasicTypeInfo.INT_TYPE_INFO)
                        .addSourceInput(
                                new SourceOperatorFactory<>(
                                        new MultipleInputStreamTaskTest.LifeCycleTrackingMockSource(
                                                Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                        WatermarkStrategy.noWatermarks()),
                                BasicTypeInfo.INT_TYPE_INFO)
                        .addAdditionalOutput(partitionWriters)
                        .setupOperatorChain(new MapToStringMultipleInputOperatorFactory(4))
                        .finishForSingletonOperatorChain(StringSerializer.INSTANCE)
                        .build()) {
            checkpointResponder.setHandlers(
                    testHarness.streamTask::notifyCheckpointCompleteAsync,
                    testHarness.streamTask::notifyCheckpointAbortAsync);
            testHarness.getStreamTask().getCheckpointBarrierHandler().get();
            CompletableFuture<Boolean> checkpointFuture =
                    triggerCheckpoint(testHarness, 2, checkpointOptions);
            testHarness.processAll();

            // Checkpoint 2 is aligned only after all the EndOfPartitionEvents have been received.
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 1, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 1, 0);
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());
            // Tests triggering a checkpoint after all the inputs have received EndOfPartition.
            checkpointFuture = triggerCheckpoint(testHarness, 4, checkpointOptions);

            // Notify the result partitions that all records have been processed once the
            // last checkpoint is triggered.
            checkpointFuture.thenAccept(
                    (ignored) -> {
                        for (ResultPartition resultPartition : partitionWriters) {
                            resultPartition.onSubpartitionAllDataProcessed(0);
                        }
                    });
            // Checkpoint 4 should now complete successfully.
            testHarness.processAll();
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(4, testHarness.getTaskStateManager().getReportedCheckpointId());

            // Each result partition should have emitted 2 barriers and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(3, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
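The create/setup/use/close pattern around partitionWriters recurs throughout these snippets. A minimal standalone sketch of that ResultPartition lifecycle, using only the test-scope utilities already shown (the package locations follow the Flink test code; treat them as assumptions outside this context):

import org.apache.flink.runtime.io.network.partition.PartitionTestUtils;
import org.apache.flink.runtime.io.network.partition.ResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;

// Sketch: the ResultPartition lifecycle these tests depend on.
public class ResultPartitionLifecycleSketch {
    public static void main(String[] args) throws Exception {
        ResultPartition partition = null;
        try {
            // Create an in-memory pipelined partition via the test utilities.
            partition = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            // setup() assigns the buffer pool; it must run before any records are emitted.
            partition.setup();
            // Simulate the downstream acknowledgement that subpartition 0 is fully
            // consumed; the final checkpoint of a finished task waits for this signal.
            partition.onSubpartitionAllDataProcessed(0);
            System.out.println("queued buffers: " + partition.getNumberOfQueuedBuffers());
        } finally {
            if (partition != null) {
                partition.close();
            }
        }
    }
}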
use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
the class SourceStreamTaskTest method testTriggeringCheckpointAfterSourceThreadFinished.
@Test
public void testTriggeringCheckpointAfterSourceThreadFinished() throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try (NettyShuffleEnvironment env =
            new NettyShuffleEnvironmentBuilder()
                    .setNumNetworkBuffers(partitionWriters.length * 2)
                    .build()) {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] =
                    PartitionTestUtils.createPartition(
                            env, ResultPartitionType.PIPELINED_BOUNDED, 1);
            partitionWriters[i].setup();
        }
        final CompletableFuture<Long> checkpointCompleted = new CompletableFuture<>();
        try (StreamTaskMailboxTestHarness<String> testHarness =
                new StreamTaskMailboxTestHarnessBuilder<>(
                                SourceStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                        .modifyStreamConfig(config -> config.setCheckpointingEnabled(true))
                        .setCheckpointResponder(
                                new TestCheckpointResponder() {
                                    @Override
                                    public void acknowledgeCheckpoint(
                                            JobID jobID,
                                            ExecutionAttemptID executionAttemptID,
                                            long checkpointId,
                                            CheckpointMetrics checkpointMetrics,
                                            TaskStateSnapshot subtaskState) {
                                        super.acknowledgeCheckpoint(
                                                jobID,
                                                executionAttemptID,
                                                checkpointId,
                                                checkpointMetrics,
                                                subtaskState);
                                        checkpointCompleted.complete(checkpointId);
                                    }
                                })
                        .addAdditionalOutput(partitionWriters)
                        .setupOperatorChain(new StreamSource<>(new MockSource(0, 0, 1)))
                        .finishForSingletonOperatorChain(StringSerializer.INSTANCE)
                        .build()) {
            testHarness.processAll();
            CompletableFuture<Void> taskFinished =
                    testHarness.getStreamTask().getCompletionFuture();
            do {
                testHarness.processAll();
            } while (!taskFinished.isDone());

            Future<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2);

            // Notify the result partitions that all records have been processed once the
            // last checkpoint is triggered.
            checkState(
                    checkpointFuture instanceof CompletableFuture,
                    "The trigger future should also be a CompletableFuture.");
            ((CompletableFuture<?>) checkpointFuture)
                    .thenAccept(
                            (ignored) -> {
                                for (ResultPartition resultPartition : partitionWriters) {
                                    resultPartition.onSubpartitionAllDataProcessed(0);
                                }
                            });
            checkpointCompleted.whenComplete(
                    (id, error) ->
                            testHarness.getStreamTask().notifyCheckpointCompleteAsync(2));
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            // Each result partition should have emitted 1 barrier, 1 max watermark,
            // and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(3, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
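The environment wiring at the top of this test can be read in isolation. A minimal sketch, assuming the same test-scope builders (NettyShuffleEnvironmentBuilder and PartitionTestUtils) are on the classpath:

import org.apache.flink.runtime.io.network.NettyShuffleEnvironment;
import org.apache.flink.runtime.io.network.NettyShuffleEnvironmentBuilder;
import org.apache.flink.runtime.io.network.partition.PartitionTestUtils;
import org.apache.flink.runtime.io.network.partition.ResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;

// Sketch: size the network buffer pool to what the writers need and tie the
// partitions to the environment, mirroring the setup of the test above.
public class ShuffleEnvironmentSetupSketch {
    public static void main(String[] args) throws Exception {
        final int numWriters = 2;
        try (NettyShuffleEnvironment env =
                new NettyShuffleEnvironmentBuilder()
                        .setNumNetworkBuffers(numWriters * 2) // 2 buffers per single-subpartition writer
                        .build()) {
            ResultPartition[] writers = new ResultPartition[numWriters];
            try {
                for (int i = 0; i < writers.length; i++) {
                    writers[i] =
                            PartitionTestUtils.createPartition(
                                    env, ResultPartitionType.PIPELINED_BOUNDED, 1);
                    writers[i].setup();
                }
            } finally {
                for (ResultPartition writer : writers) {
                    if (writer != null) {
                        writer.close();
                    }
                }
            }
        }
    }
}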
use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
the class MultipleInputStreamTaskTest method testTriggeringCheckpointWithFinishedChannels.
private void testTriggeringCheckpointWithFinishedChannels(CheckpointOptions checkpointOptions)
        throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] =
                    PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }
        CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
        try (StreamTaskMailboxTestHarness<String> testHarness =
                new StreamTaskMailboxTestHarnessBuilder<>(
                                MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                        .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                        .addInput(BasicTypeInfo.INT_TYPE_INFO)
                        .addInput(BasicTypeInfo.DOUBLE_TYPE_INFO)
                        .addAdditionalOutput(partitionWriters)
                        .setCheckpointResponder(checkpointResponder)
                        .modifyStreamConfig(
                                config -> {
                                    config.setCheckpointingEnabled(true);
                                    config.setUnalignedCheckpointsEnabled(
                                            checkpointOptions.isUnalignedCheckpoint()
                                                    || checkpointOptions.isTimeoutable());
                                })
                        .setupOperatorChain(new MapToStringMultipleInputOperatorFactory(3))
                        .finishForSingletonOperatorChain(StringSerializer.INSTANCE)
                        .build()) {
            checkpointResponder.setHandlers(
                    testHarness.streamTask::notifyCheckpointCompleteAsync,
                    testHarness.streamTask::notifyCheckpointAbortAsync);
            testHarness.getStreamTask().getCheckpointBarrierHandler().get();

            // Tests triggering a checkpoint while all the inputs are still alive.
            CompletableFuture<Boolean> checkpointFuture =
                    triggerCheckpoint(testHarness, 2, checkpointOptions);
            processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
            assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());

            // Tests triggering a checkpoint after some inputs have received EndOfPartition.
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 0);
            checkpointFuture = triggerCheckpoint(testHarness, 4, checkpointOptions);
            processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
            assertEquals(4, testHarness.getTaskStateManager().getReportedCheckpointId());

            // Tests triggering a checkpoint after all the inputs have received EndOfPartition.
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 1, 0);
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 2, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 1, 0);
            testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 2, 0);
            checkpointFuture = triggerCheckpoint(testHarness, 6, checkpointOptions);

            // Notify the result partitions that all records have been processed once the
            // last checkpoint is triggered.
            checkpointFuture.thenAccept(
                    (ignored) -> {
                        for (ResultPartition resultPartition : partitionWriters) {
                            resultPartition.onSubpartitionAllDataProcessed(0);
                        }
                    });
            // Checkpoint 6 should now complete successfully.
            testHarness.processAll();
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(6, testHarness.getTaskStateManager().getReportedCheckpointId());

            // Each result partition should have emitted 3 barriers and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(4, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
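All three stream-task tests above share one idiom: chain the end-of-data acknowledgement onto the checkpoint trigger future, because the final checkpoint of a finished task completes only once every subpartition reports that its data was consumed. Isolated as a sketch (checkpointFuture stands in for whatever the harness's triggerCheckpoint helper returns):

import java.util.concurrent.CompletableFuture;

import org.apache.flink.runtime.io.network.partition.ResultPartition;

// Sketch: once the checkpoint has been triggered, acknowledge that subpartition 0
// of every writer has been fully consumed downstream, unblocking the checkpoint.
public final class AcknowledgeOnTrigger {
    private AcknowledgeOnTrigger() {}

    public static void install(
            CompletableFuture<?> checkpointFuture, ResultPartition[] partitionWriters) {
        checkpointFuture.thenAccept(
                ignored -> {
                    for (ResultPartition resultPartition : partitionWriters) {
                        resultPartition.onSubpartitionAllDataProcessed(0);
                    }
                });
    }
}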
use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
the class NettyShuffleMetricFactory method registerLegacyNetworkMetrics.
/**
 * Registers the legacy network metric groups that predate the shuffle service refactoring.
 *
 * <p>Registers the legacy metric groups only if the shuffle service implementation is the
 * original default one.
 *
 * @deprecated should be removed in the future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public static void registerLegacyNetworkMetrics(
        boolean isDetailedMetrics,
        MetricGroup metricGroup,
        ResultPartitionWriter[] producedPartitions,
        InputGate[] inputGates) {
    checkNotNull(metricGroup);
    checkNotNull(producedPartitions);
    checkNotNull(inputGates);

    // add metrics for buffers
    final MetricGroup buffersGroup = metricGroup.addGroup(METRIC_GROUP_BUFFERS_DEPRECATED);

    // similar to MetricUtils.instantiateNetworkMetrics() but inside this IOMetricGroup
    // (metricGroup)
    final MetricGroup networkGroup = metricGroup.addGroup(METRIC_GROUP_NETWORK_DEPRECATED);
    final MetricGroup outputGroup = networkGroup.addGroup(METRIC_GROUP_OUTPUT);
    final MetricGroup inputGroup = networkGroup.addGroup(METRIC_GROUP_INPUT);

    ResultPartition[] resultPartitions =
            Arrays.copyOf(producedPartitions, producedPartitions.length, ResultPartition[].class);
    registerOutputMetrics(isDetailedMetrics, outputGroup, buffersGroup, resultPartitions);

    SingleInputGate[] singleInputGates =
            Arrays.copyOf(inputGates, inputGates.length, SingleInputGate[].class);
    registerInputMetrics(isDetailedMetrics, inputGroup, buffersGroup, singleInputGates);
}
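A hypothetical call site for this method might look like the sketch below (the taskIOMetricGroup, partitions, and gates names are placeholders, not part of the Flink API). Note that the arrays must hold only ResultPartition and SingleInputGate instances, since the Arrays.copyOf downcasts above throw ArrayStoreException otherwise.

import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;
import org.apache.flink.runtime.io.network.metrics.NettyShuffleMetricFactory;
import org.apache.flink.runtime.io.network.partition.consumer.InputGate;

// Hypothetical wrapper showing how the deprecated registration would be invoked
// during task setup; all arguments are assumed to come from the surrounding code.
public class LegacyMetricsRegistration {
    public static void register(
            MetricGroup taskIOMetricGroup,
            ResultPartitionWriter[] partitions, // must contain only ResultPartition instances
            InputGate[] gates) {                // must contain only SingleInputGate instances
        NettyShuffleMetricFactory.registerLegacyNetworkMetrics(
                false /* isDetailedMetrics */, taskIOMetricGroup, partitions, gates);
    }
}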
use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
the class NettyShuffleEnvironmentTest method testRegisterTaskWithLimitedBuffers.
private void testRegisterTaskWithLimitedBuffers(int bufferPoolSize) throws Exception {
    final NettyShuffleEnvironment network =
            new NettyShuffleEnvironmentBuilder().setNumNetworkBuffers(bufferPoolSize).build();
    final ConnectionManager connManager = createDummyConnectionManager();

    int channels = 2;
    int rp4Channels = 4;
    int floatingBuffers = network.getConfiguration().floatingNetworkBuffersPerGate();
    int exclusiveBuffers = network.getConfiguration().networkBuffersPerChannel();
    int expectedBuffers = channels * exclusiveBuffers + floatingBuffers;
    int expectedRp4Buffers = rp4Channels * exclusiveBuffers + floatingBuffers;

    // result partitions
    ResultPartition rp1 = createPartition(network, ResultPartitionType.PIPELINED, channels);
    ResultPartition rp2 =
            createPartition(network, fileChannelManager, ResultPartitionType.BLOCKING, channels);
    ResultPartition rp3 =
            createPartition(network, ResultPartitionType.PIPELINED_BOUNDED, channels);
    ResultPartition rp4 =
            createPartition(network, ResultPartitionType.PIPELINED_BOUNDED, rp4Channels);
    final ResultPartition[] resultPartitions = new ResultPartition[] {rp1, rp2, rp3, rp4};
    // input gates
    SingleInputGate ig1 = createSingleInputGate(network, ResultPartitionType.PIPELINED, channels);
    SingleInputGate ig2 = createSingleInputGate(network, ResultPartitionType.BLOCKING, channels);
    SingleInputGate ig3 =
            createSingleInputGate(network, ResultPartitionType.PIPELINED_BOUNDED, channels);
    SingleInputGate ig4 =
            createSingleInputGate(network, ResultPartitionType.PIPELINED_BOUNDED, rp4Channels);
    InputChannel[] ic1 = new InputChannel[channels];
    InputChannel[] ic2 = new InputChannel[channels];
    InputChannel[] ic3 = new InputChannel[channels];
    InputChannel[] ic4 = new InputChannel[rp4Channels];
    final SingleInputGate[] inputGates = new SingleInputGate[] {ig1, ig2, ig3, ig4};

    ic4[0] = createRemoteInputChannel(ig4, 0, rp1, connManager);
    ic4[1] = createRemoteInputChannel(ig4, 0, rp2, connManager);
    ic4[2] = createRemoteInputChannel(ig4, 0, rp3, connManager);
    ic4[3] = createRemoteInputChannel(ig4, 0, rp4, connManager);
    ig4.setInputChannels(ic4);

    ic1[0] = createRemoteInputChannel(ig1, 1, rp1, connManager);
    ic1[1] = createRemoteInputChannel(ig1, 1, rp4, connManager);
    ig1.setInputChannels(ic1);

    ic2[0] = createRemoteInputChannel(ig2, 1, rp2, connManager);
    ic2[1] = createRemoteInputChannel(ig2, 2, rp4, connManager);
    ig2.setInputChannels(ic2);

    ic3[0] = createRemoteInputChannel(ig3, 1, rp3, connManager);
    ic3[1] = createRemoteInputChannel(ig3, 3, rp4, connManager);
    ig3.setInputChannels(ic3);

    Task.setupPartitionsAndGates(resultPartitions, inputGates);
    // verify buffer pools for the result partitions
    assertEquals(Integer.MAX_VALUE, rp1.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(Integer.MAX_VALUE, rp2.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(expectedBuffers, rp3.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(expectedRp4Buffers, rp4.getBufferPool().getMaxNumberOfMemorySegments());

    for (ResultPartition rp : resultPartitions) {
        assertEquals(
                rp.getNumberOfSubpartitions() + 1,
                rp.getBufferPool().getNumberOfRequiredMemorySegments());
        assertEquals(rp.getNumberOfSubpartitions() + 1, rp.getBufferPool().getNumBuffers());
    }
    // verify buffer pools for the input gates (NOTE: with credit-based flow control the pool
    // holds only the minimum required buffers; exclusive buffers are not managed by the pool)
    assertEquals(1, ig1.getBufferPool().getNumberOfRequiredMemorySegments());
    assertEquals(1, ig2.getBufferPool().getNumberOfRequiredMemorySegments());
    assertEquals(1, ig3.getBufferPool().getNumberOfRequiredMemorySegments());
    assertEquals(1, ig4.getBufferPool().getNumberOfRequiredMemorySegments());
    assertEquals(floatingBuffers, ig1.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(floatingBuffers, ig2.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(floatingBuffers, ig3.getBufferPool().getMaxNumberOfMemorySegments());
    assertEquals(floatingBuffers, ig4.getBufferPool().getMaxNumberOfMemorySegments());

    verify(ig1, times(1)).setupChannels();
    verify(ig2, times(1)).setupChannels();
    verify(ig3, times(1)).setupChannels();
    verify(ig4, times(1)).setupChannels();

    for (ResultPartition rp : resultPartitions) {
        rp.release();
    }
    for (SingleInputGate ig : inputGates) {
        ig.close();
    }
    network.close();
}
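The expected pool sizes in this test are plain arithmetic over the environment's configuration. As a worked example with Flink's usual defaults (2 exclusive buffers per channel and 8 floating buffers per gate; these particular numbers are assumptions here, the test itself reads them from network.getConfiguration()):

// Worked example of the pool-size arithmetic checked by the assertions above.
public class BufferPoolArithmetic {
    public static void main(String[] args) {
        int exclusiveBuffers = 2; // assumed default for networkBuffersPerChannel()
        int floatingBuffers = 8;  // assumed default for floatingNetworkBuffersPerGate()

        // Max pool size of a PIPELINED_BOUNDED partition: one exclusive set per
        // channel plus the gate-wide floating buffers.
        int expectedBuffers = 2 * exclusiveBuffers + floatingBuffers;    // 2 channels -> 12
        int expectedRp4Buffers = 4 * exclusiveBuffers + floatingBuffers; // 4 channels -> 16

        System.out.println(expectedBuffers + " / " + expectedRp4Buffers);
    }
}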