Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class StreamTaskFinalCheckpointsTest, method testReportOperatorsFinishedInCheckpoint:
@Test
public void testReportOperatorsFinishedInCheckpoint() throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] =
                    PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }
        final CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
        try (StreamTaskMailboxTestHarness<String> testHarness =
                new StreamTaskMailboxTestHarnessBuilder<>(
                                OneInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                        .addInput(BasicTypeInfo.STRING_TYPE_INFO, 1)
                        .addAdditionalOutput(partitionWriters)
                        .setCheckpointResponder(checkpointResponder)
                        .modifyStreamConfig(config -> config.setCheckpointingEnabled(true))
                        .setupOperatorChain(new StatefulOperator())
                        .finishForSingletonOperatorChain(StringSerializer.INSTANCE)
                        .build()) {
            checkpointResponder.setHandlers(
                    testHarness.streamTask::notifyCheckpointCompleteAsync,
                    testHarness.streamTask::notifyCheckpointAbortAsync);
            // Trigger the first checkpoint before we call the operators' finish method.
            CompletableFuture<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2);
            processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
            assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());
            assertFalse(
                    testHarness
                            .getTaskStateManager()
                            .getJobManagerTaskStateSnapshotsByCheckpointId()
                            .get(2L)
                            .isTaskFinished());
            // Trigger the second checkpoint after we call the operators' finish method.
            // The checkpoint is added to the mailbox and will be processed in the mailbox
            // loop after afterInvoke() has called the operators' finish method.
            testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
            checkpointFuture = triggerCheckpoint(testHarness, 4);
            checkpointFuture.thenAccept(
                    (ignored) -> {
                        for (ResultPartition resultPartition : partitionWriters) {
                            resultPartition.onSubpartitionAllDataProcessed(0);
                        }
                    });
            testHarness.processAll();
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertTrue(
                    testHarness
                            .getTaskStateManager()
                            .getJobManagerTaskStateSnapshotsByCheckpointId()
                            .get(4L)
                            .isTaskFinished());
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
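
For orientation, the partition lifecycle this test depends on can be isolated from the mailbox harness. A minimal sketch, assuming the same Flink test utility (PartitionTestUtils) used above:

ResultPartition partition =
        PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
try {
    partition.setup(); // must run before any data is written: wires up the buffer pool
    // ... write records and trigger checkpoints against the partition ...
    // Simulates what the thenAccept callback above does: the downstream consumer
    // acknowledges that all data of subpartition 0 has been processed.
    partition.onSubpartitionAllDataProcessed(0);
} finally {
    partition.close(); // releases network buffers even if the body above throws
}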
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class SingleInputGateTest, method testUpdateUnknownInputChannel:
/**
 * Tests that an input gate can successfully convert unknown input channels into local and
 * remote channels.
 */
@Test
public void testUpdateUnknownInputChannel() throws Exception {
    final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
    final ResultPartition localResultPartition =
            new ResultPartitionBuilder()
                    .setResultPartitionManager(network.getResultPartitionManager())
                    .setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
                    .build();
    final ResultPartition remoteResultPartition =
            new ResultPartitionBuilder()
                    .setResultPartitionManager(network.getResultPartitionManager())
                    .setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
                    .build();
    localResultPartition.setup();
    remoteResultPartition.setup();
    final SingleInputGate inputGate = createInputGate(network, 2, ResultPartitionType.PIPELINED);
    final InputChannel[] inputChannels = new InputChannel[2];
    try (Closer closer = Closer.create()) {
        closer.register(network::close);
        closer.register(inputGate::close);
        final ResultPartitionID localResultPartitionId = localResultPartition.getPartitionId();
        inputChannels[0] = buildUnknownInputChannel(network, inputGate, localResultPartitionId, 0);
        final ResultPartitionID remoteResultPartitionId = remoteResultPartition.getPartitionId();
        inputChannels[1] = buildUnknownInputChannel(network, inputGate, remoteResultPartitionId, 1);
        inputGate.setInputChannels(inputChannels);
        inputGate.setup();
        assertThat(
                inputGate.getInputChannels()
                        .get(createSubpartitionInfo(remoteResultPartitionId.getPartitionId())),
                is(instanceOf(UnknownInputChannel.class)));
        assertThat(
                inputGate.getInputChannels()
                        .get(createSubpartitionInfo(localResultPartitionId.getPartitionId())),
                is(instanceOf(UnknownInputChannel.class)));
        ResourceID localLocation = ResourceID.generate();
        // Trigger the update from unknown input channel to remote input channel:
        // the producer's location differs from this task's location.
        inputGate.updateInputChannel(
                localLocation,
                createRemoteWithIdAndLocation(
                        remoteResultPartitionId.getPartitionId(), ResourceID.generate()));
        assertThat(
                inputGate.getInputChannels()
                        .get(createSubpartitionInfo(remoteResultPartitionId.getPartitionId())),
                is(instanceOf(RemoteInputChannel.class)));
        assertThat(
                inputGate.getInputChannels()
                        .get(createSubpartitionInfo(localResultPartitionId.getPartitionId())),
                is(instanceOf(UnknownInputChannel.class)));
        // Trigger the update from unknown input channel to local input channel:
        // the producer's location matches this task's location.
        inputGate.updateInputChannel(
                localLocation,
                createRemoteWithIdAndLocation(
                        localResultPartitionId.getPartitionId(), localLocation));
        assertThat(
                inputGate.getInputChannels()
                        .get(createSubpartitionInfo(remoteResultPartitionId.getPartitionId())),
                is(instanceOf(RemoteInputChannel.class)));
        assertThat(
                inputGate.getInputChannels()
                        .get(createSubpartitionInfo(localResultPartitionId.getPartitionId())),
                is(instanceOf(LocalInputChannel.class)));
    }
}
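
The conversion rule these assertions pin down is driven purely by location: an unknown channel becomes a LocalInputChannel when the producer reports the same ResourceID as the consuming task, and a RemoteInputChannel otherwise. A hypothetical predicate (not Flink API) restating that rule:

// Hypothetical helper, not part of Flink: restates the decision this test verifies.
static boolean becomesLocalChannel(ResourceID taskLocation, ResourceID producerLocation) {
    // Same ResourceID: producer and consumer share a TaskManager, so data is read
    // locally; different ResourceIDs mean the data must cross the network.
    return taskLocation.equals(producerLocation);
}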
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class NettyShuffleUtilsTest, method createResultPartition:
private ResultPartition createResultPartition(
        NettyShuffleEnvironment network, ResultPartitionType resultPartitionType, int numSubpartitions) {
    ShuffleDescriptor shuffleDescriptor =
            createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), ResourceID.generate());
    PartitionDescriptor partitionDescriptor =
            new PartitionDescriptor(
                    new IntermediateDataSetID(), 2, shuffleDescriptor.getResultPartitionID().getPartitionId(),
                    resultPartitionType, numSubpartitions, 0);
    ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor =
            new ResultPartitionDeploymentDescriptor(partitionDescriptor, shuffleDescriptor, 1, true);
    ExecutionAttemptID consumerID = new ExecutionAttemptID();
    Collection<ResultPartition> resultPartitions =
            network.createResultPartitionWriters(
                    network.createShuffleIOOwnerContext("", consumerID, new UnregisteredMetricsGroup()),
                    Collections.singletonList(resultPartitionDeploymentDescriptor));
    return resultPartitions.iterator().next();
}
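
A hypothetical call site for this helper, sketching the setup/close bracket that the test below applies to each partition (NettyShuffleEnvironmentBuilder is the same test builder used in the next example):

NettyShuffleEnvironment environment = new NettyShuffleEnvironmentBuilder().build();
ResultPartition partition =
        createResultPartition(environment, ResultPartitionType.PIPELINED_BOUNDED, 4);
partition.setup(); // requests the buffer pool; writes would fail without it
// ... exercise the partition ...
partition.close();
environment.close();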
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class NettyShuffleUtilsTest, method testComputeRequiredNetworkBuffers:
/**
 * This test verifies that the {@link NettyShuffleEnvironment} requires buffers as expected, so
 * that the required shuffle memory size returned by {@link
 * ShuffleMaster#computeShuffleMemorySizeForTask(TaskInputsOutputsDescriptor)} is correct.
 */
@Test
public void testComputeRequiredNetworkBuffers() throws Exception {
    int numBuffersPerChannel = 5;
    int numBuffersPerGate = 8;
    int sortShuffleMinParallelism = 8;
    int numSortShuffleMinBuffers = 12;
    int numChannels1 = 3;
    int numChannels2 = 4;
    IntermediateDataSetID ds1 = new IntermediateDataSetID();
    IntermediateDataSetID ds2 = new IntermediateDataSetID();
    IntermediateDataSetID ds3 = new IntermediateDataSetID();
    // pipelined shuffle
    int numSubs1 = 5;
    // hash blocking shuffle
    int numSubs2 = 6;
    // sort blocking shuffle
    int numSubs3 = 10;
    Map<IntermediateDataSetID, Integer> subpartitionNums =
            ImmutableMap.of(ds1, numSubs1, ds2, numSubs2, ds3, numSubs3);
    Map<IntermediateDataSetID, ResultPartitionType> partitionTypes =
            ImmutableMap.of(ds1, PIPELINED_BOUNDED, ds2, BLOCKING, ds3, BLOCKING);
    int numTotalBuffers =
            NettyShuffleUtils.computeNetworkBuffersForAnnouncing(
                    numBuffersPerChannel,
                    numBuffersPerGate,
                    sortShuffleMinParallelism,
                    numSortShuffleMinBuffers,
                    numChannels1 + numChannels2,
                    2,
                    subpartitionNums,
                    partitionTypes);
    NettyShuffleEnvironment sEnv =
            new NettyShuffleEnvironmentBuilder()
                    .setNumNetworkBuffers(numTotalBuffers)
                    .setNetworkBuffersPerChannel(numBuffersPerChannel)
                    .setSortShuffleMinBuffers(numSortShuffleMinBuffers)
                    .setSortShuffleMinParallelism(sortShuffleMinParallelism)
                    .build();
    SingleInputGate inputGate1 = createInputGate(sEnv, PIPELINED_BOUNDED, numChannels1);
    inputGate1.setup();
    SingleInputGate inputGate2 = createInputGate(sEnv, BLOCKING, numChannels2);
    inputGate2.setup();
    ResultPartition resultPartition1 = createResultPartition(sEnv, PIPELINED_BOUNDED, numSubs1);
    resultPartition1.setup();
    ResultPartition resultPartition2 = createResultPartition(sEnv, BLOCKING, numSubs2);
    resultPartition2.setup();
    ResultPartition resultPartition3 = createResultPartition(sEnv, BLOCKING, numSubs3);
    resultPartition3.setup();
    int expected =
            calculateBuffersConsumption(inputGate1)
                    + calculateBuffersConsumption(inputGate2)
                    + calculateBuffersConsumption(resultPartition1)
                    + calculateBuffersConsumption(resultPartition2)
                    + calculateBuffersConsumption(resultPartition3);
    assertEquals(expected, numTotalBuffers);
    inputGate1.close();
    inputGate2.close();
    resultPartition1.close();
    resultPartition2.close();
    resultPartition3.close();
}
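
To make the expected value concrete on the input side: each input channel is announced with numBuffersPerChannel exclusive buffers, and each gate adds numBuffersPerGate floating buffers on top. The output-side counts depend on NettyShuffleUtils internals, which is why the test delegates them to calculateBuffersConsumption rather than restating the formula. A sketch of the input-side arithmetic, under that assumption:

// Assumption: input gates announce exclusive-per-channel plus floating-per-gate buffers;
// this mirrors Flink's buffer model but is not code from the test itself.
int inputBuffers =
        (numChannels1 + numChannels2) * numBuffersPerChannel // 7 channels * 5 = 35
                + 2 * numBuffersPerGate;                     // 2 gates * 8 = 16
// Total for the two input gates with the values above: 51 buffers.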
Use of org.apache.flink.runtime.io.network.partition.ResultPartition in project flink by apache.
From the class ChannelPersistenceITCase, method buildResultPartition:
private BufferWritingResultPartition buildResultPartition(
        NetworkBufferPool networkBufferPool, ResultPartitionType resultPartitionType,
        int index, int numberOfSubpartitions) throws IOException {
    ResultPartition resultPartition =
            new ResultPartitionBuilder()
                    .setResultPartitionIndex(index)
                    .setResultPartitionType(resultPartitionType)
                    .setNumberOfSubpartitions(numberOfSubpartitions)
                    .setBufferPoolFactory(() -> networkBufferPool.createBufferPool(
                            numberOfSubpartitions, Integer.MAX_VALUE, numberOfSubpartitions, Integer.MAX_VALUE))
                    .build();
    resultPartition.setup();
    return (BufferWritingResultPartition) resultPartition;
}
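
A hypothetical caller for this helper, included to show where the returned BufferWritingResultPartition fits; emitRecord, finish, and close come from the ResultPartitionWriter interface, while the pool sizes here are illustrative only:

// Hypothetical usage, not from the test itself.
NetworkBufferPool bufferPool = new NetworkBufferPool(16, 4096); // 16 segments of 4 KiB
BufferWritingResultPartition partition =
        buildResultPartition(bufferPool, ResultPartitionType.PIPELINED, 0, 2);
partition.emitRecord(ByteBuffer.wrap("hello".getBytes()), 0); // record to subpartition 0
partition.finish(); // marks all subpartitions as finished
partition.close();
bufferPool.destroy();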