Example usage of org.apache.flink.runtime.io.network.NettyShuffleEnvironment in the Apache Flink project.
From class ResultPartitionTest, method testIsAvailableOrNot.
/**
 * Tests {@link ResultPartition#getAvailableFuture()}.
 */
@Test
public void testIsAvailableOrNot() throws IOException {
    final int totalNetworkBuffers = 10;
    final int bufferSize = 1024;
    final NettyShuffleEnvironment environment =
            new NettyShuffleEnvironmentBuilder()
                    .setNumNetworkBuffers(totalNetworkBuffers)
                    .setBufferSize(bufferSize)
                    .build();
    final ResultPartition partition =
            createPartition(environment, ResultPartitionType.PIPELINED, 1);
    try {
        partition.setup();
        // Shrink the partition's local pool to exactly two buffers so that the
        // two records emitted below can exhaust it.
        partition.getBufferPool().setNumBuffers(2);
        // While free buffers remain, the partition reports itself available.
        assertTrue(partition.getAvailableFuture().isDone());
        // Each full-buffer-sized record occupies one buffer; after two emits
        // the pool is exhausted and availability must become pending.
        partition.emitRecord(ByteBuffer.allocate(bufferSize), 0);
        partition.emitRecord(ByteBuffer.allocate(bufferSize), 0);
        assertFalse(partition.getAvailableFuture().isDone());
    } finally {
        partition.release();
        environment.close();
    }
}
Example usage of org.apache.flink.runtime.io.network.NettyShuffleEnvironment in the Apache Flink project.
From class InputBuffersMetricsTest, method testFloatingBuffersUsage.
/**
 * Verifies the floating/credit-based buffer-usage gauges across two input gates
 * while floating buffers are requested (via sender backlog) and drained.
 */
@Test
public void testFloatingBuffersUsage() throws Exception {
    int remoteChannelsGate1 = 2;
    int localChannelsGate1 = 0;
    int remoteChannelsGate2 = 1;
    int localChannelsGate2 = 1;
    int totalRemoteChannels = remoteChannelsGate1 + remoteChannelsGate2;
    int buffersPerChannel = 2;
    int floatingBuffersPerGate = 8;
    NettyShuffleEnvironment environment =
            new NettyShuffleEnvironmentBuilder()
                    .setNetworkBuffersPerChannel(buffersPerChannel)
                    .setFloatingNetworkBuffersPerGate(floatingBuffersPerGate)
                    .build();
    closeableRegistry.registerCloseable(environment::close);
    Tuple2<SingleInputGate, List<RemoteInputChannel>> gate1AndChannels =
            buildInputGate(environment, remoteChannelsGate1, localChannelsGate1);
    SingleInputGate secondGate =
            buildInputGate(environment, remoteChannelsGate2, localChannelsGate2).f0;
    SingleInputGate firstGate = gate1AndChannels.f0;
    closeableRegistry.registerCloseable(firstGate::close);
    closeableRegistry.registerCloseable(secondGate::close);
    firstGate.setup();
    secondGate.setup();
    RemoteInputChannel firstRemoteChannel = gate1AndChannels.f1.get(0);
    SingleInputGate[] gates = new SingleInputGate[] {gate1AndChannels.f0, secondGate};
    FloatingBuffersUsageGauge floatingUsageGauge = new FloatingBuffersUsageGauge(gates);
    ExclusiveBuffersUsageGauge exclusiveUsageGauge = new ExclusiveBuffersUsageGauge(gates);
    CreditBasedInputBuffersUsageGauge totalUsageGauge =
            new CreditBasedInputBuffersUsageGauge(floatingUsageGauge, exclusiveUsageGauge, gates);
    // Nothing requested yet, so usage starts at zero.
    assertEquals(0.0, floatingUsageGauge.getValue(), 0.0);
    assertEquals(0.0, totalUsageGauge.getValue(), 0.0);
    // drain gate1's exclusive buffers
    drainBuffer(buffersPerChannel, firstRemoteChannel);
    int totalBuffers =
            floatingBuffersPerGate * gates.length + buffersPerChannel * totalRemoteChannels;
    firstRemoteChannel.requestSubpartition();
    int backlog = 3;
    // A backlog of N makes the channel request buffersPerChannel + N floating buffers.
    int requestedFloatingBuffers = buffersPerChannel + backlog;
    firstRemoteChannel.onSenderBacklog(backlog);
    assertEquals(
            requestedFloatingBuffers,
            firstRemoteChannel.unsynchronizedGetFloatingBuffersAvailable());
    drainBuffer(requestedFloatingBuffers, firstRemoteChannel);
    assertEquals(0, firstRemoteChannel.unsynchronizedGetFloatingBuffersAvailable());
    assertEquals(
            (double) (buffersPerChannel + requestedFloatingBuffers) / totalBuffers,
            totalUsageGauge.getValue(),
            0.0001);
}
Example usage of org.apache.flink.runtime.io.network.NettyShuffleEnvironment in the Apache Flink project.
From class NettyShuffleUtilsTest, method testComputeRequiredNetworkBuffers.
/**
 * This test verifies that the {@link NettyShuffleEnvironment} requires buffers as expected, so
 * that the required shuffle memory size returned by {@link
 * ShuffleMaster#computeShuffleMemorySizeForTask(TaskInputsOutputsDescriptor)} is correct.
 */
@Test
public void testComputeRequiredNetworkBuffers() throws Exception {
    int numBuffersPerChannel = 5;
    int numBuffersPerGate = 8;
    int sortShuffleMinParallelism = 8;
    int numSortShuffleMinBuffers = 12;
    int numChannels1 = 3;
    int numChannels2 = 4;
    IntermediateDataSetID ds1 = new IntermediateDataSetID();
    IntermediateDataSetID ds2 = new IntermediateDataSetID();
    IntermediateDataSetID ds3 = new IntermediateDataSetID();
    // pipelined shuffle
    int numSubs1 = 5;
    // hash blocking shuffle
    int numSubs2 = 6;
    // sort blocking shuffle (>= sortShuffleMinParallelism, so the sort path is used)
    int numSubs3 = 10;
    Map<IntermediateDataSetID, Integer> subpartitionNums =
            ImmutableMap.of(ds1, numSubs1, ds2, numSubs2, ds3, numSubs3);
    Map<IntermediateDataSetID, ResultPartitionType> partitionTypes =
            ImmutableMap.of(ds1, PIPELINED_BOUNDED, ds2, BLOCKING, ds3, BLOCKING);
    // The announced buffer count must match what the environment actually consumes below.
    int numTotalBuffers =
            NettyShuffleUtils.computeNetworkBuffersForAnnouncing(
                    numBuffersPerChannel,
                    numBuffersPerGate,
                    sortShuffleMinParallelism,
                    numSortShuffleMinBuffers,
                    numChannels1 + numChannels2,
                    2,
                    subpartitionNums,
                    partitionTypes);
    NettyShuffleEnvironment sEnv =
            new NettyShuffleEnvironmentBuilder()
                    .setNumNetworkBuffers(numTotalBuffers)
                    .setNetworkBuffersPerChannel(numBuffersPerChannel)
                    .setSortShuffleMinBuffers(numSortShuffleMinBuffers)
                    .setSortShuffleMinParallelism(sortShuffleMinParallelism)
                    .build();
    // Fix: previously the environment (and, on assertion failure, the gates and
    // partitions) was never closed, leaking network buffer pools across tests.
    try {
        SingleInputGate inputGate1 = createInputGate(sEnv, PIPELINED_BOUNDED, numChannels1);
        inputGate1.setup();
        SingleInputGate inputGate2 = createInputGate(sEnv, BLOCKING, numChannels2);
        inputGate2.setup();
        ResultPartition resultPartition1 = createResultPartition(sEnv, PIPELINED_BOUNDED, numSubs1);
        resultPartition1.setup();
        ResultPartition resultPartition2 = createResultPartition(sEnv, BLOCKING, numSubs2);
        resultPartition2.setup();
        ResultPartition resultPartition3 = createResultPartition(sEnv, BLOCKING, numSubs3);
        resultPartition3.setup();
        // Sum of the buffers actually taken by every gate and partition.
        int expected =
                calculateBuffersConsumption(inputGate1)
                        + calculateBuffersConsumption(inputGate2)
                        + calculateBuffersConsumption(resultPartition1)
                        + calculateBuffersConsumption(resultPartition2)
                        + calculateBuffersConsumption(resultPartition3);
        assertEquals(expected, numTotalBuffers);
        inputGate1.close();
        inputGate2.close();
        resultPartition1.close();
        resultPartition2.close();
        resultPartition3.close();
    } finally {
        sEnv.close();
    }
}
Example usage of org.apache.flink.runtime.io.network.NettyShuffleEnvironment in the Apache Flink project.
From class CheckpointBarrierTrackerTest, method createCheckpointedInputGate.
// ------------------------------------------------------------------------
// Utils
// ------------------------------------------------------------------------
/**
 * Builds a {@link CheckpointedInputGate} over {@code numberOfChannels} remote channels,
 * backed by a fresh shuffle environment, with partitions already requested.
 *
 * <p>NOTE(review): the environment is intentionally not closed here — the returned gate
 * still uses its buffer pools; presumably the caller/test teardown cleans up. Confirm.
 */
private CheckpointedInputGate createCheckpointedInputGate(int numberOfChannels, AbstractInvokable toNotify) throws IOException {
    final NettyShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build();
    final SingleInputGate inputGate =
            new SingleInputGateBuilder()
                    .setNumberOfChannels(numberOfChannels)
                    .setupBufferPoolFactory(shuffleEnvironment)
                    .build();
    // One remote channel per index, wired to the same shuffle environment.
    final RemoteInputChannel[] channels = new RemoteInputChannel[numberOfChannels];
    for (int channelIndex = 0; channelIndex < numberOfChannels; channelIndex++) {
        channels[channelIndex] =
                InputChannelBuilder.newBuilder()
                        .setChannelIndex(channelIndex)
                        .setupFromNettyShuffleEnvironment(shuffleEnvironment)
                        .setConnectionManager(new TestingConnectionManager())
                        .buildRemoteChannel(inputGate);
    }
    inputGate.setInputChannels(channels);
    inputGate.setup();
    inputGate.requestPartitions();
    return createCheckpointedInputGate(inputGate, toNotify);
}
Example usage of org.apache.flink.runtime.io.network.NettyShuffleEnvironment in the Apache Flink project.
From class TaskExecutorPartitionLifecycleTest, method testBlockingLocalPartitionReleaseDoesNotBlockTaskExecutor.
// Verifies that a local partition release that blocks inside the
// ResultPartitionManager does not block the TaskExecutor's main thread:
// the gateway must still answer canBeReleased() while the release is stuck.
@Test
public void testBlockingLocalPartitionReleaseDoesNotBlockTaskExecutor() throws Exception {
    // Gate that makes releasePartition() block until the test lifts it in the finally block.
    BlockerSync sync = new BlockerSync();
    // Partition manager whose release blocks non-interruptibly, simulating a slow release.
    ResultPartitionManager blockingResultPartitionManager = new ResultPartitionManager() {
        @Override
        public void releasePartition(ResultPartitionID partitionId, Throwable cause) {
            sync.blockNonInterruptible();
            super.releasePartition(partitionId, cause);
        }
    };
    // Release work runs on a dedicated IO executor, so blocking there must not
    // stall the TaskExecutor itself.
    NettyShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().setResultPartitionManager(blockingResultPartitionManager).setIoExecutor(TEST_EXECUTOR_SERVICE_RESOURCE.getExecutor()).build();
    // Completed once the partition under test starts being tracked; used by
    // internalTestPartitionRelease to know the partition id to release.
    final CompletableFuture<ResultPartitionID> startTrackingFuture = new CompletableFuture<>();
    final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(shuffleEnvironment) {
        @Override
        public void startTrackingPartition(JobID producingJobId, TaskExecutorPartitionInfo partitionInfo) {
            super.startTrackingPartition(producingJobId, partitionInfo);
            startTrackingFuture.complete(partitionInfo.getResultPartitionId());
        }
    };
    try {
        internalTestPartitionRelease(partitionTracker, shuffleEnvironment, startTrackingFuture, (jobId, resultPartitionDeploymentDescriptor, taskExecutor, taskExecutorGateway) -> {
            final IntermediateDataSetID dataSetId = resultPartitionDeploymentDescriptor.getResultId();
            // Trigger the release that will block inside releasePartition() above.
            taskExecutorGateway.releaseClusterPartitions(Collections.singleton(dataSetId), timeout);
            // execute some operation to check whether the TaskExecutor is blocked
            taskExecutorGateway.canBeReleased().get(5, TimeUnit.SECONDS);
        });
    } finally {
        // Always unblock the stuck release so the executor can shut down cleanly.
        sync.releaseBlocker();
    }
}
Aggregations