Use of org.apache.flink.runtime.io.disk.FileChannelManagerImpl in project flink by apache.
From the class NettyShuffleServiceFactory, method createNettyShuffleEnvironment:
@VisibleForTesting
static NettyShuffleEnvironment createNettyShuffleEnvironment(
        NettyShuffleEnvironmentConfiguration config, ResourceID taskExecutorResourceId,
        TaskEventPublisher taskEventPublisher, ResultPartitionManager resultPartitionManager,
        MetricGroup metricGroup, Executor ioExecutor) {
    checkNotNull(config);
    checkNotNull(taskExecutorResourceId);
    checkNotNull(taskEventPublisher);
    checkNotNull(resultPartitionManager);
    checkNotNull(metricGroup);
    NettyConfig nettyConfig = config.nettyConfig();
    // The FileChannelManagerImpl owns the temporary directories that hold the spill files
    // of BLOCKING result partitions.
    FileChannelManager fileChannelManager =
            new FileChannelManagerImpl(config.getTempDirs(), DIR_NAME_PREFIX);
    if (LOG.isInfoEnabled()) {
        LOG.info("Created a new {} for storing result partitions of BLOCKING shuffles. Used directories:\n\t{}",
                FileChannelManager.class.getSimpleName(),
                Arrays.stream(fileChannelManager.getPaths())
                        .map(File::getAbsolutePath)
                        .collect(Collectors.joining("\n\t")));
    }
    ConnectionManager connectionManager = nettyConfig != null
            ? new NettyConnectionManager(resultPartitionManager, taskEventPublisher, nettyConfig,
                    config.getMaxNumberOfConnections(), config.isConnectionReuseEnabled())
            : new LocalConnectionManager();
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(
            config.numNetworkBuffers(), config.networkBufferSize(), config.getRequestSegmentsTimeout());
    // We create a separate buffer pool here for batch shuffle instead of reusing the network
    // buffer pool directly, to avoid potential side effects of memory contention, for example
    // deadlock or "insufficient network buffer" errors.
    BatchShuffleReadBufferPool batchShuffleReadBufferPool = new BatchShuffleReadBufferPool(
            config.batchShuffleReadMemoryBytes(), config.networkBufferSize());
    // We create a separate IO executor pool here for batch shuffle instead of reusing the
    // TaskManager IO executor pool directly, to avoid potential side effects of execution
    // contention, for example overly long IO or waiting times leading to starvation or timeouts.
    ExecutorService batchShuffleReadIOExecutor = Executors.newFixedThreadPool(
            Math.max(1, Math.min(batchShuffleReadBufferPool.getMaxConcurrentRequests(),
                    4 * Hardware.getNumberCPUCores())),
            new ExecutorThreadFactory("blocking-shuffle-io"));
    registerShuffleMetrics(metricGroup, networkBufferPool);
    ResultPartitionFactory resultPartitionFactory = new ResultPartitionFactory(
            resultPartitionManager, fileChannelManager, networkBufferPool,
            batchShuffleReadBufferPool, batchShuffleReadIOExecutor,
            config.getBlockingSubpartitionType(), config.networkBuffersPerChannel(),
            config.floatingNetworkBuffersPerGate(), config.networkBufferSize(),
            config.isBlockingShuffleCompressionEnabled(), config.getCompressionCodec(),
            config.getMaxBuffersPerChannel(), config.sortShuffleMinBuffers(),
            config.sortShuffleMinParallelism(), config.isSSLEnabled());
    SingleInputGateFactory singleInputGateFactory = new SingleInputGateFactory(
            taskExecutorResourceId, config, connectionManager, resultPartitionManager,
            taskEventPublisher, networkBufferPool);
    return new NettyShuffleEnvironment(
            taskExecutorResourceId, config, networkBufferPool, connectionManager,
            resultPartitionManager, fileChannelManager, resultPartitionFactory,
            singleInputGateFactory, ioExecutor, batchShuffleReadBufferPool,
            batchShuffleReadIOExecutor);
}
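For context, a minimal standalone sketch of constructing and using a FileChannelManagerImpl outside the shuffle environment is shown below. The temp directory, the "flink-example" prefix, and the createChannel/getPath calls reflect my reading of the FileChannelManager and FileIOChannel APIs, not code taken from the snippet above.

import org.apache.flink.runtime.io.disk.FileChannelManager;
import org.apache.flink.runtime.io.disk.FileChannelManagerImpl;
import org.apache.flink.runtime.io.disk.iomanager.FileIOChannel;

public class FileChannelManagerSketch {
    public static void main(String[] args) throws Exception {
        // Directories where spill files may be placed; a single system temp dir for illustration.
        String[] tempDirs = new String[] {System.getProperty("java.io.tmpdir")};
        // The prefix ("flink-example" is arbitrary here) is used to name the per-manager sub-directories.
        FileChannelManager manager = new FileChannelManagerImpl(tempDirs, "flink-example");
        try {
            // Each createChannel() call yields an ID backed by a fresh file in one of the directories.
            FileIOChannel.ID channel = manager.createChannel();
            System.out.println("New channel file: " + channel.getPath());
        } finally {
            // Closing the manager cleans up the directories it created.
            manager.close();
        }
    }
}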
Use of org.apache.flink.runtime.io.disk.FileChannelManagerImpl in project flink by apache.
From the class SortMergeResultPartitionTest, method setUp:
@Before
public void setUp() {
    // Spill files produced by the sort-merge result partition go into the test's temporary folder.
    fileChannelManager =
            new FileChannelManagerImpl(new String[] {tmpFolder.getRoot().getPath()}, "testing");
    globalPool = new NetworkBufferPool(totalBuffers, bufferSize);
    readBufferPool = new BatchShuffleReadBufferPool(totalBytes, bufferSize);
    readIOExecutor = Executors.newFixedThreadPool(numThreads);
}
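The page does not show the matching cleanup, but a plausible tear-down for this setUp, assuming the test releases the resources it created (the method name and ordering here are illustrative), would look like:

@After
public void tearDown() throws Exception {
    // Stop the read IO executor first, then release the buffer pools and the spill directories.
    readIOExecutor.shutdown();
    globalPool.destroy();
    readBufferPool.destroy();
    // Closing the FileChannelManager removes the "testing" spill directories created in setUp.
    fileChannelManager.close();
}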