Use of org.apache.flink.annotation.VisibleForTesting in the Apache Flink project.
Class OrcBulkWriterFactory, method getWriterOptions:
@VisibleForTesting
protected OrcFile.WriterOptions getWriterOptions() {
    if (null == writerOptions) {
        Configuration conf = new ThreadLocalClassLoaderConfiguration();
        for (Map.Entry<String, String> entry : confMap.entrySet()) {
            conf.set(entry.getKey(), entry.getValue());
        }
        writerOptions = OrcFile.writerOptions(writerProperties, conf);
        writerOptions.setSchema(this.vectorizer.getSchema());
    }
    return writerOptions;
}
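Because getWriterOptions() is protected and @VisibleForTesting, a test in the same package can exercise the lazy initialization directly. A minimal sketch, assuming JUnit 5 and Flink's three-argument OrcBulkWriterFactory constructor; the test class name, schema string, and ORC property value are illustrative, and it assumes ORC's WriterOptions exposes getSchema():

// Hypothetical test sketch; it must live in the same package as
// OrcBulkWriterFactory so the protected method is accessible.
package org.apache.flink.orc.writer;

import java.util.Properties;

import org.apache.flink.orc.vector.Vectorizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;

class OrcWriterOptionsSketchTest {

    @Test
    void writerOptionsAreCachedAndCarryTheSchema() {
        Properties writerProps = new Properties();
        writerProps.setProperty("orc.compress", "SNAPPY"); // illustrative ORC property

        Vectorizer<String> vectorizer = new Vectorizer<String>("struct<name:string>") {
            @Override
            public void vectorize(String element, VectorizedRowBatch batch) {
                // no-op: this test only inspects the writer options
            }
        };

        OrcBulkWriterFactory<String> factory =
                new OrcBulkWriterFactory<>(vectorizer, writerProps, new Configuration());

        OrcFile.WriterOptions options = factory.getWriterOptions();
        // lazily created once, then cached across calls
        assertSame(options, factory.getWriterOptions());
        // the vectorizer's schema is applied to the options
        assertEquals(vectorizer.getSchema(), options.getSchema());
    }
}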
Use of org.apache.flink.annotation.VisibleForTesting in the Apache Flink project.
Class ResultPartitionFactory, method create:
@VisibleForTesting
public ResultPartition create(
        String taskNameWithSubtaskAndId,
        int partitionIndex,
        ResultPartitionID id,
        ResultPartitionType type,
        int numberOfSubpartitions,
        int maxParallelism,
        SupplierWithException<BufferPool, IOException> bufferPoolFactory) {
    BufferCompressor bufferCompressor = null;
    if (type.isBlocking() && blockingShuffleCompressionEnabled) {
        bufferCompressor = new BufferCompressor(networkBufferSize, compressionCodec);
    }
    ResultSubpartition[] subpartitions = new ResultSubpartition[numberOfSubpartitions];
    final ResultPartition partition;
    if (type == ResultPartitionType.PIPELINED
            || type == ResultPartitionType.PIPELINED_BOUNDED
            || type == ResultPartitionType.PIPELINED_APPROXIMATE) {
        final PipelinedResultPartition pipelinedPartition = new PipelinedResultPartition(
                taskNameWithSubtaskAndId, partitionIndex, id, type, subpartitions,
                maxParallelism, partitionManager, bufferCompressor, bufferPoolFactory);
        for (int i = 0; i < subpartitions.length; i++) {
            if (type == ResultPartitionType.PIPELINED_APPROXIMATE) {
                subpartitions[i] = new PipelinedApproximateSubpartition(
                        i, configuredNetworkBuffersPerChannel, pipelinedPartition);
            } else {
                subpartitions[i] = new PipelinedSubpartition(
                        i, configuredNetworkBuffersPerChannel, pipelinedPartition);
            }
        }
        partition = pipelinedPartition;
    } else if (type == ResultPartitionType.BLOCKING
            || type == ResultPartitionType.BLOCKING_PERSISTENT) {
        if (numberOfSubpartitions >= sortShuffleMinParallelism) {
            partition = new SortMergeResultPartition(
                    taskNameWithSubtaskAndId, partitionIndex, id, type, subpartitions.length,
                    maxParallelism, batchShuffleReadBufferPool, batchShuffleReadIOExecutor,
                    partitionManager, channelManager.createChannel().getPath(),
                    bufferCompressor, bufferPoolFactory);
        } else {
            final BoundedBlockingResultPartition blockingPartition = new BoundedBlockingResultPartition(
                    taskNameWithSubtaskAndId, partitionIndex, id, type, subpartitions,
                    maxParallelism, partitionManager, bufferCompressor, bufferPoolFactory);
            initializeBoundedBlockingPartitions(
                    subpartitions, blockingPartition, blockingSubpartitionType,
                    networkBufferSize, channelManager, sslEnabled);
            partition = blockingPartition;
        }
    } else {
        throw new IllegalArgumentException("Unrecognized ResultPartitionType: " + type);
    }
    LOG.debug("{}: Initialized {}", taskNameWithSubtaskAndId, this);
    return partition;
}
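The branching above reduces to a small dispatch rule: pipelined variants always get a PipelinedResultPartition, while blocking variants switch between sort-merge and bounded-blocking based on the subpartition count. A sketch of that rule in isolation; the helper class and method are illustrative, not Flink API, and only the ResultPartitionType constants are real:

// Illustrative helper mirroring the selection logic of create() as a pure function.
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;

public final class PartitionDispatchSketch {

    static String implementationFor(
            ResultPartitionType type, int numberOfSubpartitions, int sortShuffleMinParallelism) {
        switch (type) {
            case PIPELINED:
            case PIPELINED_BOUNDED:
            case PIPELINED_APPROXIMATE:
                return "PipelinedResultPartition";
            case BLOCKING:
            case BLOCKING_PERSISTENT:
                // sort-merge shuffle is only used at sufficiently high parallelism
                return numberOfSubpartitions >= sortShuffleMinParallelism
                        ? "SortMergeResultPartition"
                        : "BoundedBlockingResultPartition";
            default:
                throw new IllegalArgumentException("Unrecognized ResultPartitionType: " + type);
        }
    }
}

For example, with a hypothetical threshold of 128, a BLOCKING partition with 200 subpartitions maps to SortMergeResultPartition, while one with 50 maps to BoundedBlockingResultPartition.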
Use of org.apache.flink.annotation.VisibleForTesting in the Apache Flink project.
Class NettyShuffleServiceFactory, method createNettyShuffleEnvironment:
@VisibleForTesting
static NettyShuffleEnvironment createNettyShuffleEnvironment(
        NettyShuffleEnvironmentConfiguration config, ResourceID taskExecutorResourceId,
        TaskEventPublisher taskEventPublisher, ResultPartitionManager resultPartitionManager,
        MetricGroup metricGroup, Executor ioExecutor) {
    checkNotNull(config);
    checkNotNull(taskExecutorResourceId);
    checkNotNull(taskEventPublisher);
    checkNotNull(resultPartitionManager);
    checkNotNull(metricGroup);
    NettyConfig nettyConfig = config.nettyConfig();
    FileChannelManager fileChannelManager =
            new FileChannelManagerImpl(config.getTempDirs(), DIR_NAME_PREFIX);
    if (LOG.isInfoEnabled()) {
        LOG.info(
                "Created a new {} for storing result partitions of BLOCKING shuffles. Used directories:\n\t{}",
                FileChannelManager.class.getSimpleName(),
                Arrays.stream(fileChannelManager.getPaths())
                        .map(File::getAbsolutePath).collect(Collectors.joining("\n\t")));
    }
    ConnectionManager connectionManager = nettyConfig != null
            ? new NettyConnectionManager(resultPartitionManager, taskEventPublisher, nettyConfig,
                    config.getMaxNumberOfConnections(), config.isConnectionReuseEnabled())
            : new LocalConnectionManager();
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(
            config.numNetworkBuffers(), config.networkBufferSize(), config.getRequestSegmentsTimeout());
    // a separate buffer pool for batch shuffle, rather than reusing the network buffer pool,
    // to avoid memory-contention side effects such as deadlock or "insufficient network buffer" errors
    BatchShuffleReadBufferPool batchShuffleReadBufferPool = new BatchShuffleReadBufferPool(
            config.batchShuffleReadMemoryBytes(), config.networkBufferSize());
    // a separate IO executor for batch shuffle, rather than reusing the TaskManager IO executor,
    // to avoid execution-contention side effects such as long IO or wait times causing starvation or timeouts
    ExecutorService batchShuffleReadIOExecutor = Executors.newFixedThreadPool(
            Math.max(1, Math.min(batchShuffleReadBufferPool.getMaxConcurrentRequests(),
                    4 * Hardware.getNumberCPUCores())),
            new ExecutorThreadFactory("blocking-shuffle-io"));
    registerShuffleMetrics(metricGroup, networkBufferPool);
    ResultPartitionFactory resultPartitionFactory = new ResultPartitionFactory(
            resultPartitionManager, fileChannelManager, networkBufferPool,
            batchShuffleReadBufferPool, batchShuffleReadIOExecutor,
            config.getBlockingSubpartitionType(), config.networkBuffersPerChannel(),
            config.floatingNetworkBuffersPerGate(), config.networkBufferSize(),
            config.isBlockingShuffleCompressionEnabled(), config.getCompressionCodec(),
            config.getMaxBuffersPerChannel(), config.sortShuffleMinBuffers(),
            config.sortShuffleMinParallelism(), config.isSSLEnabled());
    SingleInputGateFactory singleInputGateFactory = new SingleInputGateFactory(
            taskExecutorResourceId, config, connectionManager,
            resultPartitionManager, taskEventPublisher, networkBufferPool);
    return new NettyShuffleEnvironment(taskExecutorResourceId, config, networkBufferPool,
            connectionManager, resultPartitionManager, fileChannelManager, resultPartitionFactory,
            singleInputGateFactory, ioExecutor, batchShuffleReadBufferPool, batchShuffleReadIOExecutor);
}
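The batch-shuffle IO executor size works out to max(1, min(maxConcurrentRequests, 4 × cores)). A standalone sketch of that arithmetic; the value 16 is a made-up stand-in for BatchShuffleReadBufferPool#getMaxConcurrentRequests(), and Runtime.availableProcessors() stands in for Flink's Hardware.getNumberCPUCores():

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class IoPoolSizingSketch {
    public static void main(String[] args) {
        int maxConcurrentRequests = 16; // hypothetical value
        int numCores = Runtime.getRuntime().availableProcessors();
        int poolSize = Math.max(1, Math.min(maxConcurrentRequests, 4 * numCores));
        // e.g. 8 cores -> min(16, 32) = 16 threads; 2 cores -> min(16, 8) = 8 threads
        ExecutorService ioExecutor = Executors.newFixedThreadPool(poolSize);
        System.out.println("batch shuffle IO threads: " + poolSize);
        ioExecutor.shutdown();
    }
}

The lower bound of 1 keeps the pool usable on machines with very few cores, and the cap of 4 threads per core bounds contention when the buffer pool would otherwise allow many concurrent read requests.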
Use of org.apache.flink.annotation.VisibleForTesting in the Apache Flink project.
Class BufferDecompressor, method decompressToOriginalBuffer:
/**
* The difference between this method and {@link #decompressToIntermediateBuffer(Buffer)} is
* that this method copies the decompressed data to the input {@link Buffer} starting from
* offset 0.
*
* <p>The caller must guarantee that the input {@link Buffer} is writable and there's enough
* space left.
*/
@VisibleForTesting
public Buffer decompressToOriginalBuffer(Buffer buffer) {
    int decompressedLen = decompress(buffer);
    // copy the decompressed data back into the input buffer
    int memorySegmentOffset = buffer.getMemorySegmentOffset();
    MemorySegment segment = buffer.getMemorySegment();
    segment.put(memorySegmentOffset, internalBuffer.array(), 0, decompressedLen);
    return new ReadOnlySlicedNetworkBuffer(
            buffer.asByteBuf(), 0, decompressedLen, memorySegmentOffset, false);
}
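A round trip through BufferCompressor and BufferDecompressor illustrates the contract. A hedged sketch: the codec name "LZ4" and the 32 KiB buffer size are illustrative values matching the two-argument constructor style used in this version of Flink, and createWritableTestBuffer() is NOT a real Flink helper, only a stand-in for constructing a writable NetworkBuffer filled with sample bytes:

// Hypothetical round-trip sketch over Flink's buffer compression classes.
BufferCompressor compressor = new BufferCompressor(32 * 1024, "LZ4");
BufferDecompressor decompressor = new BufferDecompressor(32 * 1024, "LZ4");

Buffer buffer = createWritableTestBuffer(); // hypothetical helper
// compress in place: compressed bytes are copied back into `buffer`
Buffer compressed = compressor.compressToOriginalBuffer(buffer);
// decompress in place: restored bytes land at offset 0 of the same segment;
// per the javadoc, the caller must guarantee the buffer is writable and large enough
Buffer restored = decompressor.decompressToOriginalBuffer(compressed);

The returned ReadOnlySlicedNetworkBuffer is only a read-only view over the input buffer's memory segment, which is why the method can avoid allocating a new segment.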
Use of org.apache.flink.annotation.VisibleForTesting in the Apache Flink project.
Class ZooKeeperUtils, method startCuratorFramework:
/**
 * Starts a {@link CuratorFramework} instance built from the given builder and connects it to
 * the configured ZooKeeper quorum.
 *
 * @param builder the {@link CuratorFrameworkFactory.Builder} used to construct the framework
 * @param fatalErrorHandler the {@link FatalErrorHandler} invoked on unexpected errors of the
 *     {@link CuratorFramework}
 * @return a {@link CuratorFrameworkWithUnhandledErrorListener} wrapping the started instance
 */
@VisibleForTesting
public static CuratorFrameworkWithUnhandledErrorListener startCuratorFramework(
        CuratorFrameworkFactory.Builder builder, FatalErrorHandler fatalErrorHandler) {
    CuratorFramework cf = builder.build();
    UnhandledErrorListener unhandledErrorListener = (message, throwable) -> {
        LOG.error("Unhandled error in curator framework, error message: {}", message, throwable);
        // Exceptions thrown inside an UnhandledErrorListener are caught by
        // CuratorFramework, so the failure is reported through the FatalErrorHandler
        // instead, which typically triggers process exit or notifies the main thread.
        fatalErrorHandler.onFatalError(throwable);
    };
    cf.getUnhandledErrorListenable().addListener(unhandledErrorListener);
    cf.start();
    return new CuratorFrameworkWithUnhandledErrorListener(cf, unhandledErrorListener);
}
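A hedged usage sketch of the method above. The connect string and retry policy are illustrative values; FatalErrorHandler has a single onFatalError(Throwable) method, so a lambda suffices; and in Flink these Curator classes are usually the shaded variants (under org.apache.flink.shaded.curator5), assumed here to behave like the plain Curator API:

CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder()
        .connectString("localhost:2181")                 // illustrative quorum address
        .retryPolicy(new ExponentialBackoffRetry(1000, 3)); // illustrative retry policy

FatalErrorHandler fatalErrorHandler =
        error -> System.err.println("Fatal ZooKeeper error: " + error);

CuratorFrameworkWithUnhandledErrorListener curator =
        ZooKeeperUtils.startCuratorFramework(builder, fatalErrorHandler);
try {
    // curator.asCuratorFramework() exposes the started CuratorFramework
} finally {
    curator.close(); // closes the framework and removes the error listener
}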