Use of org.apache.flink.util.concurrent.ExecutorThreadFactory in project flink by apache.
The class HBaseRowDataAsyncLookupFunction, method open:
@Override
public void open(FunctionContext context) {
    LOG.info("start open ...");
    final ExecutorService threadPool =
            Executors.newFixedThreadPool(
                    THREAD_POOL_SIZE,
                    new ExecutorThreadFactory(
                            "hbase-async-lookup-worker", Threads.LOGGING_EXCEPTION_HANDLER));
    Configuration config = prepareRuntimeConfiguration();
    CompletableFuture<AsyncConnection> asyncConnectionFuture =
            ConnectionFactory.createAsyncConnection(config);
    try {
        asyncConnection = asyncConnectionFuture.get();
        table = asyncConnection.getTable(TableName.valueOf(hTableName), threadPool);
        // Build a Guava lookup cache only when both a positive size and TTL are configured.
        this.cache =
                cacheMaxSize <= 0 || cacheExpireMs <= 0
                        ? null
                        : CacheBuilder.newBuilder()
                                .recordStats()
                                .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS)
                                .maximumSize(cacheMaxSize)
                                .build();
        if (cache != null && context != null) {
            context.getMetricGroup()
                    .gauge("lookupCacheHitRate", (Gauge<Double>) () -> cache.stats().hitRate());
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.error("Exception while creating connection to HBase.", e);
        throw new RuntimeException("Cannot create connection to HBase.", e);
    }
    this.serde = new HBaseSerde(hbaseTableSchema, nullStringLiteral);
    LOG.info("end open.");
}
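What ExecutorThreadFactory buys here is recognizable thread names plus an uncaught-exception handler, so a lookup worker that dies with an unexpected Throwable gets logged instead of disappearing silently. Below is a minimal plain-JDK sketch of that idea, not Flink's actual implementation; the class name SimpleNamedThreadFactory, the daemon flag, and the exact naming pattern are illustrative assumptions:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative stand-in for ExecutorThreadFactory: named daemon threads
// plus a handler that is invoked when a task escapes with a Throwable.
public class SimpleNamedThreadFactory implements ThreadFactory {
    private final String poolName;
    private final Thread.UncaughtExceptionHandler handler;
    private final AtomicInteger counter = new AtomicInteger(1);

    public SimpleNamedThreadFactory(String poolName, Thread.UncaughtExceptionHandler handler) {
        this.poolName = poolName;
        this.handler = handler;
    }

    @Override
    public Thread newThread(Runnable runnable) {
        Thread t = new Thread(runnable, poolName + "-thread-" + counter.getAndIncrement());
        t.setDaemon(true); // do not keep the JVM alive for pool threads
        t.setUncaughtExceptionHandler(handler);
        return t;
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(
                2,
                new SimpleNamedThreadFactory(
                        "hbase-async-lookup-worker",
                        (thread, error) ->
                                System.err.println(thread.getName() + " failed: " + error)));
        pool.execute(() -> { throw new IllegalStateException("boom"); });
        pool.shutdown();
    }
}

Note that an uncaught-exception handler only fires for tasks started via execute(); exceptions from submit() are captured in the returned Future instead.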
Use of org.apache.flink.util.concurrent.ExecutorThreadFactory in project flink by apache.
The class ClusterEntrypoint, method initializeServices:
protected void initializeServices(Configuration configuration, PluginManager pluginManager)
        throws Exception {
    LOG.info("Initializing cluster services.");
    synchronized (lock) {
        resourceId =
                configuration
                        .getOptional(JobManagerOptions.JOB_MANAGER_RESOURCE_ID)
                        .map(value -> DeterminismEnvelope.deterministicValue(new ResourceID(value)))
                        .orElseGet(
                                () -> DeterminismEnvelope.nondeterministicValue(ResourceID.generate()));
        LOG.debug(
                "Initialize cluster entrypoint {} with resource id {}.",
                getClass().getSimpleName(),
                resourceId);
        workingDirectory =
                ClusterEntrypointUtils.createJobManagerWorkingDirectory(configuration, resourceId);
        LOG.info("Using working directory: {}.", workingDirectory);
        rpcSystem = RpcSystem.load(configuration);
        commonRpcService =
                RpcUtils.createRemoteRpcService(
                        rpcSystem,
                        configuration,
                        configuration.getString(JobManagerOptions.ADDRESS),
                        getRPCPortRange(configuration),
                        configuration.getString(JobManagerOptions.BIND_HOST),
                        configuration.getOptional(JobManagerOptions.RPC_BIND_PORT));
        JMXService.startInstance(configuration.getString(JMXServerOptions.JMX_SERVER_PORT));
        // update the configuration used to create the high availability services
        configuration.setString(JobManagerOptions.ADDRESS, commonRpcService.getAddress());
        configuration.setInteger(JobManagerOptions.PORT, commonRpcService.getPort());
        ioExecutor =
                Executors.newFixedThreadPool(
                        ClusterEntrypointUtils.getPoolSize(configuration),
                        new ExecutorThreadFactory("cluster-io"));
        haServices = createHaServices(configuration, ioExecutor, rpcSystem);
        blobServer =
                BlobUtils.createBlobServer(
                        configuration,
                        Reference.borrowed(workingDirectory.unwrap().getBlobStorageDirectory()),
                        haServices.createBlobStore());
        blobServer.start();
        configuration.setString(BlobServerOptions.PORT, String.valueOf(blobServer.getPort()));
        heartbeatServices = createHeartbeatServices(configuration);
        metricRegistry = createMetricRegistry(configuration, pluginManager, rpcSystem);
        final RpcService metricQueryServiceRpcService =
                MetricUtils.startRemoteMetricsRpcService(
                        configuration,
                        commonRpcService.getAddress(),
                        configuration.getString(JobManagerOptions.BIND_HOST),
                        rpcSystem);
        metricRegistry.startQueryService(metricQueryServiceRpcService, null);
        final String hostname = RpcUtils.getHostname(commonRpcService);
        processMetricGroup =
                MetricUtils.instantiateProcessMetricGroup(
                        metricRegistry,
                        hostname,
                        ConfigurationUtils.getSystemResourceMetricsProbingInterval(configuration));
        executionGraphInfoStore =
                createSerializableExecutionGraphStore(
                        configuration, commonRpcService.getScheduledExecutor());
    }
}
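Only the thread factory in the ioExecutor line above is Flink-specific; the pool itself is a plain JDK fixed pool. A standalone sketch of the same construction, assuming flink-core is on the classpath and using a hard-coded size of 4 in place of ClusterEntrypointUtils.getPoolSize(configuration):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.flink.util.concurrent.ExecutorThreadFactory;

public class ClusterIoPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        // Same construction as in initializeServices(), with a hard-coded size.
        ExecutorService ioExecutor =
                Executors.newFixedThreadPool(4, new ExecutorThreadFactory("cluster-io"));

        ioExecutor.execute(
                () -> System.out.println("running on " + Thread.currentThread().getName()));

        // Entrypoint-style shutdown: stop accepting work, then wait a bounded time.
        ioExecutor.shutdown();
        if (!ioExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
            ioExecutor.shutdownNow();
        }
    }
}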
Use of org.apache.flink.util.concurrent.ExecutorThreadFactory in project flink by apache.
The class NettyShuffleServiceFactory, method createNettyShuffleEnvironment:
@VisibleForTesting
static NettyShuffleEnvironment createNettyShuffleEnvironment(
        NettyShuffleEnvironmentConfiguration config,
        ResourceID taskExecutorResourceId,
        TaskEventPublisher taskEventPublisher,
        ResultPartitionManager resultPartitionManager,
        MetricGroup metricGroup,
        Executor ioExecutor) {
    checkNotNull(config);
    checkNotNull(taskExecutorResourceId);
    checkNotNull(taskEventPublisher);
    checkNotNull(resultPartitionManager);
    checkNotNull(metricGroup);
    NettyConfig nettyConfig = config.nettyConfig();
    FileChannelManager fileChannelManager =
            new FileChannelManagerImpl(config.getTempDirs(), DIR_NAME_PREFIX);
    if (LOG.isInfoEnabled()) {
        LOG.info(
                "Created a new {} for storing result partitions of BLOCKING shuffles. Used directories:\n\t{}",
                FileChannelManager.class.getSimpleName(),
                Arrays.stream(fileChannelManager.getPaths())
                        .map(File::getAbsolutePath)
                        .collect(Collectors.joining("\n\t")));
    }
    ConnectionManager connectionManager =
            nettyConfig != null
                    ? new NettyConnectionManager(
                            resultPartitionManager,
                            taskEventPublisher,
                            nettyConfig,
                            config.getMaxNumberOfConnections(),
                            config.isConnectionReuseEnabled())
                    : new LocalConnectionManager();
    NetworkBufferPool networkBufferPool =
            new NetworkBufferPool(
                    config.numNetworkBuffers(),
                    config.networkBufferSize(),
                    config.getRequestSegmentsTimeout());
    // we create a separate buffer pool here for batch shuffle instead of reusing the network
    // buffer pool directly, to avoid potential side effects of memory contention, for example,
    // deadlock or "insufficient network buffer" errors
    BatchShuffleReadBufferPool batchShuffleReadBufferPool =
            new BatchShuffleReadBufferPool(
                    config.batchShuffleReadMemoryBytes(), config.networkBufferSize());
    // we create a separate IO executor pool here for batch shuffle instead of reusing the
    // TaskManager IO executor pool directly, to avoid potential side effects of execution
    // contention, for example, overly long IO or waiting times leading to starvation or timeouts
    ExecutorService batchShuffleReadIOExecutor =
            Executors.newFixedThreadPool(
                    Math.max(
                            1,
                            Math.min(
                                    batchShuffleReadBufferPool.getMaxConcurrentRequests(),
                                    4 * Hardware.getNumberCPUCores())),
                    new ExecutorThreadFactory("blocking-shuffle-io"));
    registerShuffleMetrics(metricGroup, networkBufferPool);
    ResultPartitionFactory resultPartitionFactory =
            new ResultPartitionFactory(
                    resultPartitionManager,
                    fileChannelManager,
                    networkBufferPool,
                    batchShuffleReadBufferPool,
                    batchShuffleReadIOExecutor,
                    config.getBlockingSubpartitionType(),
                    config.networkBuffersPerChannel(),
                    config.floatingNetworkBuffersPerGate(),
                    config.networkBufferSize(),
                    config.isBlockingShuffleCompressionEnabled(),
                    config.getCompressionCodec(),
                    config.getMaxBuffersPerChannel(),
                    config.sortShuffleMinBuffers(),
                    config.sortShuffleMinParallelism(),
                    config.isSSLEnabled());
    SingleInputGateFactory singleInputGateFactory =
            new SingleInputGateFactory(
                    taskExecutorResourceId,
                    config,
                    connectionManager,
                    resultPartitionManager,
                    taskEventPublisher,
                    networkBufferPool);
    return new NettyShuffleEnvironment(
            taskExecutorResourceId,
            config,
            networkBufferPool,
            connectionManager,
            resultPartitionManager,
            fileChannelManager,
            resultPartitionFactory,
            singleInputGateFactory,
            ioExecutor,
            batchShuffleReadBufferPool,
            batchShuffleReadIOExecutor);
}
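The pool size for "blocking-shuffle-io" clamps the read buffer pool's maximum concurrent requests to at most four times the CPU core count, with a floor of one thread. A worked example with hypothetical inputs (8 cores, 64 maximum concurrent requests; both numbers are stand-ins for the runtime values):

public class ShuffleIoPoolSizing {
    public static void main(String[] args) {
        int numberCpuCores = 8;          // stand-in for Hardware.getNumberCPUCores()
        int maxConcurrentRequests = 64;  // stand-in for batchShuffleReadBufferPool.getMaxConcurrentRequests()

        int poolSize = Math.max(1, Math.min(maxConcurrentRequests, 4 * numberCpuCores));
        System.out.println(poolSize); // min(64, 4 * 8) = 32, max(1, 32) = 32 IO threads
    }
}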
Use of org.apache.flink.util.concurrent.ExecutorThreadFactory in project flink by apache.
The class TaskManagerRunner, method startTaskManagerRunnerServices:
private void startTaskManagerRunnerServices() throws Exception {
    synchronized (lock) {
        rpcSystem = RpcSystem.load(configuration);
        this.executor =
                Executors.newScheduledThreadPool(
                        Hardware.getNumberCPUCores(),
                        new ExecutorThreadFactory("taskmanager-future"));
        highAvailabilityServices =
                HighAvailabilityServicesUtils.createHighAvailabilityServices(
                        configuration,
                        executor,
                        AddressResolution.NO_ADDRESS_RESOLUTION,
                        rpcSystem,
                        this);
        JMXService.startInstance(configuration.getString(JMXServerOptions.JMX_SERVER_PORT));
        rpcService = createRpcService(configuration, highAvailabilityServices, rpcSystem);
        this.resourceId =
                getTaskManagerResourceID(
                        configuration, rpcService.getAddress(), rpcService.getPort());
        this.workingDirectory =
                ClusterEntrypointUtils.createTaskManagerWorkingDirectory(configuration, resourceId);
        LOG.info("Using working directory: {}", workingDirectory);
        HeartbeatServices heartbeatServices = HeartbeatServices.fromConfiguration(configuration);
        metricRegistry =
                new MetricRegistryImpl(
                        MetricRegistryConfiguration.fromConfiguration(
                                configuration,
                                rpcSystem.getMaximumMessageSizeInBytes(configuration)),
                        ReporterSetup.fromConfiguration(configuration, pluginManager));
        final RpcService metricQueryServiceRpcService =
                MetricUtils.startRemoteMetricsRpcService(
                        configuration,
                        rpcService.getAddress(),
                        configuration.getString(TaskManagerOptions.BIND_HOST),
                        rpcSystem);
        metricRegistry.startQueryService(metricQueryServiceRpcService, resourceId.unwrap());
        blobCacheService =
                BlobUtils.createBlobCacheService(
                        configuration,
                        Reference.borrowed(workingDirectory.unwrap().getBlobStorageDirectory()),
                        highAvailabilityServices.createBlobStore(),
                        null);
        final ExternalResourceInfoProvider externalResourceInfoProvider =
                ExternalResourceUtils.createStaticExternalResourceInfoProviderFromConfig(
                        configuration, pluginManager);
        taskExecutorService =
                taskExecutorServiceFactory.createTaskExecutor(
                        this.configuration,
                        this.resourceId.unwrap(),
                        rpcService,
                        highAvailabilityServices,
                        heartbeatServices,
                        metricRegistry,
                        blobCacheService,
                        false,
                        externalResourceInfoProvider,
                        workingDirectory.unwrap(),
                        this);
        handleUnexpectedTaskExecutorServiceTermination();
        MemoryLogger.startIfConfigured(
                LOG, configuration, terminationFuture.thenAccept(ignored -> {}));
    }
}
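The "taskmanager-future" pool is a scheduled executor sized to the core count; its threads carry the pool name, which makes future callbacks and timeouts easy to attribute in thread dumps. A small sketch, assuming flink-core on the classpath and using a fixed size of 2 instead of Hardware.getNumberCPUCores():

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.flink.util.concurrent.ExecutorThreadFactory;

public class TaskManagerFuturePoolSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService executor =
                Executors.newScheduledThreadPool(
                        2, new ExecutorThreadFactory("taskmanager-future"));

        // Scheduled work runs on threads named after the pool,
        // e.g. "taskmanager-future-thread-1" (the exact pattern is the factory's choice).
        executor.schedule(
                () -> System.out.println("running on " + Thread.currentThread().getName()),
                100,
                TimeUnit.MILLISECONDS);

        Thread.sleep(500); // give the scheduled task time to run before shutting down
        executor.shutdown();
    }
}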
Use of org.apache.flink.util.concurrent.ExecutorThreadFactory in project flink by apache.
The class JdbcOutputFormat, method open:
/**
 * Connects to the target database and initializes the prepared statement.
 *
 * @param taskNumber The number of the parallel instance.
 * @param numTasks The number of parallel tasks.
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    try {
        connectionProvider.getOrEstablishConnection();
    } catch (Exception e) {
        throw new IOException("unable to open JDBC writer", e);
    }
    jdbcStatementExecutor = createAndOpenStatementExecutor(statementExecutorFactory);
    // Only start the background flusher when time-based flushing is enabled
    // (interval != 0) and records are actually buffered (batch size != 1).
    if (executionOptions.getBatchIntervalMs() != 0 && executionOptions.getBatchSize() != 1) {
        this.scheduler =
                Executors.newScheduledThreadPool(
                        1, new ExecutorThreadFactory("jdbc-upsert-output-format"));
        this.scheduledFuture =
                this.scheduler.scheduleWithFixedDelay(
                        () -> {
                            synchronized (JdbcOutputFormat.this) {
                                if (!closed) {
                                    try {
                                        flush();
                                    } catch (Exception e) {
                                        flushException = e;
                                    }
                                }
                            }
                        },
                        executionOptions.getBatchIntervalMs(),
                        executionOptions.getBatchIntervalMs(),
                        TimeUnit.MILLISECONDS);
    }
}
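Reduced to its essentials, the pattern above is: one named flusher thread, scheduleWithFixedDelay so a slow flush is never overlapped by the next one, a shared lock between flushing and closing, and a flushException field that the next write rethrows. A self-contained sketch of that pattern; BufferedSink and its members are illustrative, not Flink API:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.util.concurrent.ExecutorThreadFactory;

// Illustrative buffered sink with time-based flushing, modeled on JdbcOutputFormat.
public class BufferedSink implements AutoCloseable {
    private final List<String> buffer = new ArrayList<>();
    private final ScheduledExecutorService scheduler =
            Executors.newScheduledThreadPool(
                    1, new ExecutorThreadFactory("buffered-sink-flusher"));
    private final ScheduledFuture<?> scheduledFuture;
    private volatile boolean closed;
    private volatile Exception flushException;

    public BufferedSink(long flushIntervalMs) {
        // Fixed delay: the next flush is scheduled only after the previous one finished.
        scheduledFuture = scheduler.scheduleWithFixedDelay(() -> {
            synchronized (BufferedSink.this) {
                if (!closed) {
                    try {
                        flush();
                    } catch (Exception e) {
                        flushException = e; // surfaced to the caller on the next write
                    }
                }
            }
        }, flushIntervalMs, flushIntervalMs, TimeUnit.MILLISECONDS);
    }

    public synchronized void write(String record) throws Exception {
        if (flushException != null) {
            throw new Exception("writing aborted due to earlier flush failure", flushException);
        }
        buffer.add(record);
    }

    private synchronized void flush() {
        System.out.println("flushing " + buffer.size() + " records");
        buffer.clear();
    }

    @Override
    public synchronized void close() {
        closed = true;
        scheduledFuture.cancel(false);
        scheduler.shutdown();
    }
}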