Use of io.airlift.concurrent.BoundedExecutor in project presto by prestodb.
Class TransactionManager, method beginTransaction.
public TransactionId beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommitContext)
{
    TransactionId transactionId = TransactionId.create();
    // Each transaction gets its own bounded view of the shared finishingExecutor,
    // capping its finishing work at maxFinishingConcurrency concurrent tasks
    BoundedExecutor executor = new BoundedExecutor(finishingExecutor, maxFinishingConcurrency);
    TransactionMetadata transactionMetadata = new TransactionMetadata(transactionId, isolationLevel, readOnly, autoCommitContext, catalogManager, executor);
    checkState(transactions.put(transactionId, transactionMetadata) == null, "Duplicate transaction ID: %s", transactionId);
    return transactionId;
}
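BoundedExecutor owns no threads of its own: it is a throttle over an existing Executor, so finishing work still runs on the shared pool's threads, just never more than the cap at once. A minimal, self-contained sketch of that behavior (the pool, the limit of 2, and the task names are illustrative, not from Presto):

import io.airlift.concurrent.BoundedExecutor;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BoundedExecutorSketch
{
    public static void main(String[] args) throws InterruptedException
    {
        // A large shared pool...
        ExecutorService pool = Executors.newCachedThreadPool();
        // ...but at most 2 of the tasks submitted below run at any moment
        BoundedExecutor bounded = new BoundedExecutor(pool, 2);
        for (int i = 0; i < 8; i++) {
            int task = i;
            bounded.execute(() -> System.out.println("task " + task + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}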
Use of io.airlift.concurrent.BoundedExecutor in project presto by prestodb.
Class AbstractTestHiveClientS3, method setup.
protected void setup(String host, int port, String databaseName, String awsAccessKey, String awsSecretKey, String writableBucket)
{
    this.writableBucket = writableBucket;
    setupHive(databaseName);
    HiveS3Config s3Config = new HiveS3Config().setS3AwsAccessKey(awsAccessKey).setS3AwsSecretKey(awsSecretKey);
    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        hiveClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }
    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    HiveCluster hiveCluster = new TestingHiveCluster(hiveClientConfig, host, port);
    // Shared daemon pool; the BoundedExecutor below caps how many of its threads split iteration may claim
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-s3-%s"));
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig, s3Config));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(connectorId, TYPE_MANAGER, hiveClientConfig);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    metastoreClient = new TestingHiveMetastore(new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster)), executor, hiveClientConfig, writableBucket, hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    TypeRegistry typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(connectorId, hiveClientConfig, metastoreClient, hdfsEnvironment, hivePartitionManager, newDirectExecutorService(), typeManager, locationService, new TableParameterCodec(), partitionUpdateCodec, new HiveTypeTranslator(), new NodeVersion("test_version"));
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            connectorId,
            transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
            new NamenodeStats(),
            hdfsEnvironment,
            new HadoopDirectoryLister(),
            new BoundedExecutor(executor, hiveClientConfig.getMaxSplitIteratorThreads()),
            new HiveCoercionPolicy(typeManager),
            hiveClientConfig.getMaxOutstandingSplits(),
            hiveClientConfig.getMinPartitionBatchSize(),
            hiveClientConfig.getMaxPartitionBatchSize(),
            hiveClientConfig.getMaxInitialSplits(),
            hiveClientConfig.getRecursiveDirWalkerEnabled());
    pageSinkProvider = new HivePageSinkProvider(getDefaultHiveFileWriterFactories(hiveClientConfig), hdfsEnvironment, metastoreClient, new GroupByHashPageIndexerFactory(new JoinCompiler()), typeManager, new HiveClientConfig(), locationService, partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment, getDefaultHiveRecordCursorProvider(hiveClientConfig), getDefaultHiveDataStreamFactories(hiveClientConfig), TYPE_MANAGER);
}
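Note the pattern here: one cached pool ("hive-s3-%s") is handed to the metastore client directly, while HiveSplitManager receives only a BoundedExecutor view of it, so bulk split iteration cannot monopolize the shared threads. A sketch of several bounded views over one pool (component names and limits are illustrative, not from Presto):

import io.airlift.concurrent.BoundedExecutor;

import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import static io.airlift.concurrent.Threads.daemonThreadsNamed;
import static java.util.concurrent.Executors.newCachedThreadPool;

public class SharedPoolViews
{
    public static void main(String[] args) throws InterruptedException
    {
        // One shared daemon pool, as in the test setup above
        ExecutorService shared = newCachedThreadPool(daemonThreadsNamed("hive-demo-%s"));
        // Independent concurrency caps over the same underlying threads
        Executor splitIteration = new BoundedExecutor(shared, 4);
        Executor metastoreCalls = new BoundedExecutor(shared, 2);

        for (int i = 0; i < 10; i++) {
            splitIteration.execute(() -> System.out.println("split work on " + Thread.currentThread().getName()));
            metastoreCalls.execute(() -> System.out.println("metastore work on " + Thread.currentThread().getName()));
        }
        shared.shutdown();
        shared.awaitTermination(10, TimeUnit.SECONDS);
    }
}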
Use of io.airlift.concurrent.BoundedExecutor in project presto by prestodb.
Class TimeoutBackupStore, method timeLimited.
private static <T> T timeLimited(T target, Class<T> clazz, Duration timeout, ExecutorService executor, int maxThreads)
{
    // BoundedExecutor is only an Executor, so adapt it back to an ExecutorService for the time limiter
    executor = new ExecutorServiceAdapter(new BoundedExecutor(executor, maxThreads));
    TimeLimiter limiter = new SimpleTimeLimiter(executor);
    return limiter.newProxy(target, clazz, timeout.toMillis(), MILLISECONDS);
}
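newProxy returns a dynamic proxy that runs each interface call on the bounded executor and abandons it once the timeout elapses, throwing UncheckedTimeoutException to the caller. A hedged usage sketch, assuming a copy of the timeLimited helper above is in scope (in Presto it is private to TimeoutBackupStore) and a Guava version whose SimpleTimeLimiter constructor is public (newer versions use SimpleTimeLimiter.create); SlowService is a hypothetical interface invented for this sketch:

import com.google.common.util.concurrent.UncheckedTimeoutException;
import io.airlift.units.Duration;

import java.util.concurrent.ExecutorService;

import static java.util.concurrent.Executors.newCachedThreadPool;
import static java.util.concurrent.TimeUnit.SECONDS;

public class TimeLimitedSketch
{
    // Hypothetical interface used only for this sketch; clazz must be an interface for newProxy
    public interface SlowService
    {
        String fetch();
    }

    public static void main(String[] args)
    {
        ExecutorService executor = newCachedThreadPool();
        SlowService slow = () -> {
            try {
                Thread.sleep(5_000);
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            return "done";
        };
        // At most 10 concurrent proxied calls, each abandoned after 1 second
        SlowService limited = timeLimited(slow, SlowService.class, new Duration(1, SECONDS), executor, 10);
        try {
            limited.fetch();
        }
        catch (UncheckedTimeoutException e) {
            System.out.println("call timed out as expected");
        }
        executor.shutdownNow();
    }
}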