Use of com.google.common.util.concurrent.ListeningExecutorService in project MindsEye by SimiaCryptus.
The class CudnnTest, method testTensorListMT.
private void testTensorListMT(@Nonnull NotebookOutput log, @Nonnull int[] dimensions, int length, double tolerance, int accumulations) {
  @Nonnull Supplier<TensorList> factory = () -> TensorArray.wrap(IntStream.range(0, length).mapToObj(j -> {
    @Nonnull Tensor tensor = new Tensor(dimensions);
    Arrays.parallelSetAll(tensor.getData(), this::random);
    return tensor;
  }).toArray(j -> new Tensor[j]));
  log.code(() -> {
    @Nonnull ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(5));
    PrintStream out = SysOutInterceptor.INSTANCE.currentHandler();
    try {
      List<ListenableFuture<Object>> collect = IntStream.range(0, 16).mapToObj(workerNumber -> {
        @Nonnull TimedResult<TensorList> originalTiming = TimedResult.time(() -> factory.get());
        TensorList original = originalTiming.result;
        logger.info(String.format("[%s] Calculated test data in %.4fsec", workerNumber, originalTiming.seconds()));
        @Nonnull ListenableFuture<TensorList> mutableDataFuture = pool.submit(() -> CudaSystem.run(gpu -> {
          PrintStream oldHandler = SysOutInterceptor.INSTANCE.setCurrentHandler(out);
          @Nonnull TimedResult<CudaTensor> timedResult = TimedResult.time(() -> {
            return gpu.getTensor(original, Precision.Double, MemoryType.Managed, false);
          });
          logger.info(String.format("[%s] Wrote %s in %.4f seconds, Device %d: %s", workerNumber, Arrays.toString(dimensions), timedResult.seconds(), gpu.getDeviceId(), CudaDevice.getDeviceName(gpu.getDeviceId())));
          SysOutInterceptor.INSTANCE.setCurrentHandler(oldHandler);
          return CudaTensorList.wrap(timedResult.result, length, dimensions, Precision.Double);
        }, original));
        @Nonnull TimedResult<List<TensorList>> accumulantTiming = TimedResult.time(() -> IntStream.range(0, accumulations).mapToObj(x -> factory.get()).collect(Collectors.toList()));
        List<TensorList> accumulants = accumulantTiming.result;
        @Nonnull TimedResult<TensorList> finalResultTiming = TimedResult.time(() -> {
          return accumulants.stream().map(x -> {
            x.addRef();
            return x;
          }).reduce((a, b) -> {
            TensorList sum = a.addAndFree(b);
            b.freeRef();
            return sum;
          }).map(x -> {
            TensorList sum = x.add(original);
            x.freeRef();
            return sum;
          }).orElseGet(() -> {
            original.addRef();
            return original;
          });
        });
        logger.info(String.format("[%s] Calculated accumulant in %.4fsec", workerNumber, accumulantTiming.seconds()));
        @Nonnull ListenableFuture<TensorList> accumulated = Futures.transform(mutableDataFuture, (x) -> {
          PrintStream oldHandler = SysOutInterceptor.INSTANCE.setCurrentHandler(out);
          @Nonnull AtomicReference<TensorList> mutableGpuData = new AtomicReference<>(x);
          accumulants.stream().parallel().forEach(delta -> {
            CudaSystem.run(gpu -> {
              @Nonnull TimedResult<CudaTensorList> timedWrite = TimedResult.time(() -> {
                @Nullable CudaTensor cudaMemory = gpu.getTensor(delta, Precision.Double, MemoryType.Managed, false);
                delta.freeRef();
                return CudaTensorList.wrap(cudaMemory, length, dimensions, Precision.Double);
              });
              @Nonnull TimedResult<Void> timedAccumulation = TimedResult.time(() -> {
                synchronized (mutableGpuData) {
                  mutableGpuData.getAndUpdate(y -> {
                    TensorList add = y.add(timedWrite.result);
                    y.freeRef();
                    return add;
                  });
                }
                timedWrite.result.freeRef();
              });
              logger.info(String.format("[%s] Wrote in %.4f seconds and accumulated %s in %.4f seconds, Device %d: %s", workerNumber, timedAccumulation.seconds(), Arrays.toString(dimensions), timedWrite.seconds(), gpu.getDeviceId(), CudaDevice.getDeviceName(gpu.getDeviceId())));
            }, delta);
          });
          SysOutInterceptor.INSTANCE.setCurrentHandler(oldHandler);
          return mutableGpuData.get();
        }, pool);
        TensorList finalResult = finalResultTiming.result;
        logger.info(String.format("[%s] Calculated final data in %.4fsec", workerNumber, finalResultTiming.seconds()));
        return Futures.transform(accumulated, (write) -> {
          original.freeRef();
          PrintStream oldHandler = SysOutInterceptor.INSTANCE.setCurrentHandler(out);
          CudaSystem.run(gpu -> {
            @Nonnull TimedResult<Boolean> timedVerify = TimedResult.time(() -> {
              @Nonnull TensorList minus = finalResult.minus(write);
              double diffVal = minus.stream().mapToDouble(x -> {
                double v = Arrays.stream(x.getData()).map(Math::abs).max().getAsDouble();
                x.freeRef();
                return v;
              }).max().getAsDouble();
              minus.freeRef();
              return diffVal < tolerance;
            });
            logger.info(String.format("[%s] Read %s and verified in %.4fs using device %d: %s", workerNumber, Arrays.toString(dimensions), timedVerify.seconds(), gpu.getDeviceId(), CudaDevice.getDeviceName(gpu.getDeviceId())));
            if (!timedVerify.result)
              Assert.assertTrue(finalResult.prettyPrint() + " != " + write.prettyPrint(), timedVerify.result);
            write.freeRef();
          });
          SysOutInterceptor.INSTANCE.setCurrentHandler(oldHandler);
          finalResult.freeRef();
          return null;
        }, pool);
      }).collect(Collectors.toList());
      List<Object> objects = Futures.allAsList(collect).get();
    } catch (@Nonnull InterruptedException | ExecutionException e) {
      throw new RuntimeException(e);
    } finally {
      pool.shutdown();
    }
  });
}
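Stripped of the CUDA and reference-counting details, the concurrency skeleton above is: wrap a fixed pool with MoreExecutors.listeningDecorator, submit per-worker pipelines, chain follow-up stages with Futures.transform, and join everything with Futures.allAsList. Below is a minimal, self-contained sketch of that skeleton; the task bodies and class name are placeholders, not MindsEye code.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ListeningPoolSkeleton {
  public static void main(String[] args) throws Exception {
    // Decorating the pool makes submit() return ListenableFuture instead of plain Future.
    ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(5));
    try {
      List<ListenableFuture<String>> pipelines = IntStream.range(0, 16).mapToObj(worker -> {
        // Stage 1: some computation on the pool (placeholder work).
        ListenableFuture<Integer> stage1 = pool.submit(() -> worker * worker);
        // Stage 2: runs on the pool once stage 1 completes.
        return Futures.transform(stage1, n -> "worker " + worker + " -> " + n, pool);
      }).collect(Collectors.toList());
      // Join: fails fast if any pipeline fails, otherwise yields all results.
      Futures.allAsList(pipelines).get().forEach(System.out::println);
    } finally {
      pool.shutdown();
    }
  }
}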
Use of com.google.common.util.concurrent.ListeningExecutorService in project presto by prestodb.
The class ServerMainModule, method setup.
@Override
protected void setup(Binder binder) {
ServerConfig serverConfig = buildConfigObject(ServerConfig.class);
if (serverConfig.isResourceManager()) {
    install(new ResourceManagerModule());
} else if (serverConfig.isCoordinator()) {
    install(new CoordinatorModule());
} else {
    install(new WorkerModule());
}
install(new InternalCommunicationModule());
configBinder(binder).bindConfig(FeaturesConfig.class);
binder.bind(PlanChecker.class).in(Scopes.SINGLETON);
binder.bind(SqlParser.class).in(Scopes.SINGLETON);
binder.bind(SqlParserOptions.class).toInstance(sqlParserOptions);
sqlParserOptions.useEnhancedErrorHandler(serverConfig.isEnhancedErrorReporting());
jaxrsBinder(binder).bind(ThrowableMapper.class);
configBinder(binder).bindConfig(QueryManagerConfig.class);
configBinder(binder).bindConfig(SqlEnvironmentConfig.class);
jsonCodecBinder(binder).bindJsonCodec(ViewDefinition.class);
newOptionalBinder(binder, ExplainAnalyzeContext.class);
// GC Monitor
binder.bind(GcMonitor.class).to(JmxGcMonitor.class).in(Scopes.SINGLETON);
// session properties
binder.bind(SessionPropertyManager.class).in(Scopes.SINGLETON);
binder.bind(SystemSessionProperties.class).in(Scopes.SINGLETON);
binder.bind(SessionPropertyDefaults.class).in(Scopes.SINGLETON);
// schema properties
binder.bind(SchemaPropertyManager.class).in(Scopes.SINGLETON);
// table properties
binder.bind(TablePropertyManager.class).in(Scopes.SINGLETON);
// column properties
binder.bind(ColumnPropertyManager.class).in(Scopes.SINGLETON);
// analyze properties
binder.bind(AnalyzePropertyManager.class).in(Scopes.SINGLETON);
// node manager
discoveryBinder(binder).bindSelector("presto");
binder.bind(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
binder.bind(InternalNodeManager.class).to(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
newExporter(binder).export(DiscoveryNodeManager.class).withGeneratedName();
httpClientBinder(binder).bindHttpClient("node-manager", ForNodeManager.class).withTracing().withConfigDefaults(config -> {
config.setRequestTimeout(new Duration(10, SECONDS));
});
driftClientBinder(binder).bindDriftClient(ThriftServerInfoClient.class, ForNodeManager.class).withAddressSelector(((addressSelectorBinder, annotation, prefix) -> addressSelectorBinder.bind(AddressSelector.class).annotatedWith(annotation).to(FixedAddressSelector.class)));
// node scheduler
// TODO: remove from NodePartitioningManager and move to CoordinatorModule
configBinder(binder).bindConfig(NodeSchedulerConfig.class);
configBinder(binder).bindConfig(SimpleTtlNodeSelectorConfig.class);
binder.bind(NodeScheduler.class).in(Scopes.SINGLETON);
binder.bind(NodeSelectionStats.class).in(Scopes.SINGLETON);
newExporter(binder).export(NodeSelectionStats.class).withGeneratedName();
binder.bind(NodeSchedulerExporter.class).in(Scopes.SINGLETON);
binder.bind(NodeTaskMap.class).in(Scopes.SINGLETON);
newExporter(binder).export(NodeScheduler.class).withGeneratedName();
// network topology
// TODO: move to CoordinatorModule when NodeScheduler is moved
install(installModuleIf(NodeSchedulerConfig.class, config -> LEGACY.equalsIgnoreCase(config.getNetworkTopology()), moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(LegacyNetworkTopology.class).in(Scopes.SINGLETON)));
install(installModuleIf(NodeSchedulerConfig.class, config -> FLAT.equalsIgnoreCase(config.getNetworkTopology()), moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(FlatNetworkTopology.class).in(Scopes.SINGLETON)));
// task execution
jaxrsBinder(binder).bind(TaskResource.class);
newExporter(binder).export(TaskResource.class).withGeneratedName();
jaxrsBinder(binder).bind(TaskExecutorResource.class);
newExporter(binder).export(TaskExecutorResource.class).withGeneratedName();
binder.bind(TaskManagementExecutor.class).in(Scopes.SINGLETON);
install(new DefaultThriftCodecsModule());
thriftCodecBinder(binder).bindCustomThriftCodec(SqlInvokedFunctionCodec.class);
thriftCodecBinder(binder).bindCustomThriftCodec(SqlFunctionIdCodec.class);
jsonCodecBinder(binder).bindListJsonCodec(TaskMemoryReservationSummary.class);
binder.bind(SqlTaskManager.class).in(Scopes.SINGLETON);
binder.bind(TaskManager.class).to(Key.get(SqlTaskManager.class));
binder.bind(SpoolingOutputBufferFactory.class).in(Scopes.SINGLETON);
binder.bind(RandomResourceManagerAddressSelector.class).in(Scopes.SINGLETON);
driftClientBinder(binder).bindDriftClient(ResourceManagerClient.class, ForResourceManager.class)
        .withAddressSelector((addressSelectorBinder, annotation, prefix) -> addressSelectorBinder.bind(AddressSelector.class).annotatedWith(annotation).to(RandomResourceManagerAddressSelector.class))
        .withExceptionClassifier(throwable -> {
            if (throwable instanceof ResourceManagerInconsistentException) {
                return new ExceptionClassification(Optional.of(true), DOWN);
            }
            return new ExceptionClassification(Optional.of(true), NORMAL);
        });
newOptionalBinder(binder, ClusterMemoryManagerService.class);
install(installModuleIf(ServerConfig.class, ServerConfig::isResourceManagerEnabled, new Module() {
    @Override
    public void configure(Binder moduleBinder) {
        configBinder(moduleBinder).bindConfig(ResourceManagerConfig.class);
        moduleBinder.bind(ClusterStatusSender.class).to(ResourceManagerClusterStatusSender.class).in(Scopes.SINGLETON);
        if (serverConfig.isCoordinator()) {
            moduleBinder.bind(ClusterMemoryManagerService.class).in(Scopes.SINGLETON);
            moduleBinder.bind(ResourceGroupService.class).to(ResourceManagerResourceGroupService.class).in(Scopes.SINGLETON);
        }
    }

    @Provides
    @Singleton
    @ForResourceManager
    public ScheduledExecutorService createResourceManagerScheduledExecutor(ResourceManagerConfig config) {
        return createConcurrentScheduledExecutor("resource-manager-heartbeats", config.getHeartbeatConcurrency(), config.getHeartbeatThreads());
    }

    @Provides
    @Singleton
    @ForResourceManager
    public ListeningExecutorService createResourceManagerExecutor(ResourceManagerConfig config) {
        ExecutorService executor = new ThreadPoolExecutor(0, config.getResourceManagerExecutorThreads(), 60, SECONDS, new LinkedBlockingQueue<>(), daemonThreadsNamed("resource-manager-executor-%s"));
        return listeningDecorator(executor);
    }
}, moduleBinder -> {
    moduleBinder.bind(ClusterStatusSender.class).toInstance(execution -> {
    });
    moduleBinder.bind(ResourceGroupService.class).to(NoopResourceGroupService.class).in(Scopes.SINGLETON);
}));
FeaturesConfig featuresConfig = buildConfigObject(FeaturesConfig.class);
FeaturesConfig.TaskSpillingStrategy taskSpillingStrategy = featuresConfig.getTaskSpillingStrategy();
switch (taskSpillingStrategy) {
    case PER_TASK_MEMORY_THRESHOLD:
        binder.bind(TaskThresholdMemoryRevokingScheduler.class).in(Scopes.SINGLETON);
        break;
    default:
        binder.bind(MemoryRevokingScheduler.class).in(Scopes.SINGLETON);
}
// Add monitoring for JVM pauses
binder.bind(PauseMeter.class).in(Scopes.SINGLETON);
newExporter(binder).export(PauseMeter.class).withGeneratedName();
binder.bind(GcStatusMonitor.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(MemoryManagerConfig.class);
configBinder(binder).bindConfig(NodeMemoryConfig.class);
configBinder(binder).bindConfig(ReservedSystemMemoryConfig.class);
binder.bind(LocalMemoryManager.class).in(Scopes.SINGLETON);
binder.bind(LocalMemoryManagerExporter.class).in(Scopes.SINGLETON);
binder.bind(EmbedVersion.class).in(Scopes.SINGLETON);
newExporter(binder).export(TaskManager.class).withGeneratedName();
binder.bind(TaskExecutor.class).in(Scopes.SINGLETON);
newExporter(binder).export(TaskExecutor.class).withGeneratedName();
binder.bind(MultilevelSplitQueue.class).in(Scopes.SINGLETON);
newExporter(binder).export(MultilevelSplitQueue.class).withGeneratedName();
binder.bind(LocalExecutionPlanner.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(FileFragmentResultCacheConfig.class);
binder.bind(FragmentCacheStats.class).in(Scopes.SINGLETON);
newExporter(binder).export(FragmentCacheStats.class).withGeneratedName();
configBinder(binder).bindConfig(CompilerConfig.class);
binder.bind(ExpressionCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(ExpressionCompiler.class).withGeneratedName();
binder.bind(PageFunctionCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(PageFunctionCompiler.class).withGeneratedName();
configBinder(binder).bindConfig(TaskManagerConfig.class);
binder.bind(IndexJoinLookupStats.class).in(Scopes.SINGLETON);
newExporter(binder).export(IndexJoinLookupStats.class).withGeneratedName();
binder.bind(AsyncHttpExecutionMBean.class).in(Scopes.SINGLETON);
newExporter(binder).export(AsyncHttpExecutionMBean.class).withGeneratedName();
binder.bind(JoinFilterFunctionCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(JoinFilterFunctionCompiler.class).withGeneratedName();
binder.bind(JoinCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(JoinCompiler.class).withGeneratedName();
binder.bind(OrderingCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(OrderingCompiler.class).withGeneratedName();
binder.bind(PagesIndex.Factory.class).to(PagesIndex.DefaultFactory.class);
binder.bind(LookupJoinOperators.class).in(Scopes.SINGLETON);
jsonCodecBinder(binder).bindJsonCodec(TaskStatus.class);
jsonCodecBinder(binder).bindJsonCodec(StageInfo.class);
jsonCodecBinder(binder).bindJsonCodec(TaskInfo.class);
jsonCodecBinder(binder).bindJsonCodec(OperatorStats.class);
jsonCodecBinder(binder).bindJsonCodec(ExecutionFailureInfo.class);
jsonCodecBinder(binder).bindJsonCodec(TableCommitContext.class);
jsonCodecBinder(binder).bindJsonCodec(SqlInvokedFunction.class);
smileCodecBinder(binder).bindSmileCodec(TaskStatus.class);
smileCodecBinder(binder).bindSmileCodec(TaskInfo.class);
thriftCodecBinder(binder).bindThriftCodec(TaskStatus.class);
jaxrsBinder(binder).bind(PagesResponseWriter.class);
// exchange client
binder.bind(ExchangeClientSupplier.class).to(ExchangeClientFactory.class).in(Scopes.SINGLETON);
httpClientBinder(binder).bindHttpClient("exchange", ForExchange.class).withTracing().withFilter(GenerateTraceTokenRequestFilter.class).withConfigDefaults(config -> {
config.setRequestTimeout(new Duration(10, SECONDS));
config.setMaxConnectionsPerServer(250);
config.setMaxContentLength(new DataSize(32, MEGABYTE));
});
binder.install(new DriftNettyClientModule());
driftClientBinder(binder).bindDriftClient(ThriftTaskClient.class, ForExchange.class).withAddressSelector(((addressSelectorBinder, annotation, prefix) -> addressSelectorBinder.bind(AddressSelector.class).annotatedWith(annotation).to(FixedAddressSelector.class)));
configBinder(binder).bindConfig(ExchangeClientConfig.class);
binder.bind(ExchangeExecutionMBean.class).in(Scopes.SINGLETON);
newExporter(binder).export(ExchangeExecutionMBean.class).withGeneratedName();
// execution
binder.bind(LocationFactory.class).to(HttpLocationFactory.class).in(Scopes.SINGLETON);
// memory manager
jaxrsBinder(binder).bind(MemoryResource.class);
jsonCodecBinder(binder).bindJsonCodec(MemoryInfo.class);
jsonCodecBinder(binder).bindJsonCodec(MemoryPoolAssignmentsRequest.class);
smileCodecBinder(binder).bindSmileCodec(MemoryInfo.class);
smileCodecBinder(binder).bindSmileCodec(MemoryPoolAssignmentsRequest.class);
// transaction manager
configBinder(binder).bindConfig(TransactionManagerConfig.class);
// data stream provider
binder.bind(PageSourceManager.class).in(Scopes.SINGLETON);
binder.bind(PageSourceProvider.class).to(PageSourceManager.class).in(Scopes.SINGLETON);
// connector distributed metadata manager
binder.bind(ConnectorMetadataUpdaterManager.class).in(Scopes.SINGLETON);
// page sink provider
binder.bind(PageSinkManager.class).in(Scopes.SINGLETON);
binder.bind(PageSinkProvider.class).to(PageSinkManager.class).in(Scopes.SINGLETON);
// metadata
binder.bind(StaticCatalogStore.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(StaticCatalogStoreConfig.class);
binder.bind(StaticFunctionNamespaceStore.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(StaticFunctionNamespaceStoreConfig.class);
binder.bind(FunctionAndTypeManager.class).in(Scopes.SINGLETON);
binder.bind(MetadataManager.class).in(Scopes.SINGLETON);
binder.bind(Metadata.class).to(MetadataManager.class).in(Scopes.SINGLETON);
// row expression utils
binder.bind(DomainTranslator.class).to(RowExpressionDomainTranslator.class).in(Scopes.SINGLETON);
binder.bind(PredicateCompiler.class).to(RowExpressionPredicateCompiler.class).in(Scopes.SINGLETON);
binder.bind(DeterminismEvaluator.class).to(RowExpressionDeterminismEvaluator.class).in(Scopes.SINGLETON);
// type
binder.bind(TypeManager.class).to(FunctionAndTypeManager.class).in(Scopes.SINGLETON);
jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
newSetBinder(binder, Type.class);
// plan
jsonBinder(binder).addKeySerializerBinding(VariableReferenceExpression.class).to(VariableReferenceExpressionSerializer.class);
jsonBinder(binder).addKeyDeserializerBinding(VariableReferenceExpression.class).to(VariableReferenceExpressionDeserializer.class);
// split manager
binder.bind(SplitManager.class).in(Scopes.SINGLETON);
// partitioning provider manager
binder.bind(PartitioningProviderManager.class).in(Scopes.SINGLETON);
// node partitioning manager
binder.bind(NodePartitioningManager.class).in(Scopes.SINGLETON);
// connector plan optimizer manager
binder.bind(ConnectorPlanOptimizerManager.class).in(Scopes.SINGLETON);
// index manager
binder.bind(IndexManager.class).in(Scopes.SINGLETON);
// handle resolver
binder.install(new HandleJsonModule());
binder.bind(ObjectMapper.class).toProvider(JsonObjectMapperProvider.class);
// connector
binder.bind(ScalarStatsCalculator.class).in(Scopes.SINGLETON);
binder.bind(StatsNormalizer.class).in(Scopes.SINGLETON);
binder.bind(FilterStatsCalculator.class).in(Scopes.SINGLETON);
binder.bind(ConnectorManager.class).in(Scopes.SINGLETON);
// system connector
binder.install(new SystemConnectorModule());
// splits
jsonCodecBinder(binder).bindJsonCodec(TaskUpdateRequest.class);
jsonCodecBinder(binder).bindJsonCodec(ConnectorSplit.class);
jsonCodecBinder(binder).bindJsonCodec(PlanFragment.class);
smileCodecBinder(binder).bindSmileCodec(TaskUpdateRequest.class);
smileCodecBinder(binder).bindSmileCodec(ConnectorSplit.class);
smileCodecBinder(binder).bindSmileCodec(PlanFragment.class);
jsonBinder(binder).addSerializerBinding(Slice.class).to(SliceSerializer.class);
jsonBinder(binder).addDeserializerBinding(Slice.class).to(SliceDeserializer.class);
jsonBinder(binder).addSerializerBinding(Expression.class).to(ExpressionSerializer.class);
jsonBinder(binder).addDeserializerBinding(Expression.class).to(ExpressionDeserializer.class);
jsonBinder(binder).addDeserializerBinding(FunctionCall.class).to(FunctionCallDeserializer.class);
// metadata updates
jsonCodecBinder(binder).bindJsonCodec(MetadataUpdates.class);
smileCodecBinder(binder).bindSmileCodec(MetadataUpdates.class);
// split monitor
binder.bind(SplitMonitor.class).in(Scopes.SINGLETON);
// Determine the NodeVersion
NodeVersion nodeVersion = new NodeVersion(serverConfig.getPrestoVersion());
binder.bind(NodeVersion.class).toInstance(nodeVersion);
// presto announcement
checkArgument(!(serverConfig.isResourceManager() && serverConfig.isCoordinator()), "Server cannot be configured as both resource manager and coordinator");
discoveryBinder(binder).bindHttpAnnouncement("presto").addProperty("node_version", nodeVersion.toString()).addProperty("coordinator", String.valueOf(serverConfig.isCoordinator())).addProperty("resource_manager", String.valueOf(serverConfig.isResourceManager())).addProperty("connectorIds", nullToEmpty(serverConfig.getDataSources()));
// server info resource
jaxrsBinder(binder).bind(ServerInfoResource.class);
jsonCodecBinder(binder).bindJsonCodec(ServerInfo.class);
// node status resource
jaxrsBinder(binder).bind(StatusResource.class);
jsonCodecBinder(binder).bindJsonCodec(NodeStatus.class);
// plugin manager
binder.bind(PluginManager.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(PluginManagerConfig.class);
binder.bind(CatalogManager.class).in(Scopes.SINGLETON);
// block encodings
binder.bind(BlockEncodingManager.class).in(Scopes.SINGLETON);
binder.bind(BlockEncodingSerde.class).to(BlockEncodingManager.class).in(Scopes.SINGLETON);
newSetBinder(binder, BlockEncoding.class);
jsonBinder(binder).addSerializerBinding(Block.class).to(BlockJsonSerde.Serializer.class);
jsonBinder(binder).addDeserializerBinding(Block.class).to(BlockJsonSerde.Deserializer.class);
// thread visualizer
jaxrsBinder(binder).bind(ThreadResource.class);
// PageSorter
binder.bind(PageSorter.class).to(PagesIndexPageSorter.class).in(Scopes.SINGLETON);
// PageIndexer
binder.bind(PageIndexerFactory.class).to(GroupByHashPageIndexerFactory.class).in(Scopes.SINGLETON);
// Finalizer
binder.bind(FinalizerService.class).in(Scopes.SINGLETON);
// Spiller
binder.bind(SpillerFactory.class).to(GenericSpillerFactory.class).in(Scopes.SINGLETON);
binder.bind(StandaloneSpillerFactory.class).to(TempStorageStandaloneSpillerFactory.class).in(Scopes.SINGLETON);
binder.bind(PartitioningSpillerFactory.class).to(GenericPartitioningSpillerFactory.class).in(Scopes.SINGLETON);
binder.bind(SpillerStats.class).in(Scopes.SINGLETON);
newExporter(binder).export(SpillerFactory.class).withGeneratedName();
binder.bind(LocalSpillManager.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(NodeSpillConfig.class);
install(installModuleIf(FeaturesConfig.class, config -> config.getSingleStreamSpillerChoice() == SingleStreamSpillerChoice.LOCAL_FILE, moduleBinder -> moduleBinder.bind(SingleStreamSpillerFactory.class).to(FileSingleStreamSpillerFactory.class).in(Scopes.SINGLETON)));
install(installModuleIf(FeaturesConfig.class, config -> config.getSingleStreamSpillerChoice() == SingleStreamSpillerChoice.TEMP_STORAGE, moduleBinder -> moduleBinder.bind(SingleStreamSpillerFactory.class).to(TempStorageSingleStreamSpillerFactory.class).in(Scopes.SINGLETON)));
// Thrift RPC
binder.install(new DriftNettyServerModule());
driftServerBinder(binder).bindService(ThriftTaskService.class);
driftServerBinder(binder).bindService(ThriftServerInfoService.class);
// Async page transport
newMapBinder(binder, String.class, Servlet.class, TheServlet.class).addBinding("/v1/task/async/*").to(AsyncPageTransportServlet.class).in(Scopes.SINGLETON);
// cleanup
binder.bind(ExecutorCleanup.class).in(Scopes.SINGLETON);
// Distributed tracing
configBinder(binder).bindConfig(TracingConfig.class);
install(installModuleIf(TracingConfig.class, config -> !config.getEnableDistributedTracing() || NOOP.equalsIgnoreCase(config.getTracerType()), moduleBinder -> moduleBinder.bind(TracerProvider.class).to(NoopTracerProvider.class).in(Scopes.SINGLETON)));
install(installModuleIf(TracingConfig.class, config -> config.getEnableDistributedTracing() && SIMPLE.equalsIgnoreCase(config.getTracerType()), moduleBinder -> moduleBinder.bind(TracerProvider.class).to(SimpleTracerProvider.class).in(Scopes.SINGLETON)));
// Optional Status Detector
newOptionalBinder(binder, NodeStatusService.class);
}
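The @ForResourceManager provider earlier in this module is how it hands out a ListeningExecutorService; consumers obtain it through constructor injection. The class below is a minimal sketch of such a consumer, not Presto code: only the ForResourceManager annotation comes from the source above, and the class and method names are illustrative.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import javax.inject.Inject;

// Hypothetical consumer, for illustration only; not part of Presto.
public class ResourceManagerHeartbeats {
    private final ListeningExecutorService executor;

    @Inject
    public ResourceManagerHeartbeats(@ForResourceManager ListeningExecutorService executor) {
        this.executor = executor;
    }

    public ListenableFuture<?> sendAsync(Runnable heartbeat) {
        // The decorated executor returns ListenableFuture, so callers can attach
        // completion callbacks instead of blocking on get().
        return executor.submit(heartbeat);
    }
}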
Use of com.google.common.util.concurrent.ListeningExecutorService in project presto by prestodb.
The class TestCachingHiveMetastore, method testCachingWithPartitionVersioning.
@Test
public void testCachingWithPartitionVersioning() {
    MockHiveMetastoreClient mockClient = new MockHiveMetastoreClient();
    MockHiveCluster mockHiveCluster = new MockHiveCluster(mockClient);
    ListeningExecutorService executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed("partition-versioning-test-%s")));
    MockHiveMetastore mockHiveMetastore = new MockHiveMetastore(mockHiveCluster);
    PartitionMutator mockPartitionMutator = new MockPartitionMutator(identity());
    ColumnConverter hiveColumnConverter = new HiveColumnConverter();
    CachingHiveMetastore partitionCachingEnabledMetastore = new CachingHiveMetastore(new BridgingHiveMetastore(mockHiveMetastore, mockPartitionMutator), executor, false, new Duration(5, TimeUnit.MINUTES), new Duration(1, TimeUnit.MINUTES), 1000, true, MetastoreCacheScope.PARTITION, 0.0);
    assertEquals(mockClient.getAccessCount(), 0);
    assertEquals(partitionCachingEnabledMetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS);
    assertEquals(mockClient.getAccessCount(), 1);
    assertEquals(partitionCachingEnabledMetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS);
    // Assert that we did not hit the cache
    assertEquals(mockClient.getAccessCount(), 2);
    // Select all of the available partitions and load them into the cache
    assertEquals(partitionCachingEnabledMetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2);
    assertEquals(mockClient.getAccessCount(), 3);
    // Now fetching either or both of them should hit the cache
    assertEquals(partitionCachingEnabledMetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)).size(), 1);
    assertEquals(partitionCachingEnabledMetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION2)).size(), 1);
    assertEquals(partitionCachingEnabledMetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2);
    assertEquals(mockClient.getAccessCount(), 3);
    // This call should NOT invalidate the partition cache because the partition version is the same as before
    assertEquals(partitionCachingEnabledMetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS);
    assertEquals(mockClient.getAccessCount(), 4);
    assertEquals(partitionCachingEnabledMetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2);
    // Assert that it's a cache hit
    assertEquals(mockClient.getAccessCount(), 4);
    assertInvalidateCache(new MockPartitionMutator(version -> version + 1));
    assertInvalidateCache(new MockPartitionMutator(version -> version - 1));
}
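The executor passed to CachingHiveMetastore above is what allows cache entries to be refreshed off-thread. Stripped of the Hive-specific classes, the underlying Guava mechanism is CacheLoader.asyncReloading combined with refreshAfterWrite; here is a minimal sketch, with a stand-in loader function in place of a real metastore call.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;

public class AsyncReloadSketch {
    public static void main(String[] args) {
        ListeningExecutorService executor = listeningDecorator(Executors.newCachedThreadPool());
        LoadingCache<String, String> cache = CacheBuilder.newBuilder()
                .refreshAfterWrite(1, TimeUnit.MINUTES)
                // Reloads run on the executor; readers keep seeing the stale value until the reload finishes.
                .build(CacheLoader.asyncReloading(CacheLoader.from(AsyncReloadSketch::load), executor));
        System.out.println(cache.getUnchecked("test_partition"));
        executor.shutdown();
    }

    private static String load(String key) {
        return key.toUpperCase(); // stand-in for an expensive metastore call
    }
}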
Use of com.google.common.util.concurrent.ListeningExecutorService in project presto by prestodb.
The class TestingSemiTransactionalHiveMetastore, method create.
public static TestingSemiTransactionalHiveMetastore create() {
    // none of these values matter, as we never use them
    HiveClientConfig config = new HiveClientConfig();
    MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config, metastoreClientConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, HOST, PORT);
    ColumnConverterProvider columnConverterProvider = HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER;
    ExtendedHiveMetastore delegate = new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig), new HivePartitionMutator());
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-%s"));
    ListeningExecutorService renameExecutor = listeningDecorator(executor);
    return new TestingSemiTransactionalHiveMetastore(hdfsEnvironment, delegate, renameExecutor, false, false, true, columnConverterProvider);
}
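The renameExecutor built here is handed to the semi-transactional metastore so that file renames can run in parallel and be awaited as ListenableFutures. Below is a reduced sketch of that fan-out/join pattern; the class name, rename body, and paths are placeholders, not the Presto implementation.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

public class ParallelRenameSketch {
    public static void renameAll(ListeningExecutorService renameExecutor, List<String> paths)
            throws ExecutionException, InterruptedException {
        List<ListenableFuture<?>> renames = paths.stream()
                .map(path -> renameExecutor.submit(() -> rename(path)))
                .collect(Collectors.toList());
        // Wait for all renames; get() rethrows the first failure as an ExecutionException.
        Futures.allAsList(renames).get();
    }

    private static void rename(String path) {
        System.out.println("renaming " + path); // placeholder for a filesystem rename
    }
}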
Use of com.google.common.util.concurrent.ListeningExecutorService in project micro-service by Lovnx.
The class Task, method testRateLimiter.
/**
 * RateLimiter is similar in spirit to the JDK's Semaphore, but it limits the rate
 * of access to a resource rather than the number of concurrent threads.
 */
public static void testRateLimiter() {
    ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    // Allow at most 5 task submissions per second.
    RateLimiter limiter = RateLimiter.create(5.0);
    for (int i = 0; i < 10; i++) {
        // Request a permit from the RateLimiter; acquire() blocks once the rate is exceeded.
        limiter.acquire();
        final ListenableFuture<Integer> listenableFuture = executorService.submit(new Task("is " + i));
    }
}
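The listenableFuture above is never consumed; a typical next step is to attach a completion callback. Here is a minimal sketch of that, assuming Task implements Callable<Integer> as the submit() call implies; the class and method names are illustrative.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class CallbackSketch {
    public static void attach(ListenableFuture<Integer> listenableFuture) {
        Futures.addCallback(listenableFuture, new FutureCallback<Integer>() {
            @Override
            public void onSuccess(Integer result) {
                System.out.println("task completed with " + result);
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace(); // placeholder error handling
            }
        }, MoreExecutors.directExecutor()); // run the callback on the completing thread
    }
}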