use of com.facebook.presto.spi.connector.ConnectorNodePartitioningProvider in project presto by prestodb.
From the class InternalIcebergConnectorFactory, method createConnector:
public static Connector createConnector(String catalogName, Map<String, String> config, ConnectorContext context, Optional<ExtendedHiveMetastore> metastore)
{
    ClassLoader classLoader = InternalIcebergConnectorFactory.class.getClassLoader();
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        // Assemble the Guice modules that wire up the Iceberg connector (metastore, S3, authentication, caching)
        Bootstrap app = new Bootstrap(
                new EventModule(),
                new MBeanModule(),
                new JsonModule(),
                new IcebergModule(catalogName),
                new IcebergMetastoreModule(),
                new HiveS3Module(catalogName),
                new HiveAuthenticationModule(),
                new HiveMetastoreModule(catalogName, metastore),
                new CachingModule(),
                binder -> {
                    // Bind services supplied by the engine through the ConnectorContext
                    binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
                    binder.bind(NodeManager.class).toInstance(context.getNodeManager());
                    binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                    binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
                    binder.bind(StandardFunctionResolution.class).toInstance(context.getStandardFunctionResolution());
                    binder.bind(RowExpressionService.class).toInstance(context.getRowExpressionService());
                });
        Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
        LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
        IcebergTransactionManager transactionManager = injector.getInstance(IcebergTransactionManager.class);
        IcebergMetadataFactory metadataFactory = injector.getInstance(IcebergMetadataFactory.class);
        ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
        ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
        ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
        ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
        IcebergSessionProperties icebergSessionProperties = injector.getInstance(IcebergSessionProperties.class);
        IcebergTableProperties icebergTableProperties = injector.getInstance(IcebergTableProperties.class);
        Set<Procedure> procedures = injector.getInstance((Key<Set<Procedure>>) Key.get(Types.setOf(Procedure.class)));
        ConnectorPlanOptimizer planOptimizer = injector.getInstance(IcebergPlanOptimizer.class);
        // Wrap the split manager, page providers, and partitioning provider so the plugin class loader is active when the engine calls them
        return new IcebergConnector(
                lifeCycleManager, transactionManager, metadataFactory,
                new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
                new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
                new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
                new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
                ImmutableSet.of(),
                icebergSessionProperties.getSessionProperties(),
                IcebergSchemaProperties.SCHEMA_PROPERTIES,
                icebergTableProperties.getTableProperties(),
                new AllowAllAccessControl(), procedures, planOptimizer);
    }
}
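Note how the split manager, page source/sink providers, and the ConnectorNodePartitioningProvider are all handed to the engine behind ClassLoaderSafe* wrappers. A minimal sketch of that delegation pattern, with imports omitted as in the snippets above (the class name ClassLoaderSafePartitioningProviderSketch is illustrative; Presto ships its own ClassLoaderSafeNodePartitioningProvider):

// Minimal sketch of the class-loader-safe delegation pattern used above: every call runs with the
// plugin class loader installed as the thread context class loader, then delegates unchanged.
public class ClassLoaderSafePartitioningProviderSketch
        implements ConnectorNodePartitioningProvider
{
    private final ConnectorNodePartitioningProvider delegate;
    private final ClassLoader classLoader;

    public ClassLoaderSafePartitioningProviderSketch(ConnectorNodePartitioningProvider delegate, ClassLoader classLoader)
    {
        this.delegate = requireNonNull(delegate, "delegate is null");
        this.classLoader = requireNonNull(classLoader, "classLoader is null");
    }

    @Override
    public ConnectorBucketNodeMap getBucketNodeMap(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Node> sortedNodes)
    {
        try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
            return delegate.getBucketNodeMap(transactionHandle, session, partitioningHandle, sortedNodes);
        }
    }

    @Override
    public ToIntFunction<ConnectorSplit> getSplitBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
    {
        try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
            return delegate.getSplitBucketFunction(transactionHandle, session, partitioningHandle);
        }
    }

    @Override
    public BucketFunction getBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Type> partitionChannelTypes, int bucketCount)
    {
        try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
            return delegate.getBucketFunction(transactionHandle, session, partitioningHandle, partitionChannelTypes, bucketCount);
        }
    }

    @Override
    public int getBucketCount(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
    {
        try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
            return delegate.getBucketCount(transactionHandle, session, partitioningHandle);
        }
    }
}

Swapping the thread context class loader around each call keeps the plugin's own classes visible to the delegate regardless of which engine thread invokes it.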
use of com.facebook.presto.spi.connector.ConnectorNodePartitioningProvider in project presto by prestodb.
From the class TestLocalExchange, method testCreatePartitionFunction:
@Test
public void testCreatePartitionFunction()
{
    int partitionCount = 10;
    PartitioningProviderManager partitioningProviderManager = new PartitioningProviderManager();
    // Register a stub partitioning provider under a fake "prism" connector id
    partitioningProviderManager.addPartitioningProvider(new ConnectorId("prism"), new ConnectorNodePartitioningProvider()
    {
        @Override
        public ConnectorBucketNodeMap getBucketNodeMap(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Node> sortedNodes)
        {
            // Cycle through the available nodes until 10 buckets are assigned
            return createBucketNodeMap(Stream.generate(() -> sortedNodes).flatMap(List::stream).limit(10).collect(toImmutableList()), SOFT_AFFINITY);
        }

        @Override
        public ToIntFunction<ConnectorSplit> getSplitBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
        {
            return null;
        }

        @Override
        public BucketFunction getBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Type> partitionChannelTypes, int bucketCount)
        {
            // Route every row to the same bucket; the test only checks the resulting partition count
            return (Page page, int position) -> partitionCount;
        }

        @Override
        public int getBucketCount(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
        {
            return 10;
        }
    });
    PartitioningHandle partitioningHandle = new PartitioningHandle(
            Optional.of(new ConnectorId("prism")),
            Optional.of(new ConnectorTransactionHandle()
            {
                @Override
                public int hashCode()
                {
                    return super.hashCode();
                }

                @Override
                public boolean equals(Object obj)
                {
                    return super.equals(obj);
                }
            }),
            new ConnectorPartitioningHandle()
            {
                @Override
                public boolean isSingleNode()
                {
                    return false;
                }

                @Override
                public boolean isCoordinatorOnly()
                {
                    return false;
                }
            });
    PartitionFunction partitionFunction = createPartitionFunction(partitioningProviderManager, session, partitioningHandle, 600, ImmutableList.of(), false);
    assertEquals(partitionFunction.getPartitionCount(), partitionCount);
}
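The stub above sends every row to the same bucket, which is enough for asserting the partition count but not how a real connector distributes data. A production-style provider typically hashes the partitioning columns instead; here is a minimal sketch against the same four-method SPI, reusing the node-cycling trick from the test for the bucket-to-node map. Imports are omitted as above, and the class name, the bucket count of 8, and the single-BIGINT-column assumption are all illustrative rather than taken from Presto:

// Illustrative provider: a fixed number of buckets, rows hashed on the first channel (assumed BIGINT).
public class FixedBucketPartitioningProvider
        implements ConnectorNodePartitioningProvider
{
    private static final int BUCKET_COUNT = 8;

    @Override
    public ConnectorBucketNodeMap getBucketNodeMap(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Node> sortedNodes)
    {
        // Cycle the available nodes until every bucket has a node, as in the test above
        return createBucketNodeMap(Stream.generate(() -> sortedNodes).flatMap(List::stream).limit(BUCKET_COUNT).collect(toImmutableList()), SOFT_AFFINITY);
    }

    @Override
    public ToIntFunction<ConnectorSplit> getSplitBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
    {
        // Splits are not bucketed in this sketch
        return split -> 0;
    }

    @Override
    public BucketFunction getBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Type> partitionChannelTypes, int bucketCount)
    {
        // Hash the first partitioning column into [0, bucketCount)
        return (Page page, int position) -> {
            long value = BIGINT.getLong(page.getBlock(0), position);
            return (int) Math.floorMod(value, (long) bucketCount);
        };
    }

    @Override
    public int getBucketCount(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
    {
        return BUCKET_COUNT;
    }
}

The next snippet, createPartitionFunction, shows how the engine combines getBucketCount and getBucketFunction into the partition function used by the local exchange.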
use of com.facebook.presto.spi.connector.ConnectorNodePartitioningProvider in project presto by prestodb.
From the class LocalExchange, method createPartitionFunction:
static PartitionFunction createPartitionFunction(PartitioningProviderManager partitioningProviderManager, Session session, PartitioningHandle partitioning, int partitionCount, List<Type> partitioningChannelTypes, boolean isHashPrecomputed)
{
    // System partitionings hash rows locally instead of asking a connector for a bucket function
    if (partitioning.getConnectorHandle() instanceof SystemPartitioningHandle) {
        HashGenerator hashGenerator;
        if (isHashPrecomputed) {
            hashGenerator = new PrecomputedHashGenerator(0);
        }
        else {
            hashGenerator = InterpretedHashGenerator.createPositionalWithTypes(partitioningChannelTypes);
        }
        return new LocalPartitionGenerator(hashGenerator, partitionCount);
    }

    // Connector partitioning: look up the connector's ConnectorNodePartitioningProvider and its bucket count
    ConnectorNodePartitioningProvider partitioningProvider = partitioningProviderManager.getPartitioningProvider(partitioning.getConnectorId().get());
    int bucketCount = partitioningProvider.getBucketCount(partitioning.getTransactionHandle().orElse(null), session.toConnectorSession(partitioning.getConnectorId().get()), partitioning.getConnectorHandle());

    // Fold the connector buckets onto the local partitions round-robin
    int[] bucketToPartition = new int[bucketCount];
    for (int bucket = 0; bucket < bucketCount; bucket++) {
        bucketToPartition[bucket] = bucket % partitionCount;
    }
    BucketFunction bucketFunction = partitioningProvider.getBucketFunction(partitioning.getTransactionHandle().orElse(null), session.toConnectorSession(), partitioning.getConnectorHandle(), partitioningChannelTypes, bucketCount);
    checkArgument(bucketFunction != null, "No bucket function for partitioning: %s", partitioning);
    return new BucketPartitionFunction(bucketFunction, bucketToPartition);
}
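To make the bucket-to-partition folding concrete: with a connector bucket count of 10 and 4 local partitions, the loop produces [0, 1, 2, 3, 0, 1, 2, 3, 0, 1], so connector buckets are spread round-robin over the local partitions. A self-contained sketch of the same arithmetic (foldBucketsRoundRobin is an illustrative name, not a Presto method):

// Mirrors the bucketToPartition loop in createPartitionFunction above.
static int[] foldBucketsRoundRobin(int bucketCount, int partitionCount)
{
    int[] bucketToPartition = new int[bucketCount];
    for (int bucket = 0; bucket < bucketCount; bucket++) {
        bucketToPartition[bucket] = bucket % partitionCount;
    }
    return bucketToPartition;
}

// foldBucketsRoundRobin(10, 4)   -> [0, 1, 2, 3, 0, 1, 2, 3, 0, 1]
// foldBucketsRoundRobin(10, 600) -> [0, 1, 2, ..., 9]
// The second case matches the test above: only partitions 0 through 9 are ever used, which is why
// partitionFunction.getPartitionCount() reports 10 even though 600 partitions were requested.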