Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
From the class MetadataManager, method makeCompatiblePartitioning:
@Override
public TableHandle makeCompatiblePartitioning(Session session, TableHandle tableHandle, PartitioningHandle partitioningHandle)
{
    checkArgument(partitioningHandle.getConnectorId().isPresent(), "Expect partitioning handle from connector, got system partitioning handle");
    CatalogName catalogName = partitioningHandle.getConnectorId().get();
    checkArgument(catalogName.equals(tableHandle.getCatalogName()), "ConnectorId of tableHandle and partitioningHandle does not match");
    CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName);
    ConnectorTransactionHandle transaction = catalogMetadata.getTransactionHandleFor(catalogName);
    ConnectorTableHandle newTableHandle = metadata.makeCompatiblePartitioning(
            session.toConnectorSession(catalogName),
            tableHandle.getConnectorHandle(),
            partitioningHandle.getConnectorHandle());
    return new TableHandle(catalogName, newTableHandle, transaction);
}
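On the connector side, ConnectorMetadata.makeCompatiblePartitioning receives only the connector-level handles that MetadataManager unwraps above. Below is a minimal sketch of that counterpart; ExampleTableHandle, ExamplePartitioningHandle, and the withBucketCount helper are hypothetical stand-ins for illustration, not Trino SPI classes.

// Hypothetical excerpt from a connector's ConnectorMetadata implementation.
@Override
public ConnectorTableHandle makeCompatiblePartitioning(ConnectorSession session, ConnectorTableHandle tableHandle, ConnectorPartitioningHandle partitioningHandle)
{
    ExampleTableHandle handle = (ExampleTableHandle) tableHandle;
    ExamplePartitioningHandle partitioning = (ExamplePartitioningHandle) partitioningHandle;
    // Return a copy of the table handle that adopts the requested partitioning,
    // so the engine can co-locate this table's scan with the other side of a join.
    return handle.withBucketCount(partitioning.getBucketCount());
}

The engine then wraps the returned ConnectorTableHandle back into a TableHandle together with the same transaction handle, as the method above shows.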
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
From the class TestTableWriterOperator, method createTableWriterOperator:
private Operator createTableWriterOperator(PageSinkManager pageSinkManager, OperatorFactory statisticsAggregation, List<Type> outputTypes, Session session, DriverContext driverContext)
{
    List<String> notNullColumnNames = new ArrayList<>(1);
    notNullColumnNames.add(null);
    TableWriterOperatorFactory factory = new TableWriterOperatorFactory(
            0,
            new PlanNodeId("test"),
            pageSinkManager,
            new CreateTarget(
                    new OutputTableHandle(CONNECTOR_ID, new ConnectorTransactionHandle() {}, new ConnectorOutputTableHandle() {}),
                    new SchemaTableName("testSchema", "testTable")),
            ImmutableList.of(0),
            notNullColumnNames,
            session,
            statisticsAggregation,
            outputTypes);
    return factory.createOperator(driverContext);
}
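ConnectorTransactionHandle is a plain marker interface, which is why the test can satisfy it with an empty anonymous class. Outside of tests, a connector that keeps no per-transaction state typically uses a single-value enum; the sketch below shows that shape (the name ExampleTransactionHandle is illustrative, not a real Trino class).

import io.trino.spi.connector.ConnectorTransactionHandle;

// Minimal stateless transaction handle: one serializable instance that the
// engine hands back to the connector on every call within the transaction.
public enum ExampleTransactionHandle
        implements ConnectorTransactionHandle
{
    INSTANCE
}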
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
From the class DeltaLakePageSourceProvider, method createPageSource:
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit connectorSplit, ConnectorTableHandle connectorTable, List<ColumnHandle> columns, DynamicFilter dynamicFilter)
{
    DeltaLakeSplit split = (DeltaLakeSplit) connectorSplit;
    DeltaLakeTableHandle table = (DeltaLakeTableHandle) connectorTable;
    // We reach here when we could not prune the split using file level stats, table predicate
    // and the dynamic filter in the coordinator during split generation. The file level stats
    // in DeltaLakeSplit#filePredicate could help to prune this split when a more selective dynamic filter
    // is available now, without having to access parquet file footer for row-group stats.
    // We avoid sending DeltaLakeSplit#splitPredicate to workers by using table.getPredicate() here.
    TupleDomain<DeltaLakeColumnHandle> filteredSplitPredicate = TupleDomain.intersect(ImmutableList.of(
            table.getNonPartitionConstraint(),
            split.getStatisticsPredicate(),
            dynamicFilter.getCurrentPredicate().transformKeys(DeltaLakeColumnHandle.class::cast)));
    if (filteredSplitPredicate.isNone()) {
        return new EmptyPageSource();
    }
    List<DeltaLakeColumnHandle> deltaLakeColumns = columns.stream().map(DeltaLakeColumnHandle.class::cast).collect(toImmutableList());
    Map<String, Optional<String>> partitionKeys = split.getPartitionKeys();
    List<DeltaLakeColumnHandle> regularColumns = deltaLakeColumns.stream().filter(column -> column.getColumnType() == REGULAR).collect(toImmutableList());
    List<HiveColumnHandle> hiveColumnHandles = regularColumns.stream().map(DeltaLakeColumnHandle::toHiveColumnHandle).collect(toImmutableList());
    Path path = new Path(split.getPath());
    HdfsContext hdfsContext = new HdfsContext(session);
    TupleDomain<HiveColumnHandle> parquetPredicate = getParquetTupleDomain(filteredSplitPredicate.simplify(domainCompactionThreshold));
    if (table.getWriteType().isPresent()) {
        return new DeltaLakeUpdatablePageSource(
                table, deltaLakeColumns, partitionKeys, split.getPath(), split.getFileSize(), split.getFileModifiedTime(),
                session, executorService, hdfsEnvironment, hdfsContext, parquetDateTimeZone, parquetReaderOptions,
                parquetPredicate, typeManager, updateResultJsonCodec);
    }
    ReaderPageSource pageSource = ParquetPageSourceFactory.createPageSource(
            path, split.getStart(), split.getLength(), split.getFileSize(), hiveColumnHandles, parquetPredicate,
            true, hdfsEnvironment, hdfsEnvironment.getConfiguration(hdfsContext, path), session.getIdentity(),
            parquetDateTimeZone, fileFormatDataSourceStats,
            parquetReaderOptions.withMaxReadBlockSize(getParquetMaxReadBlockSize(session)).withUseColumnIndex(isParquetUseColumnIndex(session)));
    verify(pageSource.getReaderColumns().isEmpty(), "All columns expected to be base columns");
    return new DeltaLakePageSource(deltaLakeColumns, partitionKeys, pageSource.get(), split.getPath(), split.getFileSize(), split.getFileModifiedTime());
}
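The pruning step at the top of the method is where the dynamic filter comes in: the split's statistics predicate, the table's non-partition constraint, and the current dynamic filter are intersected, and a "none" result means no row in the file can match, so an EmptyPageSource is returned without ever opening the Parquet footer. The self-contained sketch below demonstrates that intersection with plain string keys; the column name "id" and the example values are made up for illustration.

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.predicate.ValueSet;

import java.util.List;
import java.util.Map;

import static io.trino.spi.type.BigintType.BIGINT;

public final class SplitPruningSketch
{
    public static void main(String[] args)
    {
        // File-level statistics say the column only contains values greater than 100.
        TupleDomain<String> statisticsPredicate = TupleDomain.withColumnDomains(
                Map.of("id", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 100L)), false)));
        // The dynamic filter collected at runtime only asks for id = 5.
        TupleDomain<String> dynamicFilter = TupleDomain.withColumnDomains(
                Map.of("id", Domain.singleValue(BIGINT, 5L)));
        TupleDomain<String> effective = TupleDomain.intersect(List.of(statisticsPredicate, dynamicFilter));
        // The intersection is empty, so the split could be answered with an EmptyPageSource.
        System.out.println(effective.isNone()); // true
    }
}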
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
From the class MetadataManager, method beginCreateTable:
@Override
public OutputTableHandle beginCreateTable(Session session, String catalogName, ConnectorTableMetadata tableMetadata, Optional<TableLayout> layout)
{
    CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName);
    CatalogName catalog = catalogMetadata.getCatalogName();
    ConnectorMetadata metadata = catalogMetadata.getMetadata(session);
    ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalog);
    ConnectorSession connectorSession = session.toConnectorSession(catalog);
    ConnectorOutputTableHandle handle = metadata.beginCreateTable(connectorSession, tableMetadata, layout.map(TableLayout::getLayout), getRetryPolicy(session).getRetryMode());
    // TODO this should happen after finish but there is no way to get table name in finish step
    if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) {
        systemSecurityMetadata.tableCreated(session, new CatalogSchemaTableName(catalogName, tableMetadata.getTable()));
    }
    return new OutputTableHandle(catalog, transactionHandle, handle);
}
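The ConnectorOutputTableHandle returned by the connector is opaque to the engine; beginCreateTable simply wraps it with the catalog name and the transaction handle so that later finish and page-sink calls can be routed back to the same connector transaction. A minimal sketch of such a handle is shown below; ExampleOutputTableHandle and its fields are hypothetical, and real connectors also add Jackson annotations so the handle can be shipped to workers.

import io.trino.spi.connector.ConnectorOutputTableHandle;
import io.trino.spi.connector.SchemaTableName;

// Illustrative output table handle: ConnectorOutputTableHandle is a marker
// interface, so the connector is free to carry whatever write state it needs.
public record ExampleOutputTableHandle(SchemaTableName table, String stagingLocation)
        implements ConnectorOutputTableHandle
{
}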
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
From the class TestHiveConnectorFactory, method assertCreateConnector:
private static void assertCreateConnector(String metastoreUri)
{
    Map<String, String> config = ImmutableMap.<String, String>builder()
            .put("hive.metastore.uri", metastoreUri)
            .buildOrThrow();
    Connector connector = new HiveConnectorFactory("hive").create("hive-test", config, new TestingConnectorContext());
    ConnectorTransactionHandle transaction = connector.beginTransaction(READ_UNCOMMITTED, true, true);
    assertInstanceOf(connector.getMetadata(SESSION, transaction), ClassLoaderSafeConnectorMetadata.class);
    assertInstanceOf(connector.getSplitManager(), ClassLoaderSafeConnectorSplitManager.class);
    assertInstanceOf(connector.getPageSourceProvider(), ConnectorPageSourceProvider.class);
    connector.commit(transaction);
}
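The test walks the basic Connector transaction lifecycle: begin a transaction, obtain the metadata bound to it, then commit. Below is a hedged sketch of the same lifecycle with a rollback path; the connector and session arguments are assumed to be supplied by the caller, and the class name is illustrative.

import io.trino.spi.connector.Connector;
import io.trino.spi.connector.ConnectorMetadata;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.connector.ConnectorTransactionHandle;

import static io.trino.spi.transaction.IsolationLevel.READ_UNCOMMITTED;

public final class TransactionLifecycleSketch
{
    private TransactionLifecycleSketch() {}

    public static void runReadOnly(Connector connector, ConnectorSession session)
    {
        // readOnly = true, autoCommit = true, mirroring the test above
        ConnectorTransactionHandle transaction = connector.beginTransaction(READ_UNCOMMITTED, true, true);
        try {
            ConnectorMetadata metadata = connector.getMetadata(session, transaction);
            // ... metadata operations scoped to this transaction ...
            connector.commit(transaction);
        }
        catch (RuntimeException e) {
            connector.rollback(transaction);
            throw e;
        }
    }
}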