Example 21 with ConnectorTransactionHandle

Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.

From class MetadataManager, method makeCompatiblePartitioning:

@Override
public TableHandle makeCompatiblePartitioning(Session session, TableHandle tableHandle, PartitioningHandle partitioningHandle) {
    checkArgument(partitioningHandle.getConnectorId().isPresent(), "Expect partitioning handle from connector, got system partitioning handle");
    CatalogName catalogName = partitioningHandle.getConnectorId().get();
    checkArgument(catalogName.equals(tableHandle.getCatalogName()), "ConnectorId of tableHandle and partitioningHandle does not match");
    CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName);
    ConnectorTransactionHandle transaction = catalogMetadata.getTransactionHandleFor(catalogName);
    ConnectorTableHandle newTableHandle = metadata.makeCompatiblePartitioning(
            session.toConnectorSession(catalogName),
            tableHandle.getConnectorHandle(),
            partitioningHandle.getConnectorHandle());
    return new TableHandle(catalogName, newTableHandle, transaction);
}
Also used: ConnectorTransactionHandle (io.trino.spi.connector.ConnectorTransactionHandle), CatalogName (io.trino.connector.CatalogName), ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle), ConnectorInsertTableHandle (io.trino.spi.connector.ConnectorInsertTableHandle), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata)
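
The transaction handle retrieved via getTransactionHandleFor above is whatever the connector returned from beginTransaction. For a connector with no per-transaction state, a common pattern is a singleton enum; a minimal sketch (ExampleTransactionHandle is a hypothetical name, not a class from the Trino codebase):

import io.trino.spi.connector.ConnectorTransactionHandle;

// Stateless handle: the enum singleton serializes trivially and compares by identity.
public enum ExampleTransactionHandle
        implements ConnectorTransactionHandle
{
    INSTANCE
}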

Example 22 with ConnectorTransactionHandle

Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.

From class TestTableWriterOperator, method createTableWriterOperator:

private Operator createTableWriterOperator(PageSinkManager pageSinkManager, OperatorFactory statisticsAggregation, List<Type> outputTypes, Session session, DriverContext driverContext) {
    // one entry per input channel; a null entry means the channel has no NOT NULL constraint
    List<String> notNullColumnNames = new ArrayList<>(1);
    notNullColumnNames.add(null);
    TableWriterOperatorFactory factory = new TableWriterOperatorFactory(
            0,
            new PlanNodeId("test"),
            pageSinkManager,
            new CreateTarget(
                    new OutputTableHandle(CONNECTOR_ID, new ConnectorTransactionHandle() {}, new ConnectorOutputTableHandle() {}),
                    new SchemaTableName("testSchema", "testTable")),
            ImmutableList.of(0),
            notNullColumnNames,
            session,
            statisticsAggregation,
            outputTypes);
    return factory.createOperator(driverContext);
}
Also used: PlanNodeId (io.trino.sql.planner.plan.PlanNodeId), TableWriterOperatorFactory (io.trino.operator.TableWriterOperator.TableWriterOperatorFactory), ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle), OutputTableHandle (io.trino.metadata.OutputTableHandle), CreateTarget (io.trino.sql.planner.plan.TableWriterNode.CreateTarget), ArrayList (java.util.ArrayList), ConnectorTransactionHandle (io.trino.spi.connector.ConnectorTransactionHandle), SchemaTableName (io.trino.spi.connector.SchemaTableName)
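
A test would drive the returned operator through the standard Operator lifecycle. A rough sketch of the calling pattern, assuming inputPage is a Page whose layout matches the writer's input (setup of the factory arguments is elided and assumed to exist):

// Hypothetical driver loop for the operator built above.
Operator operator = createTableWriterOperator(
        pageSinkManager, statisticsAggregation, outputTypes, session, driverContext);
operator.addInput(inputPage);
operator.finish();
while (!operator.isFinished()) {
    Page output = operator.getOutput(); // row-count and fragment pages, possibly null
    if (output != null) {
        // assertions on the written-row counts would go here
    }
}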

Example 23 with ConnectorTransactionHandle

Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.

From class DeltaLakePageSourceProvider, method createPageSource:

@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit connectorSplit, ConnectorTableHandle connectorTable, List<ColumnHandle> columns, DynamicFilter dynamicFilter) {
    DeltaLakeSplit split = (DeltaLakeSplit) connectorSplit;
    DeltaLakeTableHandle table = (DeltaLakeTableHandle) connectorTable;
    // We reach here when we could not prune the split using file level stats, table predicate
    // and the dynamic filter in the coordinator during split generation. The file level stats
    // in DeltaLakeSplit#filePredicate could help to prune this split when a more selective dynamic filter
    // is available now, without having to access parquet file footer for row-group stats.
    // We avoid sending DeltaLakeSplit#splitPredicate to workers by using table.getPredicate() here.
    TupleDomain<DeltaLakeColumnHandle> filteredSplitPredicate = TupleDomain.intersect(ImmutableList.of(
            table.getNonPartitionConstraint(),
            split.getStatisticsPredicate(),
            dynamicFilter.getCurrentPredicate().transformKeys(DeltaLakeColumnHandle.class::cast)));
    if (filteredSplitPredicate.isNone()) {
        return new EmptyPageSource();
    }
    List<DeltaLakeColumnHandle> deltaLakeColumns = columns.stream().map(DeltaLakeColumnHandle.class::cast).collect(toImmutableList());
    Map<String, Optional<String>> partitionKeys = split.getPartitionKeys();
    List<DeltaLakeColumnHandle> regularColumns = deltaLakeColumns.stream().filter(column -> column.getColumnType() == REGULAR).collect(toImmutableList());
    List<HiveColumnHandle> hiveColumnHandles = regularColumns.stream().map(DeltaLakeColumnHandle::toHiveColumnHandle).collect(toImmutableList());
    Path path = new Path(split.getPath());
    HdfsContext hdfsContext = new HdfsContext(session);
    TupleDomain<HiveColumnHandle> parquetPredicate = getParquetTupleDomain(filteredSplitPredicate.simplify(domainCompactionThreshold));
    if (table.getWriteType().isPresent()) {
        return new DeltaLakeUpdatablePageSource(
                table,
                deltaLakeColumns,
                partitionKeys,
                split.getPath(),
                split.getFileSize(),
                split.getFileModifiedTime(),
                session,
                executorService,
                hdfsEnvironment,
                hdfsContext,
                parquetDateTimeZone,
                parquetReaderOptions,
                parquetPredicate,
                typeManager,
                updateResultJsonCodec);
    }
    ReaderPageSource pageSource = ParquetPageSourceFactory.createPageSource(
            path,
            split.getStart(),
            split.getLength(),
            split.getFileSize(),
            hiveColumnHandles,
            parquetPredicate,
            true,
            hdfsEnvironment,
            hdfsEnvironment.getConfiguration(hdfsContext, path),
            session.getIdentity(),
            parquetDateTimeZone,
            fileFormatDataSourceStats,
            parquetReaderOptions
                    .withMaxReadBlockSize(getParquetMaxReadBlockSize(session))
                    .withUseColumnIndex(isParquetUseColumnIndex(session)));
    verify(pageSource.getReaderColumns().isEmpty(), "All columns expected to be base columns");
    return new DeltaLakePageSource(deltaLakeColumns, partitionKeys, pageSource.get(), split.getPath(), split.getFileSize(), split.getFileModifiedTime());
}
Also used: DateTimeZone (org.joda.time.DateTimeZone), HiveSessionProperties.isParquetUseColumnIndex (io.trino.plugin.hive.HiveSessionProperties.isParquetUseColumnIndex), Inject (javax.inject.Inject), ParquetPageSourceFactory (io.trino.plugin.hive.parquet.ParquetPageSourceFactory), ImmutableList (com.google.common.collect.ImmutableList), Verify.verify (com.google.common.base.Verify.verify), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle), Map (java.util.Map), Objects.requireNonNull (java.util.Objects.requireNonNull), ColumnHandle (io.trino.spi.connector.ColumnHandle), Path (org.apache.hadoop.fs.Path), ConnectorPageSource (io.trino.spi.connector.ConnectorPageSource), HiveColumnHandle (io.trino.plugin.hive.HiveColumnHandle), ExecutorService (java.util.concurrent.ExecutorService), ParquetReaderOptions (io.trino.parquet.ParquetReaderOptions), FileFormatDataSourceStats (io.trino.plugin.hive.FileFormatDataSourceStats), HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment), ImmutableMap (com.google.common.collect.ImmutableMap), ConnectorSplit (io.trino.spi.connector.ConnectorSplit), Domain (io.trino.spi.predicate.Domain), ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList), ConnectorPageSourceProvider (io.trino.spi.connector.ConnectorPageSourceProvider), StandardTypes (io.trino.spi.type.StandardTypes), ConnectorSession (io.trino.spi.connector.ConnectorSession), TupleDomain (io.trino.spi.predicate.TupleDomain), ReaderPageSource (io.trino.plugin.hive.ReaderPageSource), HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext), List (java.util.List), DeltaLakeSessionProperties.getParquetMaxReadBlockSize (io.trino.plugin.deltalake.DeltaLakeSessionProperties.getParquetMaxReadBlockSize), DynamicFilter (io.trino.spi.connector.DynamicFilter), Optional (java.util.Optional), ParquetReaderConfig (io.trino.plugin.hive.parquet.ParquetReaderConfig), EmptyPageSource (io.trino.spi.connector.EmptyPageSource), TypeManager (io.trino.spi.type.TypeManager), HiveConfig (io.trino.plugin.hive.HiveConfig), REGULAR (io.trino.plugin.deltalake.DeltaLakeColumnType.REGULAR), JsonCodec (io.airlift.json.JsonCodec), ConnectorTransactionHandle (io.trino.spi.connector.ConnectorTransactionHandle)
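
The pruning step at the top of createPageSource reduces to intersecting several TupleDomain predicates and checking whether the result is unsatisfiable. A stripped-down sketch of just that decision, where the three predicate variables are hypothetical stand-ins for table.getNonPartitionConstraint(), split.getStatisticsPredicate(), and the current dynamic filter:

// If any column's allowed value set becomes empty, the split cannot produce rows.
TupleDomain<DeltaLakeColumnHandle> effectivePredicate = TupleDomain.intersect(ImmutableList.of(
        tableConstraint,
        splitStatisticsPredicate,
        dynamicFilterPredicate));
if (effectivePredicate.isNone()) {
    return new EmptyPageSource(); // skip the split without touching the Parquet footer
}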

Example 24 with ConnectorTransactionHandle

Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.

From class MetadataManager, method beginCreateTable:

@Override
public OutputTableHandle beginCreateTable(Session session, String catalogName, ConnectorTableMetadata tableMetadata, Optional<TableLayout> layout) {
    CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName);
    CatalogName catalog = catalogMetadata.getCatalogName();
    ConnectorMetadata metadata = catalogMetadata.getMetadata(session);
    ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalog);
    ConnectorSession connectorSession = session.toConnectorSession(catalog);
    ConnectorOutputTableHandle handle = metadata.beginCreateTable(connectorSession, tableMetadata, layout.map(TableLayout::getLayout), getRetryPolicy(session).getRetryMode());
    // TODO this should happen after finish but there is no way to get table name in finish step
    if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) {
        systemSecurityMetadata.tableCreated(session, new CatalogSchemaTableName(catalogName, tableMetadata.getTable()));
    }
    return new OutputTableHandle(catalog, transactionHandle, handle);
}
Also used: ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle), ConnectorTransactionHandle (io.trino.spi.connector.ConnectorTransactionHandle), CatalogName (io.trino.connector.CatalogName), ConnectorSession (io.trino.spi.connector.ConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName)
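
The OutputTableHandle returned here bundles the catalog name, the connector's transaction handle, and the connector's output handle, so the later finish step can route back to the same connector transaction. A simplified sketch of what that finish-side counterpart looks like (not the verbatim Trino source; the getMetadata helper and the fragments/computedStatistics arguments, produced by the table writer, are assumed):

public Optional<ConnectorOutputMetadata> finishCreateTable(
        Session session,
        OutputTableHandle tableHandle,
        Collection<Slice> fragments,
        Collection<ComputedStatistics> computedStatistics)
{
    CatalogName catalogName = tableHandle.getCatalogName();
    ConnectorMetadata metadata = getMetadata(session, catalogName); // hypothetical helper
    return metadata.finishCreateTable(
            session.toConnectorSession(catalogName),
            tableHandle.getConnectorHandle(),
            fragments,
            computedStatistics);
}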

Example 25 with ConnectorTransactionHandle

Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.

From class TestHiveConnectorFactory, method assertCreateConnector:

private static void assertCreateConnector(String metastoreUri) {
    Map<String, String> config = ImmutableMap.<String, String>builder().put("hive.metastore.uri", metastoreUri).buildOrThrow();
    Connector connector = new HiveConnectorFactory("hive").create("hive-test", config, new TestingConnectorContext());
    ConnectorTransactionHandle transaction = connector.beginTransaction(READ_UNCOMMITTED, true, true);
    assertInstanceOf(connector.getMetadata(SESSION, transaction), ClassLoaderSafeConnectorMetadata.class);
    assertInstanceOf(connector.getSplitManager(), ClassLoaderSafeConnectorSplitManager.class);
    assertInstanceOf(connector.getPageSourceProvider(), ConnectorPageSourceProvider.class);
    connector.commit(transaction);
}
Also used: Connector (io.trino.spi.connector.Connector), ConnectorTransactionHandle (io.trino.spi.connector.ConnectorTransactionHandle), TestingConnectorContext (io.trino.testing.TestingConnectorContext)
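
On the connector side, the beginTransaction/getMetadata/commit sequence exercised by this test is the Connector SPI contract. A minimal sketch of a connector that hands out the singleton handle sketched earlier (ExampleConnector is a hypothetical name; the ConnectorMetadata instance is assumed to be wired up by the connector factory):

import io.trino.spi.connector.Connector;
import io.trino.spi.connector.ConnectorMetadata;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.connector.ConnectorTransactionHandle;
import io.trino.spi.transaction.IsolationLevel;

public class ExampleConnector
        implements Connector
{
    private final ConnectorMetadata metadata;

    public ExampleConnector(ConnectorMetadata metadata)
    {
        this.metadata = metadata;
    }

    @Override
    public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit)
    {
        // A stateless connector can ignore the isolation level and share one handle.
        return ExampleTransactionHandle.INSTANCE;
    }

    @Override
    public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle)
    {
        return metadata;
    }

    // commit and rollback are not overridden here; for a connector with no
    // transactional state the inherited defaults suffice.
}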

Aggregations

ConnectorTransactionHandle (io.trino.spi.connector.ConnectorTransactionHandle): 44 usages
ConnectorSession (io.trino.spi.connector.ConnectorSession): 23 usages
ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle): 21 usages
ColumnHandle (io.trino.spi.connector.ColumnHandle): 15 usages
List (java.util.List): 14 usages
Objects.requireNonNull (java.util.Objects.requireNonNull): 14 usages
ConnectorSplit (io.trino.spi.connector.ConnectorSplit): 13 usages
ImmutableList (com.google.common.collect.ImmutableList): 12 usages
Optional (java.util.Optional): 12 usages
Inject (javax.inject.Inject): 12 usages
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 10 usages
CatalogName (io.trino.connector.CatalogName): 9 usages
ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata): 9 usages
DynamicFilter (io.trino.spi.connector.DynamicFilter): 9 usages
Test (org.testng.annotations.Test): 9 usages
SchemaTableName (io.trino.spi.connector.SchemaTableName): 8 usages
Map (java.util.Map): 8 usages
ConnectorSplitSource (io.trino.spi.connector.ConnectorSplitSource): 7 usages
ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata): 7 usages
TupleDomain (io.trino.spi.predicate.TupleDomain): 7 usages