Example 1 with ConnectorInsertTableHandle

Use of io.trino.spi.connector.ConnectorInsertTableHandle in project trino by trinodb.

From class MetadataManager, method beginRefreshMaterializedView.

@Override
public InsertTableHandle beginRefreshMaterializedView(Session session, TableHandle tableHandle, List<TableHandle> sourceTableHandles) {
    CatalogName catalogName = tableHandle.getCatalogName();
    CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadata(session);
    ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalogName);
    List<ConnectorTableHandle> sourceConnectorHandles = sourceTableHandles.stream()
            .map(TableHandle::getConnectorHandle)
            .collect(Collectors.toList());
    // include the target table's own handle, then verify that all handles come from a single connector
    sourceConnectorHandles.add(tableHandle.getConnectorHandle());
    if (sourceConnectorHandles.stream().map(Object::getClass).distinct().count() > 1) {
        throw new TrinoException(NOT_SUPPORTED, "Cross connector materialized views are not supported");
    }
    ConnectorInsertTableHandle handle = metadata.beginRefreshMaterializedView(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle(), sourceConnectorHandles, getRetryPolicy(session).getRetryMode());
    return new InsertTableHandle(tableHandle.getCatalogName(), transactionHandle, handle);
}
Also used : ConnectorInsertTableHandle(io.trino.spi.connector.ConnectorInsertTableHandle) ConnectorTransactionHandle(io.trino.spi.connector.ConnectorTransactionHandle) TrinoException(io.trino.spi.TrinoException) CatalogName(io.trino.connector.CatalogName) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle)
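
The cross-connector guard above compares only the runtime classes of the connector handles: handles produced by different connectors are instances of different handle classes. A minimal, self-contained sketch of that check, using plain objects as stand-ins for real connector table handles:

import java.util.ArrayList;
import java.util.List;

public class CrossConnectorCheck {
    // Same test as in beginRefreshMaterializedView: handles produced by
    // different connectors are instances of different handle classes.
    static boolean spansMultipleConnectors(List<Object> connectorHandles) {
        return connectorHandles.stream()
                .map(Object::getClass)
                .distinct()
                .count() > 1;
    }

    public static void main(String[] args) {
        List<Object> handles = new ArrayList<>();
        handles.add("handle from connector A"); // stand-in for a ConnectorTableHandle
        handles.add(42); // stand-in for a handle from a different connector
        System.out.println(spansMultipleConnectors(handles)); // prints: true
    }
}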

Example 2 with ConnectorInsertTableHandle

Use of io.trino.spi.connector.ConnectorInsertTableHandle in project trino by trinodb.

From class MetadataManager, method beginInsert.

@Override
public InsertTableHandle beginInsert(Session session, TableHandle tableHandle, List<ColumnHandle> columns) {
    CatalogName catalogName = tableHandle.getCatalogName();
    CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadata(session);
    ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalogName);
    ConnectorInsertTableHandle handle = metadata.beginInsert(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle(), columns, getRetryPolicy(session).getRetryMode());
    return new InsertTableHandle(tableHandle.getCatalogName(), transactionHandle, handle);
}
Also used : ConnectorInsertTableHandle(io.trino.spi.connector.ConnectorInsertTableHandle) ConnectorTransactionHandle(io.trino.spi.connector.ConnectorTransactionHandle) CatalogName(io.trino.connector.CatalogName) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata)
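
ConnectorInsertTableHandle itself is a marker interface with no methods; the engine treats the handle returned by beginInsert as opaque state and hands it back to the connector in finishInsert. A minimal sketch of a connector-side implementation, assuming Jackson annotations for serialization as most Trino connectors use (ExampleInsertTableHandle is an illustrative name, not a real Trino class):

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.trino.spi.connector.ConnectorInsertTableHandle;

public class ExampleInsertTableHandle implements ConnectorInsertTableHandle {
    private final String schemaName;
    private final String tableName;

    @JsonCreator
    public ExampleInsertTableHandle(
            @JsonProperty("schemaName") String schemaName,
            @JsonProperty("tableName") String tableName) {
        this.schemaName = schemaName;
        this.tableName = tableName;
    }

    @JsonProperty
    public String getSchemaName() {
        return schemaName;
    }

    @JsonProperty
    public String getTableName() {
        return tableName;
    }
}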

Example 3 with ConnectorInsertTableHandle

Use of io.trino.spi.connector.ConnectorInsertTableHandle in project trino by trinodb.

From class HiveMetadata, method finishInsert.

@Override
public Optional<ConnectorOutputMetadata> finishInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics) {
    HiveInsertTableHandle handle = (HiveInsertTableHandle) insertHandle;
    List<PartitionUpdate> partitionUpdates = fragments.stream().map(Slice::getBytes).map(partitionUpdateCodec::fromJson).collect(toImmutableList());
    HiveStorageFormat tableStorageFormat = handle.getTableStorageFormat();
    partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates);
    Table table = metastore.getTable(handle.getSchemaName(), handle.getTableName()).orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName()));
    if (!table.getStorage().getStorageFormat().getInputFormat().equals(tableStorageFormat.getInputFormat()) && isRespectTableFormat(session)) {
        throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during insert");
    }
    if (handle.getBucketProperty().isPresent() && isCreateEmptyBucketFiles(session)) {
        List<PartitionUpdate> partitionUpdatesForMissingBuckets = computePartitionUpdatesForMissingBuckets(session, handle, table, false, partitionUpdates);
        // replace partitionUpdates before creating the empty files so that those files will be cleaned up if we end up rolling back
        partitionUpdates = PartitionUpdate.mergePartitionUpdates(concat(partitionUpdates, partitionUpdatesForMissingBuckets));
        for (PartitionUpdate partitionUpdate : partitionUpdatesForMissingBuckets) {
            Optional<Partition> partition = table.getPartitionColumns().isEmpty() ? Optional.empty() : Optional.of(buildPartitionObject(session, table, partitionUpdate));
            if (handle.isTransactional() && partition.isPresent()) {
                PartitionStatistics statistics = PartitionStatistics.builder().setBasicStatistics(partitionUpdate.getStatistics()).build();
                metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition.get(), partitionUpdate.getWritePath(), Optional.of(partitionUpdate.getFileNames()), statistics, handle.isRetriesEnabled());
            }
            createEmptyFiles(session, partitionUpdate.getWritePath(), table, partition, partitionUpdate.getFileNames());
        }
    }
    List<String> partitionedBy = table.getPartitionColumns().stream().map(Column::getName).collect(toImmutableList());
    Map<String, Type> columnTypes = handle.getInputColumns().stream().collect(toImmutableMap(HiveColumnHandle::getName, column -> column.getHiveType().getType(typeManager)));
    Map<List<String>, ComputedStatistics> partitionComputedStatistics = createComputedStatisticsToPartitionMap(computedStatistics, partitionedBy, columnTypes);
    for (PartitionUpdate partitionUpdate : partitionUpdates) {
        if (partitionUpdate.getName().isEmpty()) {
            // insert into unpartitioned table
            if (!table.getStorage().getStorageFormat().getInputFormat().equals(handle.getPartitionStorageFormat().getInputFormat()) && isRespectTableFormat(session)) {
                throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during insert");
            }
            PartitionStatistics partitionStatistics = createPartitionStatistics(partitionUpdate.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, ImmutableList.of()));
            if (partitionUpdate.getUpdateMode() == OVERWRITE) {
                // get privileges from existing table
                PrincipalPrivileges principalPrivileges = fromHivePrivilegeInfos(metastore.listTablePrivileges(handle.getSchemaName(), handle.getTableName(), Optional.empty()));
                // first drop it
                metastore.dropTable(session, handle.getSchemaName(), handle.getTableName());
                // create the table with the new location
                metastore.createTable(session, table, principalPrivileges, Optional.of(partitionUpdate.getWritePath()), Optional.of(partitionUpdate.getFileNames()), false, partitionStatistics, handle.isRetriesEnabled());
            } else if (partitionUpdate.getUpdateMode() == NEW || partitionUpdate.getUpdateMode() == APPEND) {
                // insert into unpartitioned table
                metastore.finishInsertIntoExistingTable(session, handle.getSchemaName(), handle.getTableName(), partitionUpdate.getWritePath(), partitionUpdate.getFileNames(), partitionStatistics, handle.isRetriesEnabled());
            } else {
                throw new IllegalArgumentException("Unsupported update mode: " + partitionUpdate.getUpdateMode());
            }
        } else if (partitionUpdate.getUpdateMode() == APPEND) {
            // insert into existing partition
            List<String> partitionValues = toPartitionValues(partitionUpdate.getName());
            PartitionStatistics partitionStatistics = createPartitionStatistics(partitionUpdate.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, partitionValues));
            metastore.finishInsertIntoExistingPartition(session, handle.getSchemaName(), handle.getTableName(), partitionValues, partitionUpdate.getWritePath(), partitionUpdate.getFileNames(), partitionStatistics, handle.isRetriesEnabled());
        } else if (partitionUpdate.getUpdateMode() == NEW || partitionUpdate.getUpdateMode() == OVERWRITE) {
            // insert into new partition or overwrite existing partition
            Partition partition = buildPartitionObject(session, table, partitionUpdate);
            if (!partition.getStorage().getStorageFormat().getInputFormat().equals(handle.getPartitionStorageFormat().getInputFormat()) && isRespectTableFormat(session)) {
                throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Partition format changed during insert");
            }
            PartitionStatistics partitionStatistics = createPartitionStatistics(partitionUpdate.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, partition.getValues()));
            if (partitionUpdate.getUpdateMode() == OVERWRITE) {
                if (handle.getLocationHandle().getWriteMode() == DIRECT_TO_TARGET_EXISTING_DIRECTORY) {
                    removeNonCurrentQueryFiles(session, partitionUpdate.getTargetPath());
                    if (handle.isRetriesEnabled()) {
                        HdfsContext hdfsContext = new HdfsContext(session);
                        cleanExtraOutputFiles(hdfsEnvironment, hdfsContext, session.getQueryId(), partitionUpdate.getTargetPath(), ImmutableSet.copyOf(partitionUpdate.getFileNames()));
                    }
                } else {
                    metastore.dropPartition(session, handle.getSchemaName(), handle.getTableName(), partition.getValues(), true);
                    metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition, partitionUpdate.getWritePath(), Optional.of(partitionUpdate.getFileNames()), partitionStatistics, handle.isRetriesEnabled());
                }
            } else {
                metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition, partitionUpdate.getWritePath(), Optional.of(partitionUpdate.getFileNames()), partitionStatistics, handle.isRetriesEnabled());
            }
        } else {
            throw new IllegalArgumentException(format("Unsupported update mode: %s", partitionUpdate.getUpdateMode()));
        }
    }
    if (isFullAcidTable(table.getParameters())) {
        HdfsContext context = new HdfsContext(session);
        for (PartitionUpdate update : partitionUpdates) {
            long writeId = handle.getTransaction().getWriteId();
            Path deltaDirectory = new Path(format("%s/%s/%s", table.getStorage().getLocation(), update.getName(), deltaSubdir(writeId, writeId, 0)));
            createOrcAcidVersionFile(context, deltaDirectory);
        }
    }
    return Optional.of(new HiveWrittenPartitions(partitionUpdates.stream().map(PartitionUpdate::getName).collect(toImmutableList())));
}
Also used : ConnectorInsertTableHandle(io.trino.spi.connector.ConnectorInsertTableHandle) ConnectorOutputMetadata(io.trino.spi.connector.ConnectorOutputMetadata) ConnectorSession(io.trino.spi.connector.ConnectorSession) TableNotFoundException(io.trino.spi.connector.TableNotFoundException) TrinoException(io.trino.spi.TrinoException) HIVE_CONCURRENT_MODIFICATION_DETECTED(io.trino.plugin.hive.HiveErrorCode.HIVE_CONCURRENT_MODIFICATION_DETECTED) Slice(io.airlift.slice.Slice) ComputedStatistics(io.trino.spi.statistics.ComputedStatistics) Table(io.trino.plugin.hive.metastore.Table) Partition(io.trino.plugin.hive.metastore.Partition) Column(io.trino.plugin.hive.metastore.Column) PrincipalPrivileges.fromHivePrivilegeInfos(io.trino.plugin.hive.metastore.PrincipalPrivileges.fromHivePrivilegeInfos) HdfsContext(io.trino.plugin.hive.HdfsEnvironment.HdfsContext) Path(org.apache.hadoop.fs.Path) NEW(io.trino.plugin.hive.PartitionUpdate.UpdateMode.NEW) APPEND(io.trino.plugin.hive.PartitionUpdate.UpdateMode.APPEND) OVERWRITE(io.trino.plugin.hive.PartitionUpdate.UpdateMode.OVERWRITE) DIRECT_TO_TARGET_EXISTING_DIRECTORY(io.trino.plugin.hive.LocationHandle.WriteMode.DIRECT_TO_TARGET_EXISTING_DIRECTORY) AcidUtils.isFullAcidTable(org.apache.hadoop.hive.ql.io.AcidUtils.isFullAcidTable) AcidUtils.deltaSubdir(org.apache.hadoop.hive.ql.io.AcidUtils.deltaSubdir) SemiTransactionalHiveMetastore.cleanExtraOutputFiles(io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore.cleanExtraOutputFiles) Statistics.createComputedStatisticsToPartitionMap(io.trino.plugin.hive.util.Statistics.createComputedStatisticsToPartitionMap) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) ImmutableSet(com.google.common.collect.ImmutableSet) Iterables.concat(com.google.common.collect.Iterables.concat) String.format(java.lang.String.format) Optional(java.util.Optional) List(java.util.List) Map(java.util.Map) Collection(java.util.Collection)
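
The long method above reduces to a small decision table over the partition update mode. A condensed restatement as standalone Java, with a local enum standing in for io.trino.plugin.hive.PartitionUpdate.UpdateMode; the action strings summarize, rather than reproduce, the metastore calls:

public class UpdateModeDispatch {
    enum UpdateMode { NEW, APPEND, OVERWRITE }

    // Summarizes the dispatch in HiveMetadata.finishInsert above.
    static String action(boolean partitioned, UpdateMode mode) {
        if (!partitioned) {
            return switch (mode) {
                case OVERWRITE -> "drop the table, then recreate it at the new write path";
                case NEW, APPEND -> "finish the insert into the existing table";
            };
        }
        return switch (mode) {
            case APPEND -> "finish the insert into the existing partition";
            case NEW -> "add the new partition";
            case OVERWRITE -> "drop or clean the existing partition, then add the new one";
        };
    }

    public static void main(String[] args) {
        System.out.println(action(true, UpdateMode.OVERWRITE));
    }
}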

Example 4 with ConnectorInsertTableHandle

Use of io.trino.spi.connector.ConnectorInsertTableHandle in project trino by trinodb.

From class AbstractTestHive, method doInsert.

private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    // creating the table
    doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS);
    MaterializedResult.Builder resultBuilder = MaterializedResult.resultBuilder(SESSION, CREATE_TABLE_DATA.getTypes());
    for (int i = 0; i < 3; i++) {
        insertData(tableName, CREATE_TABLE_DATA);
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);
            // load the new table
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
            // verify the metadata
            ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
            assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), CREATE_TABLE_COLUMNS);
            // verify the data
            resultBuilder.rows(CREATE_TABLE_DATA.getMaterializedRows());
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
            assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());
            // statistics
            HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName);
            assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * (i + 1));
            assertEquals(tableStatistics.getFileCount().getAsLong(), i + 1L);
            assertGreaterThan(tableStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L);
            assertGreaterThan(tableStatistics.getOnDiskDataSizeInBytes().getAsLong(), 0L);
        }
    }
    // test rollback
    Set<String> existingFiles;
    try (Transaction transaction = newTransaction()) {
        existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertFalse(existingFiles.isEmpty());
    }
    Path stagingPathRoot;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        // "stage" insert data
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(CREATE_TABLE_DATA.toPage());
        sink.appendPage(CREATE_TABLE_DATA.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        // statistics, visible from within transaction
        HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName);
        assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 5L);
        try (Transaction otherTransaction = newTransaction()) {
            // statistics, not visible from outside transaction
            HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(otherTransaction, tableName);
            assertEquals(otherTableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L);
        }
        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
        // verify all temp files start with the unique prefix
        stagingPathRoot = getStagingPathRoot(insertTableHandle);
        HdfsContext context = new HdfsContext(session);
        Set<String> tempFiles = listAllDataFiles(context, stagingPathRoot);
        assertFalse(tempFiles.isEmpty());
        for (String filePath : tempFiles) {
            assertThat(new Path(filePath).getName()).startsWith(session.getQueryId());
        }
        // rollback insert
        transaction.rollback();
    }
    // verify temp directory is empty
    HdfsContext context = new HdfsContext(newSession());
    assertTrue(listAllDataFiles(context, stagingPathRoot).isEmpty());
    // verify the data is unchanged
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());
        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
    }
    // verify statistics unchanged
    try (Transaction transaction = newTransaction()) {
        HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName);
        assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L);
        assertEquals(statistics.getFileCount().getAsLong(), 3L);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HiveSessionProperties.getTemporaryStagingDirectoryPath(io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath) HiveColumnHandle.bucketColumnHandle(io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle) ColumnHandle(io.trino.spi.connector.ColumnHandle) ConnectorInsertTableHandle(io.trino.spi.connector.ConnectorInsertTableHandle) Constraint(io.trino.spi.connector.Constraint) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle) Slices.utf8Slice(io.airlift.slice.Slices.utf8Slice) Slice(io.airlift.slice.Slice) ConnectorSession(io.trino.spi.connector.ConnectorSession) TestingConnectorSession(io.trino.testing.TestingConnectorSession) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) HdfsContext(io.trino.plugin.hive.HdfsEnvironment.HdfsContext) MaterializedResult(io.trino.testing.MaterializedResult) ConnectorPageSink(io.trino.spi.connector.ConnectorPageSink) ConnectorTableMetadata(io.trino.spi.connector.ConnectorTableMetadata)
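
The staged-insert sequence this test drives is the generic SPI lifecycle: begin the insert, write pages through a sink, collect the connector-defined fragments, and pass them to finishInsert. A hedged outline of that call sequence against the SPI types shown above; the helper method is a sketch, not Trino code:

import io.airlift.slice.Slice;
import io.trino.spi.Page;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.connector.ConnectorInsertTableHandle;
import io.trino.spi.connector.ConnectorMetadata;
import io.trino.spi.connector.ConnectorPageSink;
import io.trino.spi.connector.ConnectorPageSinkProvider;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.connector.ConnectorTableHandle;
import io.trino.spi.connector.ConnectorTransactionHandle;

import java.util.Collection;
import java.util.List;

import static io.airlift.concurrent.MoreFutures.getFutureValue;
import static io.trino.spi.connector.RetryMode.NO_RETRIES;

public class InsertLifecycleSketch {
    // Begin the insert, write a page through the sink, collect the opaque
    // fragments, and hand them back to finishInsert to commit the write.
    // Fragments are connector-defined (Hive encodes PartitionUpdate JSON);
    // the engine passes them through without interpreting them.
    static void runInsert(
            ConnectorMetadata metadata,
            ConnectorPageSinkProvider pageSinkProvider,
            ConnectorTransactionHandle transaction,
            ConnectorSession session,
            ConnectorTableHandle table,
            List<ColumnHandle> columns,
            Page data) {
        ConnectorInsertTableHandle insertHandle = metadata.beginInsert(session, table, columns, NO_RETRIES);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction, session, insertHandle);
        sink.appendPage(data);
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertHandle, fragments, List.of());
    }
}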

Example 5 with ConnectorInsertTableHandle

Use of io.trino.spi.connector.ConnectorInsertTableHandle in project trino by trinodb.

From class TestRaptorConnector, method assertSplitShard.

private void assertSplitShard(Type temporalType, String min, String max, int expectedSplits) throws Exception {
    ConnectorSession session = TestingConnectorSession.builder().setPropertyMetadata(new RaptorSessionProperties(new StorageManagerConfig()).getSessionProperties()).build();
    ConnectorTransactionHandle transaction = beginTransaction();
    connector.getMetadata(SESSION, transaction).createTable(SESSION, new ConnectorTableMetadata(new SchemaTableName("test", "test"), ImmutableList.of(new ColumnMetadata("id", BIGINT), new ColumnMetadata("time", temporalType)), ImmutableMap.of(TEMPORAL_COLUMN_PROPERTY, "time")), false);
    connector.commit(transaction);
    ConnectorTransactionHandle txn1 = beginTransaction();
    ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(SESSION, txn1), "test");
    ConnectorInsertTableHandle insertTableHandle = connector.getMetadata(SESSION, txn1).beginInsert(session, handle1);
    ConnectorPageSink raptorPageSink = connector.getPageSinkProvider().createPageSink(txn1, session, insertTableHandle);
    Object timestamp1 = null;
    Object timestamp2 = null;
    if (temporalType.equals(TIMESTAMP_MILLIS)) {
        timestamp1 = SqlTimestamp.newInstance(3, castToShortTimestamp(TIMESTAMP_MILLIS.getPrecision(), min), 0);
        timestamp2 = SqlTimestamp.newInstance(3, castToShortTimestamp(TIMESTAMP_MILLIS.getPrecision(), max), 0);
    } else if (temporalType.equals(DATE)) {
        timestamp1 = new SqlDate(parseDate(min));
        timestamp2 = new SqlDate(parseDate(max));
    }
    Page inputPage = MaterializedResult.resultBuilder(session, ImmutableList.of(BIGINT, temporalType)).row(1L, timestamp1).row(2L, timestamp2).build().toPage();
    raptorPageSink.appendPage(inputPage);
    Collection<Slice> shards = raptorPageSink.finish().get();
    assertEquals(shards.size(), expectedSplits);
    connector.getMetadata(session, txn1).dropTable(session, handle1);
    connector.commit(txn1);
}
Also used : ColumnMetadata(io.trino.spi.connector.ColumnMetadata) ConnectorInsertTableHandle(io.trino.spi.connector.ConnectorInsertTableHandle) ConnectorTransactionHandle(io.trino.spi.connector.ConnectorTransactionHandle) Page(io.trino.spi.Page) SchemaTableName(io.trino.spi.connector.SchemaTableName) StorageManagerConfig(io.trino.plugin.raptor.legacy.storage.StorageManagerConfig) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle) Slice(io.airlift.slice.Slice) SqlDate(io.trino.spi.type.SqlDate) ConnectorSession(io.trino.spi.connector.ConnectorSession) TestingConnectorSession(io.trino.testing.TestingConnectorSession) ConnectorPageSink(io.trino.spi.connector.ConnectorPageSink) ConnectorTableMetadata(io.trino.spi.connector.ConnectorTableMetadata)
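
The input page here is built through MaterializedResult, which is test-only tooling. A minimal sketch of producing an equivalent two-row (BIGINT, DATE) page with the SPI's lower-level PageBuilder instead, assuming DATE values are counted in days since the epoch:

import io.trino.spi.Page;
import io.trino.spi.PageBuilder;

import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.DateType.DATE;

public class BuildInputPage {
    // Builds a two-row page with columns (id BIGINT, time DATE), equivalent
    // to the MaterializedResult-based construction in the test above.
    static Page twoRows(long day1, long day2) {
        PageBuilder page = new PageBuilder(List.of(BIGINT, DATE));
        page.declarePosition();
        BIGINT.writeLong(page.getBlockBuilder(0), 1L);
        DATE.writeLong(page.getBlockBuilder(1), day1); // days since 1970-01-01
        page.declarePosition();
        BIGINT.writeLong(page.getBlockBuilder(0), 2L);
        DATE.writeLong(page.getBlockBuilder(1), day2);
        return page.build();
    }
}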

Aggregations

ConnectorInsertTableHandle (io.trino.spi.connector.ConnectorInsertTableHandle): 16 usages
ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle): 15 usages
Slice (io.airlift.slice.Slice): 13 usages
ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata): 13 usages
ConnectorSession (io.trino.spi.connector.ConnectorSession): 13 usages
HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext): 11 usages
ColumnHandle (io.trino.spi.connector.ColumnHandle): 11 usages
Path (org.apache.hadoop.fs.Path): 11 usages
ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata): 10 usages
Constraint (io.trino.spi.connector.Constraint): 10 usages
TrinoException (io.trino.spi.TrinoException): 8 usages
ColumnMetadata (io.trino.spi.connector.ColumnMetadata): 8 usages
SchemaTableName (io.trino.spi.connector.SchemaTableName): 8 usages
ImmutableList (com.google.common.collect.ImmutableList): 7 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 7 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 7 usages
JsonCodec (io.airlift.json.JsonCodec): 7 usages
Logger (io.airlift.log.Logger): 7 usages
NOT_SUPPORTED (io.trino.spi.StandardErrorCode.NOT_SUPPORTED): 7 usages
ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle): 7 usages