
Example 11 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

From the class CarbondataMetadata, method beginCreateTable:

@Override
public CarbondataOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout) {
    // get the root directory for the database
    SchemaTableName finalSchemaTableName = tableMetadata.getTable();
    String finalSchemaName = finalSchemaTableName.getSchemaName();
    String finalTableName = finalSchemaTableName.getTableName();
    this.user = session.getUser();
    this.schemaName = finalSchemaName;
    currentState = State.CREATE_TABLE_AS;
    List<String> partitionedBy = new ArrayList<>();
    List<SortingColumn> sortBy = new ArrayList<>();
    List<HiveColumnHandle> columnHandles = new ArrayList<>();
    Map<String, String> tableProperties = new HashMap<>();
    getParametersForCreateTable(session, tableMetadata, partitionedBy, sortBy, columnHandles, tableProperties);
    metastore.getDatabase(finalSchemaName).orElseThrow(() -> new SchemaNotFoundException(finalSchemaName));
    // Hack: there is no clean way to avoid the type mismatch between HiveStorageFormat and the Carbondata StorageFormat, so resolve the CARBON constant by name
    HiveStorageFormat tableStorageFormat = HiveStorageFormat.valueOf("CARBON");
    HiveStorageFormat partitionStorageFormat = tableStorageFormat;
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream().map(columnHandlesByName::get).map(column -> new Column(column.getName(), column.getHiveType(), column.getComment())).collect(toList());
    checkPartitionTypesSupported(partitionColumns);
    // it will get final path to create carbon table
    LocationHandle locationHandle = getCarbonDataTableCreationPath(session, tableMetadata, HiveWriteUtils.OpertionType.CREATE_TABLE_AS);
    Path targetPath = locationService.getTableWriteInfo(locationHandle, false).getTargetPath();
    AbsoluteTableIdentifier finalAbsoluteTableIdentifier = AbsoluteTableIdentifier.from(targetPath.toString(), new CarbonTableIdentifier(finalSchemaName, finalTableName, UUID.randomUUID().toString()));
    hdfsEnvironment.doAs(session.getUser(), () -> {
        initialConfiguration = ConfigurationUtils.toJobConf(this.hdfsEnvironment.getConfiguration(new HdfsEnvironment.HdfsContext(session, finalSchemaName, finalTableName), new Path(locationHandle.getJsonSerializableTargetPath())));
        // Create Carbondata metadata folder and Schema file
        CarbondataMetadataUtils.createMetaDataFolderSchemaFile(hdfsEnvironment, session, columnHandles, finalAbsoluteTableIdentifier, partitionedBy, sortBy.stream().map(s -> s.getColumnName().toLowerCase(Locale.ENGLISH)).collect(toList()), targetPath.toString(), initialConfiguration);
        this.tableStorageLocation = Optional.of(targetPath.toString());
        Path outputPath = new Path(locationHandle.getJsonSerializableTargetPath());
        Properties schema = readSchemaForCarbon(finalSchemaName, finalTableName, targetPath, columnHandles, partitionColumns);
        // Create committer object
        setupCommitWriter(schema, outputPath, initialConfiguration, false);
    });
    try {
        CarbondataOutputTableHandle result = new CarbondataOutputTableHandle(finalSchemaName, finalTableName, columnHandles, metastore.generatePageSinkMetadata(new HiveIdentity(session), finalSchemaTableName), locationHandle, tableStorageFormat, partitionStorageFormat, partitionedBy, Optional.empty(), session.getUser(), tableProperties, ImmutableMap.<String, String>of(EncodedLoadModel, jobContext.getConfiguration().get(LOAD_MODEL)));
        LocationService.WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
        metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), finalSchemaTableName);
        return result;
    } catch (RuntimeException ex) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Error creating table: %s", ex.getMessage()), ex);
    }
}
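
For reference, here is a minimal sketch of the bridging that this hack relies on. It assumes, as the method above does, that the hetu-core build of HiveStorageFormat declares a CARBON constant; the serde and format class names in the second helper are illustrative only, and StorageFormat.create/fromHiveStorageFormat are the factory methods on io.prestosql.plugin.hive.metastore.StorageFormat.

import io.prestosql.plugin.hive.HiveStorageFormat;
import io.prestosql.plugin.hive.metastore.StorageFormat;

final class CarbonStorageFormatSketch {
    private CarbonStorageFormatSketch() {}

    // Resolve CARBON through the HiveStorageFormat enum, then bridge it to
    // the metastore-level StorageFormat the connector hands to writers.
    static StorageFormat carbonStorageFormat() {
        HiveStorageFormat hiveFormat = HiveStorageFormat.valueOf("CARBON");
        return StorageFormat.fromHiveStorageFormat(hiveFormat);
    }

    // Alternative without the enum lookup: build a StorageFormat directly
    // from serde/input/output class names (illustrative values).
    static StorageFormat explicitStorageFormat() {
        return StorageFormat.create(
                "org.apache.carbondata.hive.CarbonHiveSerDe",
                "org.apache.carbondata.hive.MapredCarbonInputFormat",
                "org.apache.carbondata.hive.MapredCarbonOutputFormat");
    }
}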

Example 12 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

From the class HiveWriterFactory, method createWriter:

private HiveWriter createWriter(List<String> partitionValues, OptionalInt bucketNumber, Optional<Options> vacuumOptions, boolean forMerge) {
    boolean isTxnTable = isTxnTable();
    if (bucketCount.isPresent()) {
        checkArgument(bucketNumber.isPresent(), "Bucket not provided for bucketed table");
        checkArgument(bucketNumber.getAsInt() < bucketCount.getAsInt(), "Bucket number %s must be less than bucket count %s", bucketNumber, bucketCount);
    } else {
        checkArgument(isTxnTable || !bucketNumber.isPresent(), "Bucket number provided for table that is not bucketed");
    }
    String fileName;
    if (bucketNumber.isPresent()) {
        fileName = computeBucketedFileName(queryId, bucketNumber.getAsInt());
    } else {
        // Snapshot: don't use UUID. File name needs to be deterministic.
        if (isSnapshotEnabled) {
            fileName = String.format(ENGLISH, "%s_%d_%d_%d", queryId, session.getTaskId().getAsInt(), session.getPipelineId().getAsInt(), session.getDriverId().getAsInt());
        } else {
            fileName = queryId + "_" + randomUUID();
        }
    }
    Optional<String> partitionName;
    if (!partitionColumnNames.isEmpty()) {
        partitionName = Optional.of(FileUtils.makePartName(partitionColumnNames, partitionValues));
    } else {
        partitionName = Optional.empty();
    }
    // attempt to get the existing partition (if this is an existing partitioned table)
    Optional<Partition> partition = Optional.empty();
    if (!partitionValues.isEmpty() && table != null) {
        partition = pageSinkMetadataProvider.getPartition(partitionValues);
    }
    UpdateMode updateMode;
    Properties schema;
    WriteInfo writeInfo;
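    // io.prestosql.plugin.hive.metastore.StorageFormat captures the serde, input format, and output format classes passed to the file writer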
    StorageFormat outputStorageFormat;
    if (!partition.isPresent()) {
        if (table == null) {
            // Write to: a new partition in a new partitioned table,
            // or a new unpartitioned table.
            updateMode = UpdateMode.NEW;
            schema = new Properties();
            schema.setProperty(IOConstants.COLUMNS, dataColumns.stream().map(DataColumn::getName).collect(joining(",")));
            schema.setProperty(IOConstants.COLUMNS_TYPES, dataColumns.stream().map(DataColumn::getHiveType).map(HiveType::getHiveTypeName).map(HiveTypeName::toString).collect(joining(":")));
            setAdditionalSchemaProperties(schema);
            if (!partitionName.isPresent()) {
                // new unpartitioned table
                writeInfo = locationService.getTableWriteInfo(locationHandle, false);
            } else {
                // a new partition in a new partitioned table
                writeInfo = locationService.getPartitionWriteInfo(locationHandle, partition, partitionName.get());
                if (!writeInfo.getWriteMode().isWritePathSameAsTargetPath()) {
                    // verify that the target directory for the partition does not already exist
                    if (HiveWriteUtils.pathExists(new HdfsContext(session, schemaName, tableName), hdfsEnvironment, writeInfo.getTargetPath())) {
                        throw new PrestoException(HIVE_PATH_ALREADY_EXISTS, format("Target directory for new partition '%s' of table '%s.%s' already exists: %s", partitionName, schemaName, tableName, writeInfo.getTargetPath()));
                    }
                }
            }
        } else {
            // Write to: a new partition in an existing partitioned table,
            // or an existing unpartitioned table
            if (partitionName.isPresent()) {
                // a new partition in an existing partitioned table
                updateMode = UpdateMode.NEW;
                writeInfo = locationService.getPartitionWriteInfo(locationHandle, partition, partitionName.get());
            } else {
                switch(insertExistingPartitionsBehavior) {
                    case APPEND:
                        checkState(!immutablePartitions);
                        updateMode = UpdateMode.APPEND;
                        writeInfo = locationService.getTableWriteInfo(locationHandle, false);
                        break;
                    case OVERWRITE:
                        updateMode = UpdateMode.OVERWRITE;
                        writeInfo = locationService.getTableWriteInfo(locationHandle, true);
                        break;
                    case ERROR:
                        throw new PrestoException(HIVE_TABLE_READ_ONLY, "Unpartitioned Hive tables are immutable");
                    default:
                        throw new IllegalArgumentException("Unsupported insert existing table behavior: " + insertExistingPartitionsBehavior);
                }
            }
            schema = getHiveSchema(table);
        }
        if (partitionName.isPresent()) {
            // Write to a new partition
            outputStorageFormat = fromHiveStorageFormat(partitionStorageFormat);
        } else {
            // Write to a new/existing unpartitioned table
            outputStorageFormat = fromHiveStorageFormat(tableStorageFormat);
        }
    } else {
        // Write to: an existing partition in an existing partitioned table
        if (insertExistingPartitionsBehavior == InsertExistingPartitionsBehavior.APPEND) {
            // Append to an existing partition
            checkState(!immutablePartitions);
            updateMode = UpdateMode.APPEND;
            // Check the column types in partition schema match the column types in table schema
            List<Column> tableColumns = table.getDataColumns();
            List<Column> existingPartitionColumns = partition.get().getColumns();
            for (int i = 0; i < min(existingPartitionColumns.size(), tableColumns.size()); i++) {
                HiveType tableType = tableColumns.get(i).getType();
                HiveType partitionType = existingPartitionColumns.get(i).getType();
                if (!tableType.equals(partitionType)) {
                    throw new PrestoException(HIVE_PARTITION_SCHEMA_MISMATCH, format("" + "You are trying to write into an existing partition in a table. " + "The table schema has changed since the creation of the partition. " + "Inserting rows into such partition is not supported. " + "The column '%s' in table '%s' is declared as type '%s', " + "but partition '%s' declared column '%s' as type '%s'.", tableColumns.get(i).getName(), tableName, tableType, partitionName, existingPartitionColumns.get(i).getName(), partitionType));
                }
            }
            HiveWriteUtils.checkPartitionIsWritable(partitionName.get(), partition.get());
            outputStorageFormat = partition.get().getStorage().getStorageFormat();
            schema = getHiveSchema(partition.get(), table);
            writeInfo = locationService.getPartitionWriteInfo(locationHandle, partition, partitionName.get());
        } else if (insertExistingPartitionsBehavior == InsertExistingPartitionsBehavior.OVERWRITE) {
            // Overwrite an existing partition
            // 
            // Overwrite behaves as if the partition were first dropped and a new partition inserted, thus:
            // * No partition-writable check is required.
            // * The table schema and storage format are used for the new partition (instead of the existing partition's schema and storage format).
            updateMode = UpdateMode.OVERWRITE;
            outputStorageFormat = fromHiveStorageFormat(partitionStorageFormat);
            schema = getHiveSchema(table);
            writeInfo = locationService.getPartitionWriteInfo(locationHandle, Optional.empty(), partitionName.get());
            checkWriteMode(writeInfo);
        } else if (insertExistingPartitionsBehavior == InsertExistingPartitionsBehavior.ERROR) {
            throw new PrestoException(HIVE_PARTITION_READ_ONLY, "Cannot insert into an existing partition of Hive table: " + partitionName.get());
        } else {
            throw new IllegalArgumentException(format("Unsupported insert existing partitions behavior: %s", insertExistingPartitionsBehavior));
        }
    }
    schema.putAll(additionalTableParameters);
    if (acidWriteType != HiveACIDWriteType.DELETE) {
        validateSchema(partitionName, schema);
    }
    Path path;
    Optional<AcidOutputFormat.Options> acidOptions;
    String fileNameWithExtension;
    if (isTxnTable) {
        WriteIdInfo writeIdInfo = locationHandle.getJsonSerializablewriteIdInfo().get();
        AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).minimumWriteId(writeIdInfo.getMinWriteId()).maximumWriteId(writeIdInfo.getMaxWriteId()).statementId(writeIdInfo.getStatementId()).bucket(bucketNumber.isPresent() ? bucketNumber.getAsInt() : 0);
        if (acidWriteType == HiveACIDWriteType.DELETE) {
            // to support delete as insert
            options.writingDeleteDelta(true);
        } else if (acidWriteType == HiveACIDWriteType.INSERT_OVERWRITE) {
            // In case of ACID txn tables, don't delete old data; just create a new base in the same partition.
            options.writingBase(true);
        }
        if (vacuumOptions.isPresent() && HiveACIDWriteType.isVacuum(acidWriteType)) {
            Options vOptions = vacuumOptions.get();
            // Use the original bucket file number itself.
            // Compacted delta directories will not have statementId
            options.maximumWriteId(vOptions.getMaximumWriteId()).minimumWriteId(vOptions.getMinimumWriteId()).writingBase(vOptions.isWritingBase()).writingDeleteDelta(vOptions.isWritingDeleteDelta()).bucket(vOptions.getBucketId()).statementId(-1);
        }
        if (AcidUtils.isInsertOnlyTable(schema)) {
            String subdir;
            if (options.isWritingBase()) {
                subdir = AcidUtils.baseDir(options.getMaximumWriteId());
            } else if (HiveACIDWriteType.isVacuum(acidWriteType)) {
                // Minor-compacted delta directories do not have a statement ID.
                subdir = AcidUtils.deltaSubdir(options.getMinimumWriteId(), options.getMaximumWriteId());
            } else {
                subdir = AcidUtils.deltaSubdir(options.getMinimumWriteId(), options.getMaximumWriteId(), options.getStatementId());
            }
            Path parentDir = new Path(writeInfo.getWritePath(), subdir);
            fileName = String.format("%06d", options.getBucketId()) + "_0" + getFileExtension(conf, outputStorageFormat);
            path = new Path(parentDir, fileName);
            Properties properties = new Properties();
            properties.setProperty("transactional_properties", "insert_only");
            options.tableProperties(properties);
        } else {
            path = AcidUtils.createFilename(writeInfo.getWritePath(), options);
        }
        // In the ACID case, the entire delta directory is renamed from the staging directory.
        fileNameWithExtension = path.getParent().getName();
        acidOptions = Optional.of(options);
    } else {
        fileNameWithExtension = fileName + getFileExtension(conf, outputStorageFormat);
        path = new Path(writeInfo.getWritePath(), fileNameWithExtension);
        acidOptions = Optional.empty();
    }
    FileSystem fileSystem;
    try {
        fileSystem = hdfsEnvironment.getFileSystem(session.getUser(), path, conf);
    } catch (IOException e) {
        throw new PrestoException(HIVE_WRITER_OPEN_ERROR, e);
    }
    if (isSnapshotEnabled) {
        // Snapshot: use a recognizable name pattern, in case they need to be deleted/renamed
        String oldFileName = path.getName();
        String newFileName = toSnapshotFileName(oldFileName, queryId);
        path = new Path(path.getParent(), newFileName);
        if (fileNameWithExtension.equals(oldFileName)) {
            fileNameWithExtension = newFileName;
        }
    }
    HiveFileWriter hiveFileWriter = null;
    if (isSnapshotEnabled && !forMerge) {
        // Add a suffix to file name for sub files
        String oldFileName = path.getName();
        String newFileName = toSnapshotSubFile(oldFileName);
        path = new Path(path.getParent(), newFileName);
        if (fileNameWithExtension.equals(oldFileName)) {
            fileNameWithExtension = newFileName;
        }
        // Always create a simple ORC writer for snapshot files. These will be merged in the end.
        logContainingFolderInfo(fileSystem, path, "Creating SnapshotTempFileWriter for %s", path);
        try {
            Path finalPath = path;
            hiveFileWriter = new SnapshotTempFileWriter(orcFileWriterFactory.createOrcDataSink(session, fileSystem, path), dataColumns.stream().map(column -> column.getHiveType().getType(typeManager)).collect(Collectors.toList()));
        } catch (IOException e) {
            throw new PrestoException(HiveErrorCode.HIVE_WRITER_OPEN_ERROR, "Error creating ORC file", e);
        }
    } else {
        conf.set("table.write.path", writeInfo.getWritePath().toString());
        for (HiveFileWriterFactory fileWriterFactory : fileWriterFactories) {
            Optional<HiveFileWriter> fileWriter = fileWriterFactory.createFileWriter(path, dataColumns.stream().map(DataColumn::getName).collect(toList()), outputStorageFormat, schema, conf, session, acidOptions, Optional.of(acidWriteType));
            if (fileWriter.isPresent()) {
                hiveFileWriter = fileWriter.get();
                break;
            }
        }
        if (isSnapshotEnabled) {
            // TODO-cp-I2BZ0A: assuming all files to be of ORC type
            checkState(hiveFileWriter instanceof OrcFileWriter, "Only support ORC format with snapshot");
            logContainingFolderInfo(fileSystem, path, "Creating file writer for final result: %s", path);
        }
        if (hiveFileWriter == null) {
            hiveFileWriter = new RecordFileWriter(path, dataColumns.stream().map(DataColumn::getName).collect(toList()), outputStorageFormat, schema, partitionStorageFormat.getEstimatedWriterSystemMemoryUsage(), conf, typeManager, parquetTimeZone, session);
        }
        if (isTxnTable) {
            hiveFileWriter.initWriter(true, path, fileSystem);
        }
    }
    Path finalPath = path;
    String writerImplementation = hiveFileWriter.getClass().getName();
    Consumer<HiveWriter> onCommit;
    if (isSnapshotEnabled && !forMerge) {
        // Only send "commit" event for the merged file
        onCommit = hiveWriter -> {
        };
    } else {
        onCommit = hiveWriter -> {
            Optional<Long> size;
            try {
                size = Optional.of(hdfsEnvironment.getFileSystem(session.getUser(), finalPath, conf).getFileStatus(finalPath).getLen());
            } catch (IOException | RuntimeException e) {
                // Do not fail the query if file system is not available
                size = Optional.empty();
            }
            eventClient.post(new WriteCompletedEvent(session.getQueryId(), finalPath.toString(), schemaName, tableName, partitionName.orElse(null), outputStorageFormat.getOutputFormat(), writerImplementation, nodeManager.getCurrentNode().getVersion(), nodeManager.getCurrentNode().getHost(), session.getIdentity().getPrincipal().map(Principal::getName).orElse(null), nodeManager.getEnvironment(), sessionProperties, size.orElse(null), hiveWriter.getRowCount()));
        };
    }
    if (!sortedBy.isEmpty() || (isTxnTable() && HiveACIDWriteType.isUpdateOrDelete(acidWriteType))) {
        List<Type> types = dataColumns.stream().map(column -> column.getHiveType().getType(typeManager)).collect(Collectors.toList());
        Map<String, Integer> columnIndexes = new HashMap<>();
        for (int i = 0; i < dataColumns.size(); i++) {
            columnIndexes.put(dataColumns.get(i).getName(), i);
        }
        if (sortedBy.isEmpty() && isTxnTable() && HiveACIDWriteType.isUpdateOrDelete(acidWriteType)) {
            // Add $rowId column as the last column in the page
            types.add(HiveColumnHandle.updateRowIdHandle().getHiveType().getType(typeManager));
            columnIndexes.put(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME, dataColumns.size());
        }
        List<Integer> sortFields = new ArrayList<>();
        List<SortOrder> sortOrders = new ArrayList<>();
        List<SortingColumn> sortingColumns = this.sortedBy;
        if (sortedBy.isEmpty() && isTxnTable() && HiveACIDWriteType.isUpdateOrDelete(acidWriteType)) {
            sortingColumns = ImmutableList.of(new SortingColumn(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME, SortingColumn.Order.ASCENDING));
        }
        for (SortingColumn column : sortingColumns) {
            Integer index = columnIndexes.get(column.getColumnName());
            if (index == null) {
                throw new PrestoException(HIVE_INVALID_METADATA, format("Sorting column '%s' does not exist in table '%s.%s'", column.getColumnName(), schemaName, tableName));
            }
            sortFields.add(index);
            sortOrders.add(column.getOrder().getSortOrder());
        }
        FileSystem sortFileSystem = fileSystem;
        String child = ".tmp-sort." + path.getName();
        Path tempFilePrefix = new Path(path.getParent(), child);
        hiveFileWriter = new SortingFileWriter(sortFileSystem, tempFilePrefix, hiveFileWriter, sortBufferSize, maxOpenSortFiles, types, sortFields, sortOrders, pageSorter, (fs, p) -> orcFileWriterFactory.createOrcDataSink(session, fs, p));
    }
    return new HiveWriter(hiveFileWriter, partitionName, updateMode, fileNameWithExtension, writeInfo.getWritePath().toString(), writeInfo.getTargetPath().toString(), path.toString(), onCommit, // Snapshot: only update stats when merging files
    isSnapshotEnabled && !forMerge ? null : hiveWriterStats, hiveFileWriter.getExtraPartitionFiles());
}
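
To make the branching easier to follow, here is a hedged sketch that isolates how outputStorageFormat is chosen above. The helper name chooseOutputFormat is ours; the accessors are the ones this class already imports, and the OVERWRITE path (which reverts to the partition storage format even when a partition exists) is noted in a comment rather than modeled.

import java.util.Optional;

import io.prestosql.plugin.hive.HiveStorageFormat;
import io.prestosql.plugin.hive.metastore.Partition;
import io.prestosql.plugin.hive.metastore.StorageFormat;

import static io.prestosql.plugin.hive.metastore.StorageFormat.fromHiveStorageFormat;

final class OutputStorageFormatSketch {
    private OutputStorageFormatSketch() {}

    static StorageFormat chooseOutputFormat(
            Optional<Partition> existingPartition,
            Optional<String> partitionName,
            HiveStorageFormat tableStorageFormat,
            HiveStorageFormat partitionStorageFormat) {
        if (existingPartition.isPresent()) {
            // APPEND to an existing partition: keep its current format
            // (the OVERWRITE path above uses partitionStorageFormat instead)
            return existingPartition.get().getStorage().getStorageFormat();
        }
        if (partitionName.isPresent()) {
            // A brand-new partition: use the configured partition format
            return fromHiveStorageFormat(partitionStorageFormat);
        }
        // A new or existing unpartitioned table: use the table format
        return fromHiveStorageFormat(tableStorageFormat);
    }
}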

Example 13 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

From the class AbstractTestHiveFileFormats, method createTestFilePresto:

public static FileSplit createTestFilePresto(String filePath, HiveStorageFormat storageFormat, HiveCompressionCodec compressionCodec, List<TestColumn> columns, ConnectorSession session, int numRows, HiveFileWriterFactory fileWriterFactory) {
    List<TestColumn> testColumns = columns;
    // filter out partition keys, which are not written to the file
    testColumns = ImmutableList.copyOf(filter(testColumns, not(TestColumn::isPartitionKey)));
    List<Type> types = testColumns.stream().map(TestColumn::getType).map(HiveType::valueOf).map(type -> type.getType(TYPE_MANAGER)).collect(toList());
    PageBuilder pageBuilder = new PageBuilder(types);
    for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
        pageBuilder.declarePosition();
        for (int columnNumber = 0; columnNumber < testColumns.size(); columnNumber++) {
            serializeObject(types.get(columnNumber), pageBuilder.getBlockBuilder(columnNumber), testColumns.get(columnNumber).getWriteValue(), testColumns.get(columnNumber).getObjectInspector(), false);
        }
    }
    Page page = pageBuilder.build();
    JobConf jobConf = new JobConf();
    configureCompression(jobConf, compressionCodec);
    Properties tableProperties = new Properties();
    tableProperties.setProperty("columns", Joiner.on(',').join(transform(testColumns, TestColumn::getName)));
    tableProperties.setProperty("columns.types", Joiner.on(',').join(transform(testColumns, TestColumn::getType)));
    Optional<HiveFileWriter> fileWriter = fileWriterFactory.createFileWriter(new Path(filePath), testColumns.stream().map(TestColumn::getName).collect(toList()), StorageFormat.fromHiveStorageFormat(storageFormat), tableProperties, jobConf, session, Optional.empty(), Optional.empty());
    HiveFileWriter hiveFileWriter = fileWriter.orElseThrow(() -> new IllegalArgumentException("fileWriterFactory"));
    hiveFileWriter.appendRows(page);
    hiveFileWriter.commit();
    return new FileSplit(new Path(filePath), 0, new File(filePath).length(), new String[0]);
}
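
For context, a minimal sketch of the schema Properties handed to createFileWriter above. The comma joiners mirror this test's "columns"/"columns.types" wiring; the helper name and arguments are illustrative.

import java.util.List;
import java.util.Properties;

import com.google.common.base.Joiner;

final class WriterSchemaSketch {
    private WriterSchemaSketch() {}

    static Properties writerSchema(List<String> columnNames, List<String> hiveTypeNames) {
        Properties tableProperties = new Properties();
        // Hive serdes read the column layout from these two properties
        tableProperties.setProperty("columns", Joiner.on(',').join(columnNames));
        tableProperties.setProperty("columns.types", Joiner.on(',').join(hiveTypeNames));
        return tableProperties;
    }
}

The writer factory then pairs these properties with StorageFormat.fromHiveStorageFormat(storageFormat) to select the serde and output format, as in the call above.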

Example 14 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

From the class AbstractTestHive, method doTestMismatchSchemaTable:

protected void doTestMismatchSchemaTable(SchemaTableName schemaTableName, HiveStorageFormat storageFormat, List<ColumnMetadata> tableBefore, MaterializedResult dataBefore, List<ColumnMetadata> tableAfter, MaterializedResult dataAfter) throws Exception {
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    doCreateEmptyTable(schemaTableName, storageFormat, tableBefore);
    // insert the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(dataBefore.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
    }
    // load the table and verify the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        List<ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream().filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden()).collect(toList());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), dataBefore.getMaterializedRows());
        transaction.commit();
    }
    // alter the table schema
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(session);
        Table oldTable = transaction.getMetastore(schemaName).getTable(new HiveIdentity(session), schemaName, tableName).get();
        HiveTypeTranslator hiveTypeTranslator = new HiveTypeTranslator();
        List<Column> dataColumns = tableAfter.stream().filter(columnMetadata -> !columnMetadata.getName().equals("ds")).map(columnMetadata -> new Column(columnMetadata.getName(), toHiveType(hiveTypeTranslator, columnMetadata.getType()), Optional.empty())).collect(toList());
        Table.Builder newTable = Table.builder(oldTable).setDataColumns(dataColumns);
        transaction.getMetastore(schemaName).replaceView(new HiveIdentity(session), schemaName, tableName, newTable.build(), principalPrivileges);
        transaction.commit();
    }
    // load the altered table and verify the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        List<ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream().filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden()).collect(toList());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), dataAfter.getMaterializedRows());
        transaction.commit();
    }
    // inserting into an existing partition whose column types no longer match the table schema should fail
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(dataAfter.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
        fail("expected exception");
    } catch (PrestoException e) {
        // expected
        assertEquals(e.getErrorCode(), HiveErrorCode.HIVE_PARTITION_SCHEMA_MISMATCH.toErrorCode());
    }
}
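
For context, a driver for this helper only needs a before/after schema pair that differs in one column type, plus the same logical rows typed both ways. The sketch below is illustrative rather than taken from the project: the table name, columns, and rows are hypothetical, and it assumes it lives in an AbstractTestHive subclass so that newSession(), doTestMismatchSchemaTable, and the statically imported ORC, INTEGER, BIGINT, and createUnboundedVarcharType are in scope.

@Test
public void testMismatchSchemaTableSketch() throws Exception {
    SchemaTableName table = new SchemaTableName("test_schema", "test_mismatch_schema");
    // written first: (id integer, ds varchar), with "ds" as the partition key
    List<ColumnMetadata> before = ImmutableList.of(
            new ColumnMetadata("id", INTEGER),
            new ColumnMetadata("ds", createUnboundedVarcharType()));
    MaterializedResult dataBefore = MaterializedResult.resultBuilder(newSession(), INTEGER, createUnboundedVarcharType())
            .row(1, "2020-01-01")
            .build();
    // altered schema: "id" widened to bigint; the same row must now read back as a long
    List<ColumnMetadata> after = ImmutableList.of(
            new ColumnMetadata("id", BIGINT),
            new ColumnMetadata("ds", createUnboundedVarcharType()));
    MaterializedResult dataAfter = MaterializedResult.resultBuilder(newSession(), BIGINT, createUnboundedVarcharType())
            .row(1L, "2020-01-01")
            .build();
    // the final insert targets the existing partition, whose metadata still says integer,
    // so the helper expects HIVE_PARTITION_SCHEMA_MISMATCH
    doTestMismatchSchemaTable(table, ORC, before, dataBefore, after, dataAfter);
}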
Also used : ROLLBACK_AFTER_FINISH_INSERT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_FINISH_INSERT) HiveType.toHiveType(io.prestosql.plugin.hive.HiveType.toHiveType) TableStatistics(io.prestosql.spi.statistics.TableStatistics) StorageFormat(io.prestosql.plugin.hive.metastore.StorageFormat) Assertions.assertInstanceOf(io.airlift.testing.Assertions.assertInstanceOf) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.testng.annotations.Test) TableAlreadyExistsException(io.prestosql.spi.connector.TableAlreadyExistsException) NullableValue(io.prestosql.spi.predicate.NullableValue) FileStatus(org.apache.hadoop.fs.FileStatus) TEXTFILE(io.prestosql.plugin.hive.HiveStorageFormat.TEXTFILE) TypeSignature.parseTypeSignature(io.prestosql.spi.type.TypeSignature.parseTypeSignature) TableNotFoundException(io.prestosql.spi.connector.TableNotFoundException) Files.createTempDirectory(java.nio.file.Files.createTempDirectory) Map(java.util.Map) RowType(io.prestosql.spi.type.RowType) ENGLISH(java.util.Locale.ENGLISH) Assert.assertFalse(org.testng.Assert.assertFalse) Chars.isCharType(io.prestosql.spi.type.Chars.isCharType) LOCATION_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.LOCATION_PROPERTY) MoreExecutors.directExecutor(com.google.common.util.concurrent.MoreExecutors.directExecutor) RCTEXT(io.prestosql.plugin.hive.HiveStorageFormat.RCTEXT) ConnectorPageSource(io.prestosql.spi.connector.ConnectorPageSource) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) Table(io.prestosql.plugin.hive.metastore.Table) ORC(io.prestosql.plugin.hive.HiveStorageFormat.ORC) SchemaTablePrefix(io.prestosql.spi.connector.SchemaTablePrefix) HiveBasicStatistics.createZeroStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createZeroStatistics) TRANSACTIONAL(io.prestosql.plugin.hive.HiveTableProperties.TRANSACTIONAL) TYPE_MANAGER(io.prestosql.plugin.hive.HiveTestUtils.TYPE_MANAGER) MetastoreLocator(io.prestosql.plugin.hive.metastore.thrift.MetastoreLocator) LocalDateTime(java.time.LocalDateTime) PRESTO_QUERY_ID_NAME(io.prestosql.plugin.hive.HiveMetadata.PRESTO_QUERY_ID_NAME) ThriftHiveMetastoreConfig(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastoreConfig) OptionalLong(java.util.OptionalLong) REGULAR(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.REGULAR) PARTITION_KEY(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.PARTITION_KEY) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) DOUBLE(io.prestosql.spi.type.DoubleType.DOUBLE) ThriftHiveMetastore(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastore) DiscretePredicates(io.prestosql.spi.connector.DiscretePredicates) Assertions.assertGreaterThanOrEqual(io.airlift.testing.Assertions.assertGreaterThanOrEqual) ImmutableMultimap(com.google.common.collect.ImmutableMultimap) PARQUET(io.prestosql.plugin.hive.HiveStorageFormat.PARQUET) ConnectorOutputTableHandle(io.prestosql.spi.connector.ConnectorOutputTableHandle) AfterClass(org.testng.annotations.AfterClass) HiveTestUtils.mapType(io.prestosql.plugin.hive.HiveTestUtils.mapType) FileUtils.makePartName(org.apache.hadoop.hive.common.FileUtils.makePartName) IOException(java.io.IOException) Iterables.getOnlyElement(com.google.common.collect.Iterables.getOnlyElement) ROLLBACK_RIGHT_AWAY(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_RIGHT_AWAY) HostAndPort(com.google.common.net.HostAndPort) 
USER(io.prestosql.spi.security.PrincipalType.USER) ConnectorTableMetadata(io.prestosql.spi.connector.ConnectorTableMetadata) VARBINARY(io.prestosql.spi.type.VarbinaryType.VARBINARY) HiveTestUtils.getDefaultOrcFileWriterFactory(io.prestosql.plugin.hive.HiveTestUtils.getDefaultOrcFileWriterFactory) ConnectorPageSourceProvider(io.prestosql.spi.connector.ConnectorPageSourceProvider) ROLLBACK_AFTER_APPEND_PAGE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_APPEND_PAGE) Varchars.isVarcharType(io.prestosql.spi.type.Varchars.isVarcharType) ConnectorSplitManager(io.prestosql.spi.connector.ConnectorSplitManager) ViewNotFoundException(io.prestosql.spi.connector.ViewNotFoundException) MaterializedResult.materializeSourceDataStream(io.prestosql.testing.MaterializedResult.materializeSourceDataStream) MaterializedResult(io.prestosql.testing.MaterializedResult) Duration(io.airlift.units.Duration) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) ConnectorTableProperties(io.prestosql.spi.connector.ConnectorTableProperties) BOOLEAN(io.prestosql.spi.type.BooleanType.BOOLEAN) Type(io.prestosql.spi.type.Type) RcFilePageSource(io.prestosql.plugin.hive.rcfile.RcFilePageSource) BIGINT(io.prestosql.spi.type.BigintType.BIGINT) DecimalType.createDecimalType(io.prestosql.spi.type.DecimalType.createDecimalType) PrestoException(io.prestosql.spi.PrestoException) HiveBasicStatistics.createEmptyStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createEmptyStatistics) ImmutableSet(com.google.common.collect.ImmutableSet) CachingHiveMetastore(io.prestosql.plugin.hive.metastore.CachingHiveMetastore) MetadataManager.createTestMetadataManager(io.prestosql.metadata.MetadataManager.createTestMetadataManager) ROLLBACK_AFTER_DELETE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_DELETE) HiveUtil.columnExtraInfo(io.prestosql.plugin.hive.HiveUtil.columnExtraInfo) BeforeClass(org.testng.annotations.BeforeClass) Collection(java.util.Collection) UUID(java.util.UUID) TINYINT(io.prestosql.spi.type.TinyintType.TINYINT) Assert.assertNotNull(org.testng.Assert.assertNotNull) HYPER_LOG_LOG(io.prestosql.spi.type.HyperLogLogType.HYPER_LOG_LOG) NOT_SUPPORTED(io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED) JsonCodec(io.airlift.json.JsonCodec) IntStream(java.util.stream.IntStream) NOT_PARTITIONED(io.prestosql.spi.connector.NotPartitionedPartitionHandle.NOT_PARTITIONED) SqlTimestamp(io.prestosql.spi.type.SqlTimestamp) BUCKET_COLUMN_NAME(io.prestosql.plugin.hive.HiveColumnHandle.BUCKET_COLUMN_NAME) Assert.assertNull(org.testng.Assert.assertNull) ConnectorViewDefinition(io.prestosql.spi.connector.ConnectorViewDefinition) SqlDate(io.prestosql.spi.type.SqlDate) ConnectorNewTableLayout(io.prestosql.spi.connector.ConnectorNewTableLayout) OptionalDouble(java.util.OptionalDouble) Assert.assertEquals(org.testng.Assert.assertEquals) BUCKETED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.BUCKETED_BY_PROPERTY) HiveUtil.toPartitionValues(io.prestosql.plugin.hive.HiveUtil.toPartitionValues) OptionalInt(java.util.OptionalInt) HashSet(java.util.HashSet) ImmutableList(com.google.common.collect.ImmutableList) ViewColumn(io.prestosql.spi.connector.ConnectorViewDefinition.ViewColumn) HiveColumnHandle.bucketColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle.bucketColumnHandle) DATE(io.prestosql.spi.type.DateType.DATE) Math.toIntExact(java.lang.Math.toIntExact) 
STORAGE_FORMAT_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.STORAGE_FORMAT_PROPERTY) Block(io.prestosql.spi.block.Block) ExecutorService(java.util.concurrent.ExecutorService) Collections.emptyMap(java.util.Collections.emptyMap) ParquetPageSource(io.prestosql.plugin.hive.parquet.ParquetPageSource) UTF_8(java.nio.charset.StandardCharsets.UTF_8) ColumnMetadata(io.prestosql.spi.connector.ColumnMetadata) TupleDomain(io.prestosql.spi.predicate.TupleDomain) Assert.fail(org.testng.Assert.fail) DateTime(org.joda.time.DateTime) PartitionWithStatistics(io.prestosql.plugin.hive.metastore.PartitionWithStatistics) Page(io.prestosql.spi.Page) HiveTestUtils.getDefaultHiveDataStreamFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveDataStreamFactories) Executors.newFixedThreadPool(java.util.concurrent.Executors.newFixedThreadPool) Hashing.sha256(com.google.common.hash.Hashing.sha256) BUCKETING_V1(io.prestosql.plugin.hive.HiveBucketing.BucketingVersion.BUCKETING_V1) Assertions.assertEqualsIgnoreOrder(io.airlift.testing.Assertions.assertEqualsIgnoreOrder) PARTITIONED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.PARTITIONED_BY_PROPERTY) Collectors.toList(java.util.stream.Collectors.toList) Column(io.prestosql.plugin.hive.metastore.Column) JoinCompiler(io.prestosql.sql.gen.JoinCompiler) Assert.assertTrue(org.testng.Assert.assertTrue) RecordPageSource(io.prestosql.spi.connector.RecordPageSource) ConnectorInsertTableHandle(io.prestosql.spi.connector.ConnectorInsertTableHandle) OrcConcatPageSource(io.prestosql.plugin.hive.orc.OrcConcatPageSource) ROLLBACK_AFTER_BEGIN_INSERT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_BEGIN_INSERT) Arrays(java.util.Arrays) RCBINARY(io.prestosql.plugin.hive.HiveStorageFormat.RCBINARY) NoHdfsAuthentication(io.prestosql.plugin.hive.authentication.NoHdfsAuthentication) ConnectorPageSink(io.prestosql.spi.connector.ConnectorPageSink) ValueSet(io.prestosql.spi.predicate.ValueSet) Maps.uniqueIndex(com.google.common.collect.Maps.uniqueIndex) BigDecimal(java.math.BigDecimal) Sets.difference(com.google.common.collect.Sets.difference) Executors.newScheduledThreadPool(java.util.concurrent.Executors.newScheduledThreadPool) HIVE_STRING(io.prestosql.plugin.hive.HiveType.HIVE_STRING) RowFieldName(io.prestosql.spi.type.RowFieldName) Slices.utf8Slice(io.airlift.slice.Slices.utf8Slice) ConnectorPageSinkProvider(io.prestosql.spi.connector.ConnectorPageSinkProvider) JSON(io.prestosql.plugin.hive.HiveStorageFormat.JSON) HIVE_INT(io.prestosql.plugin.hive.HiveType.HIVE_INT) HIVE_LONG(io.prestosql.plugin.hive.HiveType.HIVE_LONG) ConstraintApplicationResult(io.prestosql.spi.connector.ConstraintApplicationResult) UNGROUPED_SCHEDULING(io.prestosql.spi.connector.ConnectorSplitManager.SplitSchedulingStrategy.UNGROUPED_SCHEDULING) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) Set(java.util.Set) SqlStandardAccessControlMetadata(io.prestosql.plugin.hive.security.SqlStandardAccessControlMetadata) TIMESTAMP(io.prestosql.spi.type.TimestampType.TIMESTAMP) MILLISECONDS(java.util.concurrent.TimeUnit.MILLISECONDS) VarcharType.createVarcharType(io.prestosql.spi.type.VarcharType.createVarcharType) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) Domain(io.prestosql.spi.predicate.Domain) SortingColumn(io.prestosql.plugin.hive.metastore.SortingColumn) TestingNodeManager(io.prestosql.testing.TestingNodeManager) Lists.reverse(com.google.common.collect.Lists.reverse) 
MoreObjects.toStringHelper(com.google.common.base.MoreObjects.toStringHelper) Slice(io.airlift.slice.Slice) Partition(io.prestosql.plugin.hive.metastore.Partition) StandardTypes(io.prestosql.spi.type.StandardTypes) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) BUCKET_COUNT_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.BUCKET_COUNT_PROPERTY) MapType(io.prestosql.spi.type.MapType) GroupByHashPageIndexerFactory(io.prestosql.GroupByHashPageIndexerFactory) Float.floatToRawIntBits(java.lang.Float.floatToRawIntBits) VARCHAR(io.prestosql.spi.type.VarcharType.VARCHAR) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) AVRO(io.prestosql.plugin.hive.HiveStorageFormat.AVRO) HiveTestUtils.rowType(io.prestosql.plugin.hive.HiveTestUtils.rowType) RecordCursor(io.prestosql.spi.connector.RecordCursor) SemiTransactionalHiveMetastore(io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore) SESSION(io.prestosql.plugin.hive.HiveTestUtils.SESSION) HiveMetastore(io.prestosql.plugin.hive.metastore.HiveMetastore) LongStream(java.util.stream.LongStream) MULTIDELIMIT(io.prestosql.plugin.hive.HiveStorageFormat.MULTIDELIMIT) MoreFutures.getFutureValue(io.airlift.concurrent.MoreFutures.getFutureValue) PAGE_SORTER(io.prestosql.plugin.hive.HiveTestUtils.PAGE_SORTER) UTC(org.joda.time.DateTimeZone.UTC) MaterializedRow(io.prestosql.testing.MaterializedRow) PrincipalPrivileges(io.prestosql.plugin.hive.metastore.PrincipalPrivileges) IS_EXTERNAL_TABLE(io.prestosql.plugin.hive.HiveTableProperties.IS_EXTERNAL_TABLE) HiveColumnStatistics(io.prestosql.plugin.hive.metastore.HiveColumnStatistics) DateTimeTestingUtils.sqlTimestampOf(io.prestosql.testing.DateTimeTestingUtils.sqlTimestampOf) ColumnHandle(io.prestosql.spi.connector.ColumnHandle) STAGE_AND_MOVE_TO_TARGET_DIRECTORY(io.prestosql.plugin.hive.LocationHandle.WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY) TableType(org.apache.hadoop.hive.metastore.TableType) ConnectorMetadata(io.prestosql.spi.connector.ConnectorMetadata) OrcPageSource(io.prestosql.plugin.hive.orc.OrcPageSource) HiveTestUtils.getDefaultHiveSelectiveFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveSelectiveFactories) HdfsContext(io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Assertions.assertGreaterThan(io.airlift.testing.Assertions.assertGreaterThan) MoreCollectors.onlyElement(com.google.common.collect.MoreCollectors.onlyElement) Iterables.concat(com.google.common.collect.Iterables.concat) HiveWriteUtils.createDirectory(io.prestosql.plugin.hive.HiveWriteUtils.createDirectory) Path(org.apache.hadoop.fs.Path) KILOBYTE(io.airlift.units.DataSize.Unit.KILOBYTE) Constraint(io.prestosql.spi.connector.Constraint) ImmutableMap(com.google.common.collect.ImmutableMap) ArrayType(io.prestosql.spi.type.ArrayType) CharType.createCharType(io.prestosql.spi.type.CharType.createCharType) ConnectorSplitSource(io.prestosql.spi.connector.ConnectorSplitSource) HiveTestUtils.getDefaultHiveFileWriterFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveFileWriterFactories) PRESTO_VERSION_NAME(io.prestosql.plugin.hive.HiveMetadata.PRESTO_VERSION_NAME) String.format(java.lang.String.format) Preconditions.checkState(com.google.common.base.Preconditions.checkState) DataSize(io.airlift.units.DataSize) List(java.util.List) VarcharType.createUnboundedVarcharType(io.prestosql.spi.type.VarcharType.createUnboundedVarcharType) HiveTestUtils.getTypes(io.prestosql.plugin.hive.HiveTestUtils.getTypes) 
ConnectorTransactionHandle(io.prestosql.spi.connector.ConnectorTransactionHandle) Optional(java.util.Optional) SORTED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.SORTED_BY_PROPERTY) Logger(io.airlift.log.Logger) CounterStat(io.airlift.stats.CounterStat) HashMap(java.util.HashMap) INTEGER(io.prestosql.spi.type.IntegerType.INTEGER) HivePrincipal(io.prestosql.plugin.hive.metastore.HivePrincipal) AtomicReference(java.util.concurrent.atomic.AtomicReference) SqlVarbinary(io.prestosql.spi.type.SqlVarbinary) BridgingHiveMetastore(io.prestosql.plugin.hive.metastore.thrift.BridgingHiveMetastore) NamedTypeSignature(io.prestosql.spi.type.NamedTypeSignature) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) COMMIT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.COMMIT) TestingMetastoreLocator(io.prestosql.plugin.hive.metastore.thrift.TestingMetastoreLocator) Verify.verify(com.google.common.base.Verify.verify) Assertions.assertLessThanOrEqual(io.airlift.testing.Assertions.assertLessThanOrEqual) Range(io.prestosql.spi.predicate.Range) Threads.daemonThreadsNamed(io.airlift.concurrent.Threads.daemonThreadsNamed) HivePrivilegeInfo(io.prestosql.plugin.hive.metastore.HivePrivilegeInfo) Objects.requireNonNull(java.util.Objects.requireNonNull) SEQUENCEFILE(io.prestosql.plugin.hive.HiveStorageFormat.SEQUENCEFILE) REAL(io.prestosql.spi.type.RealType.REAL) HiveMetadata.convertToPredicate(io.prestosql.plugin.hive.HiveMetadata.convertToPredicate) ColumnStatistics(io.prestosql.spi.statistics.ColumnStatistics) HiveTestUtils.getNoOpIndexCache(io.prestosql.plugin.hive.HiveTestUtils.getNoOpIndexCache) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) TRANSACTION_CONFLICT(io.prestosql.spi.StandardErrorCode.TRANSACTION_CONFLICT) ConnectorTableHandle(io.prestosql.spi.connector.ConnectorTableHandle) CSV(io.prestosql.plugin.hive.HiveStorageFormat.CSV) HiveTestUtils.getDefaultHiveRecordCursorProvider(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveRecordCursorProvider) HiveTestUtils.arrayType(io.prestosql.plugin.hive.HiveTestUtils.arrayType) SMALLINT(io.prestosql.spi.type.SmallintType.SMALLINT) Executors.newCachedThreadPool(java.util.concurrent.Executors.newCachedThreadPool) ROLLBACK_AFTER_SINK_FINISH(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_SINK_FINISH) TestingConnectorSession(io.prestosql.testing.TestingConnectorSession) HiveColumnHandle.bucketColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle.bucketColumnHandle) ColumnHandle(io.prestosql.spi.connector.ColumnHandle) Table(io.prestosql.plugin.hive.metastore.Table) PrincipalPrivileges(io.prestosql.plugin.hive.metastore.PrincipalPrivileges) ConnectorInsertTableHandle(io.prestosql.spi.connector.ConnectorInsertTableHandle) PrestoException(io.prestosql.spi.PrestoException) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) ConnectorTableHandle(io.prestosql.spi.connector.ConnectorTableHandle) ViewColumn(io.prestosql.spi.connector.ConnectorViewDefinition.ViewColumn) Column(io.prestosql.plugin.hive.metastore.Column) SortingColumn(io.prestosql.plugin.hive.metastore.SortingColumn) Slices.utf8Slice(io.airlift.slice.Slices.utf8Slice) Slice(io.airlift.slice.Slice) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) TestingConnectorSession(io.prestosql.testing.TestingConnectorSession) ConnectorMetadata(io.prestosql.spi.connector.ConnectorMetadata) ConnectorPageSink(io.prestosql.spi.connector.ConnectorPageSink) 
MaterializedResult(io.prestosql.testing.MaterializedResult)

Example 15 with StorageFormat

use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

the class AbstractTestHive method doCreateEmptyTable.

protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat storageFormat, List<ColumnMetadata> createTableColumns) throws Exception {
    List<String> partitionedBy = createTableColumns.stream().filter(column -> column.getName().equals("ds")).map(ColumnMetadata::getName).collect(toList());
    String queryId;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        queryId = session.getQueryId();
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, createTableColumns, createTableProperties(storageFormat, partitionedBy));
        metadata.createTable(session, tableMetadata, false);
        transaction.commit();
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        List<ColumnMetadata> expectedColumns = createTableColumns.stream().map(column -> new ColumnMetadata(column.getName(), column.getType(), true, column.getComment(), columnExtraInfo(partitionedBy.contains(column.getName())), false, emptyMap(), partitionedBy.contains(column.getName()))).collect(toList());
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), expectedColumns);
        // verify table format
        Table table = transaction.getMetastore(tableName.getSchemaName()).getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()).get();
        assertEquals(table.getStorage().getStorageFormat().getInputFormat(), storageFormat.getInputFormat());
        // verify the node version and query ID
        assertEquals(table.getParameters().get(PRESTO_VERSION_NAME), TEST_SERVER_VERSION);
        assertEquals(table.getParameters().get(PRESTO_QUERY_ID_NAME), queryId);
        // verify the table is empty
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEquals(result.getRowCount(), 0);
        // verify basic statistics
        if (partitionedBy.isEmpty()) {
            HiveBasicStatistics statistics = getBasicStatisticsForTable(session, transaction, tableName);
            assertEquals(statistics.getRowCount().getAsLong(), 0L);
            assertEquals(statistics.getFileCount().getAsLong(), 0L);
            assertEquals(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L);
            assertEquals(statistics.getOnDiskDataSizeInBytes().getAsLong(), 0L);
        }
    }
}
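
A matching caller is a one-liner. Again a hypothetical sketch, assuming an AbstractTestHive subclass with the static imports shown above; note the helper treats any column literally named "ds" as the partition key.

@Test
public void testCreateEmptyPartitionedTableSketch() throws Exception {
    doCreateEmptyTable(
            new SchemaTableName("test_schema", "test_empty_table"),
            ORC,
            ImmutableList.of(
                    new ColumnMetadata("id", BIGINT),
                    // "ds" is picked up as the partition column by the helper's filter
                    new ColumnMetadata("ds", createUnboundedVarcharType())));
}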
Also used : ROLLBACK_AFTER_FINISH_INSERT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_FINISH_INSERT) HiveType.toHiveType(io.prestosql.plugin.hive.HiveType.toHiveType) TableStatistics(io.prestosql.spi.statistics.TableStatistics) StorageFormat(io.prestosql.plugin.hive.metastore.StorageFormat) Assertions.assertInstanceOf(io.airlift.testing.Assertions.assertInstanceOf) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.testng.annotations.Test) TableAlreadyExistsException(io.prestosql.spi.connector.TableAlreadyExistsException) NullableValue(io.prestosql.spi.predicate.NullableValue) FileStatus(org.apache.hadoop.fs.FileStatus) TEXTFILE(io.prestosql.plugin.hive.HiveStorageFormat.TEXTFILE) TypeSignature.parseTypeSignature(io.prestosql.spi.type.TypeSignature.parseTypeSignature) TableNotFoundException(io.prestosql.spi.connector.TableNotFoundException) Files.createTempDirectory(java.nio.file.Files.createTempDirectory) Map(java.util.Map) RowType(io.prestosql.spi.type.RowType) ENGLISH(java.util.Locale.ENGLISH) Assert.assertFalse(org.testng.Assert.assertFalse) Chars.isCharType(io.prestosql.spi.type.Chars.isCharType) LOCATION_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.LOCATION_PROPERTY) MoreExecutors.directExecutor(com.google.common.util.concurrent.MoreExecutors.directExecutor) RCTEXT(io.prestosql.plugin.hive.HiveStorageFormat.RCTEXT) ConnectorPageSource(io.prestosql.spi.connector.ConnectorPageSource) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) Table(io.prestosql.plugin.hive.metastore.Table) ORC(io.prestosql.plugin.hive.HiveStorageFormat.ORC) SchemaTablePrefix(io.prestosql.spi.connector.SchemaTablePrefix) HiveBasicStatistics.createZeroStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createZeroStatistics) TRANSACTIONAL(io.prestosql.plugin.hive.HiveTableProperties.TRANSACTIONAL) TYPE_MANAGER(io.prestosql.plugin.hive.HiveTestUtils.TYPE_MANAGER) MetastoreLocator(io.prestosql.plugin.hive.metastore.thrift.MetastoreLocator) LocalDateTime(java.time.LocalDateTime) PRESTO_QUERY_ID_NAME(io.prestosql.plugin.hive.HiveMetadata.PRESTO_QUERY_ID_NAME) ThriftHiveMetastoreConfig(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastoreConfig) OptionalLong(java.util.OptionalLong) REGULAR(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.REGULAR) PARTITION_KEY(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.PARTITION_KEY) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) DOUBLE(io.prestosql.spi.type.DoubleType.DOUBLE) ThriftHiveMetastore(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastore) DiscretePredicates(io.prestosql.spi.connector.DiscretePredicates) Assertions.assertGreaterThanOrEqual(io.airlift.testing.Assertions.assertGreaterThanOrEqual) ImmutableMultimap(com.google.common.collect.ImmutableMultimap) PARQUET(io.prestosql.plugin.hive.HiveStorageFormat.PARQUET) ConnectorOutputTableHandle(io.prestosql.spi.connector.ConnectorOutputTableHandle) AfterClass(org.testng.annotations.AfterClass) HiveTestUtils.mapType(io.prestosql.plugin.hive.HiveTestUtils.mapType) FileUtils.makePartName(org.apache.hadoop.hive.common.FileUtils.makePartName) IOException(java.io.IOException) Iterables.getOnlyElement(com.google.common.collect.Iterables.getOnlyElement) ROLLBACK_RIGHT_AWAY(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_RIGHT_AWAY) HostAndPort(com.google.common.net.HostAndPort) 
USER(io.prestosql.spi.security.PrincipalType.USER) ConnectorTableMetadata(io.prestosql.spi.connector.ConnectorTableMetadata) VARBINARY(io.prestosql.spi.type.VarbinaryType.VARBINARY) HiveTestUtils.getDefaultOrcFileWriterFactory(io.prestosql.plugin.hive.HiveTestUtils.getDefaultOrcFileWriterFactory) ConnectorPageSourceProvider(io.prestosql.spi.connector.ConnectorPageSourceProvider) ROLLBACK_AFTER_APPEND_PAGE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_APPEND_PAGE) Varchars.isVarcharType(io.prestosql.spi.type.Varchars.isVarcharType) ConnectorSplitManager(io.prestosql.spi.connector.ConnectorSplitManager) ViewNotFoundException(io.prestosql.spi.connector.ViewNotFoundException) MaterializedResult.materializeSourceDataStream(io.prestosql.testing.MaterializedResult.materializeSourceDataStream) MaterializedResult(io.prestosql.testing.MaterializedResult) Duration(io.airlift.units.Duration) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) ConnectorTableProperties(io.prestosql.spi.connector.ConnectorTableProperties) BOOLEAN(io.prestosql.spi.type.BooleanType.BOOLEAN) Type(io.prestosql.spi.type.Type) RcFilePageSource(io.prestosql.plugin.hive.rcfile.RcFilePageSource) BIGINT(io.prestosql.spi.type.BigintType.BIGINT) DecimalType.createDecimalType(io.prestosql.spi.type.DecimalType.createDecimalType) PrestoException(io.prestosql.spi.PrestoException) HiveBasicStatistics.createEmptyStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createEmptyStatistics) ImmutableSet(com.google.common.collect.ImmutableSet) CachingHiveMetastore(io.prestosql.plugin.hive.metastore.CachingHiveMetastore) MetadataManager.createTestMetadataManager(io.prestosql.metadata.MetadataManager.createTestMetadataManager) ROLLBACK_AFTER_DELETE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_DELETE) HiveUtil.columnExtraInfo(io.prestosql.plugin.hive.HiveUtil.columnExtraInfo) BeforeClass(org.testng.annotations.BeforeClass) Collection(java.util.Collection) UUID(java.util.UUID) TINYINT(io.prestosql.spi.type.TinyintType.TINYINT) Assert.assertNotNull(org.testng.Assert.assertNotNull) HYPER_LOG_LOG(io.prestosql.spi.type.HyperLogLogType.HYPER_LOG_LOG) NOT_SUPPORTED(io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED) JsonCodec(io.airlift.json.JsonCodec) IntStream(java.util.stream.IntStream) NOT_PARTITIONED(io.prestosql.spi.connector.NotPartitionedPartitionHandle.NOT_PARTITIONED) SqlTimestamp(io.prestosql.spi.type.SqlTimestamp) BUCKET_COLUMN_NAME(io.prestosql.plugin.hive.HiveColumnHandle.BUCKET_COLUMN_NAME) Assert.assertNull(org.testng.Assert.assertNull) ConnectorViewDefinition(io.prestosql.spi.connector.ConnectorViewDefinition) SqlDate(io.prestosql.spi.type.SqlDate) ConnectorNewTableLayout(io.prestosql.spi.connector.ConnectorNewTableLayout) OptionalDouble(java.util.OptionalDouble) Assert.assertEquals(org.testng.Assert.assertEquals) BUCKETED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.BUCKETED_BY_PROPERTY) HiveUtil.toPartitionValues(io.prestosql.plugin.hive.HiveUtil.toPartitionValues) OptionalInt(java.util.OptionalInt) HashSet(java.util.HashSet) ImmutableList(com.google.common.collect.ImmutableList) ViewColumn(io.prestosql.spi.connector.ConnectorViewDefinition.ViewColumn) HiveColumnHandle.bucketColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle.bucketColumnHandle) DATE(io.prestosql.spi.type.DateType.DATE) Math.toIntExact(java.lang.Math.toIntExact) 
STORAGE_FORMAT_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.STORAGE_FORMAT_PROPERTY) Block(io.prestosql.spi.block.Block) ExecutorService(java.util.concurrent.ExecutorService) Collections.emptyMap(java.util.Collections.emptyMap) ParquetPageSource(io.prestosql.plugin.hive.parquet.ParquetPageSource) UTF_8(java.nio.charset.StandardCharsets.UTF_8) ColumnMetadata(io.prestosql.spi.connector.ColumnMetadata) TupleDomain(io.prestosql.spi.predicate.TupleDomain) Assert.fail(org.testng.Assert.fail) DateTime(org.joda.time.DateTime) PartitionWithStatistics(io.prestosql.plugin.hive.metastore.PartitionWithStatistics) Page(io.prestosql.spi.Page) HiveTestUtils.getDefaultHiveDataStreamFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveDataStreamFactories) Executors.newFixedThreadPool(java.util.concurrent.Executors.newFixedThreadPool) Hashing.sha256(com.google.common.hash.Hashing.sha256) BUCKETING_V1(io.prestosql.plugin.hive.HiveBucketing.BucketingVersion.BUCKETING_V1) Assertions.assertEqualsIgnoreOrder(io.airlift.testing.Assertions.assertEqualsIgnoreOrder) PARTITIONED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.PARTITIONED_BY_PROPERTY) Collectors.toList(java.util.stream.Collectors.toList) Column(io.prestosql.plugin.hive.metastore.Column) JoinCompiler(io.prestosql.sql.gen.JoinCompiler) Assert.assertTrue(org.testng.Assert.assertTrue) RecordPageSource(io.prestosql.spi.connector.RecordPageSource) ConnectorInsertTableHandle(io.prestosql.spi.connector.ConnectorInsertTableHandle) OrcConcatPageSource(io.prestosql.plugin.hive.orc.OrcConcatPageSource) ROLLBACK_AFTER_BEGIN_INSERT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_BEGIN_INSERT) Arrays(java.util.Arrays) RCBINARY(io.prestosql.plugin.hive.HiveStorageFormat.RCBINARY) NoHdfsAuthentication(io.prestosql.plugin.hive.authentication.NoHdfsAuthentication) ConnectorPageSink(io.prestosql.spi.connector.ConnectorPageSink) ValueSet(io.prestosql.spi.predicate.ValueSet) Maps.uniqueIndex(com.google.common.collect.Maps.uniqueIndex) BigDecimal(java.math.BigDecimal) Sets.difference(com.google.common.collect.Sets.difference) Executors.newScheduledThreadPool(java.util.concurrent.Executors.newScheduledThreadPool) HIVE_STRING(io.prestosql.plugin.hive.HiveType.HIVE_STRING) RowFieldName(io.prestosql.spi.type.RowFieldName) Slices.utf8Slice(io.airlift.slice.Slices.utf8Slice) ConnectorPageSinkProvider(io.prestosql.spi.connector.ConnectorPageSinkProvider) JSON(io.prestosql.plugin.hive.HiveStorageFormat.JSON) HIVE_INT(io.prestosql.plugin.hive.HiveType.HIVE_INT) HIVE_LONG(io.prestosql.plugin.hive.HiveType.HIVE_LONG) ConstraintApplicationResult(io.prestosql.spi.connector.ConstraintApplicationResult) UNGROUPED_SCHEDULING(io.prestosql.spi.connector.ConnectorSplitManager.SplitSchedulingStrategy.UNGROUPED_SCHEDULING) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) Set(java.util.Set) SqlStandardAccessControlMetadata(io.prestosql.plugin.hive.security.SqlStandardAccessControlMetadata) TIMESTAMP(io.prestosql.spi.type.TimestampType.TIMESTAMP) MILLISECONDS(java.util.concurrent.TimeUnit.MILLISECONDS) VarcharType.createVarcharType(io.prestosql.spi.type.VarcharType.createVarcharType) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) Domain(io.prestosql.spi.predicate.Domain) SortingColumn(io.prestosql.plugin.hive.metastore.SortingColumn) TestingNodeManager(io.prestosql.testing.TestingNodeManager) Lists.reverse(com.google.common.collect.Lists.reverse) 
MoreObjects.toStringHelper(com.google.common.base.MoreObjects.toStringHelper) Slice(io.airlift.slice.Slice) Partition(io.prestosql.plugin.hive.metastore.Partition) StandardTypes(io.prestosql.spi.type.StandardTypes) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) BUCKET_COUNT_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.BUCKET_COUNT_PROPERTY) MapType(io.prestosql.spi.type.MapType) GroupByHashPageIndexerFactory(io.prestosql.GroupByHashPageIndexerFactory) Float.floatToRawIntBits(java.lang.Float.floatToRawIntBits) VARCHAR(io.prestosql.spi.type.VarcharType.VARCHAR) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) AVRO(io.prestosql.plugin.hive.HiveStorageFormat.AVRO) HiveTestUtils.rowType(io.prestosql.plugin.hive.HiveTestUtils.rowType) RecordCursor(io.prestosql.spi.connector.RecordCursor) SemiTransactionalHiveMetastore(io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore) SESSION(io.prestosql.plugin.hive.HiveTestUtils.SESSION) HiveMetastore(io.prestosql.plugin.hive.metastore.HiveMetastore) LongStream(java.util.stream.LongStream) MULTIDELIMIT(io.prestosql.plugin.hive.HiveStorageFormat.MULTIDELIMIT) MoreFutures.getFutureValue(io.airlift.concurrent.MoreFutures.getFutureValue) PAGE_SORTER(io.prestosql.plugin.hive.HiveTestUtils.PAGE_SORTER) UTC(org.joda.time.DateTimeZone.UTC) MaterializedRow(io.prestosql.testing.MaterializedRow) PrincipalPrivileges(io.prestosql.plugin.hive.metastore.PrincipalPrivileges) IS_EXTERNAL_TABLE(io.prestosql.plugin.hive.HiveTableProperties.IS_EXTERNAL_TABLE) HiveColumnStatistics(io.prestosql.plugin.hive.metastore.HiveColumnStatistics) DateTimeTestingUtils.sqlTimestampOf(io.prestosql.testing.DateTimeTestingUtils.sqlTimestampOf) ColumnHandle(io.prestosql.spi.connector.ColumnHandle) STAGE_AND_MOVE_TO_TARGET_DIRECTORY(io.prestosql.plugin.hive.LocationHandle.WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY) TableType(org.apache.hadoop.hive.metastore.TableType) ConnectorMetadata(io.prestosql.spi.connector.ConnectorMetadata) OrcPageSource(io.prestosql.plugin.hive.orc.OrcPageSource) HiveTestUtils.getDefaultHiveSelectiveFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveSelectiveFactories) HdfsContext(io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Assertions.assertGreaterThan(io.airlift.testing.Assertions.assertGreaterThan) MoreCollectors.onlyElement(com.google.common.collect.MoreCollectors.onlyElement) Iterables.concat(com.google.common.collect.Iterables.concat) HiveWriteUtils.createDirectory(io.prestosql.plugin.hive.HiveWriteUtils.createDirectory) Path(org.apache.hadoop.fs.Path) KILOBYTE(io.airlift.units.DataSize.Unit.KILOBYTE) Constraint(io.prestosql.spi.connector.Constraint) ImmutableMap(com.google.common.collect.ImmutableMap) ArrayType(io.prestosql.spi.type.ArrayType) CharType.createCharType(io.prestosql.spi.type.CharType.createCharType) ConnectorSplitSource(io.prestosql.spi.connector.ConnectorSplitSource) HiveTestUtils.getDefaultHiveFileWriterFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveFileWriterFactories) PRESTO_VERSION_NAME(io.prestosql.plugin.hive.HiveMetadata.PRESTO_VERSION_NAME) String.format(java.lang.String.format) Preconditions.checkState(com.google.common.base.Preconditions.checkState) DataSize(io.airlift.units.DataSize) List(java.util.List) VarcharType.createUnboundedVarcharType(io.prestosql.spi.type.VarcharType.createUnboundedVarcharType) HiveTestUtils.getTypes(io.prestosql.plugin.hive.HiveTestUtils.getTypes) 
ConnectorTransactionHandle(io.prestosql.spi.connector.ConnectorTransactionHandle) Optional(java.util.Optional) SORTED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.SORTED_BY_PROPERTY) Logger(io.airlift.log.Logger) CounterStat(io.airlift.stats.CounterStat) HashMap(java.util.HashMap) INTEGER(io.prestosql.spi.type.IntegerType.INTEGER) HivePrincipal(io.prestosql.plugin.hive.metastore.HivePrincipal) AtomicReference(java.util.concurrent.atomic.AtomicReference) SqlVarbinary(io.prestosql.spi.type.SqlVarbinary) BridgingHiveMetastore(io.prestosql.plugin.hive.metastore.thrift.BridgingHiveMetastore) NamedTypeSignature(io.prestosql.spi.type.NamedTypeSignature) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) COMMIT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.COMMIT) TestingMetastoreLocator(io.prestosql.plugin.hive.metastore.thrift.TestingMetastoreLocator) Verify.verify(com.google.common.base.Verify.verify) Assertions.assertLessThanOrEqual(io.airlift.testing.Assertions.assertLessThanOrEqual) Range(io.prestosql.spi.predicate.Range) Threads.daemonThreadsNamed(io.airlift.concurrent.Threads.daemonThreadsNamed) HivePrivilegeInfo(io.prestosql.plugin.hive.metastore.HivePrivilegeInfo) Objects.requireNonNull(java.util.Objects.requireNonNull) SEQUENCEFILE(io.prestosql.plugin.hive.HiveStorageFormat.SEQUENCEFILE) REAL(io.prestosql.spi.type.RealType.REAL) HiveMetadata.convertToPredicate(io.prestosql.plugin.hive.HiveMetadata.convertToPredicate) ColumnStatistics(io.prestosql.spi.statistics.ColumnStatistics) HiveTestUtils.getNoOpIndexCache(io.prestosql.plugin.hive.HiveTestUtils.getNoOpIndexCache) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) TRANSACTION_CONFLICT(io.prestosql.spi.StandardErrorCode.TRANSACTION_CONFLICT) ConnectorTableHandle(io.prestosql.spi.connector.ConnectorTableHandle) CSV(io.prestosql.plugin.hive.HiveStorageFormat.CSV) HiveTestUtils.getDefaultHiveRecordCursorProvider(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveRecordCursorProvider) HiveTestUtils.arrayType(io.prestosql.plugin.hive.HiveTestUtils.arrayType) SMALLINT(io.prestosql.spi.type.SmallintType.SMALLINT) Executors.newCachedThreadPool(java.util.concurrent.Executors.newCachedThreadPool) ROLLBACK_AFTER_SINK_FINISH(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_SINK_FINISH) TestingConnectorSession(io.prestosql.testing.TestingConnectorSession) HiveColumnHandle.bucketColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle.bucketColumnHandle) ColumnHandle(io.prestosql.spi.connector.ColumnHandle) ColumnMetadata(io.prestosql.spi.connector.ColumnMetadata) Table(io.prestosql.plugin.hive.metastore.Table) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) ConnectorTableHandle(io.prestosql.spi.connector.ConnectorTableHandle) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) TestingConnectorSession(io.prestosql.testing.TestingConnectorSession) ConnectorMetadata(io.prestosql.spi.connector.ConnectorMetadata) MaterializedResult(io.prestosql.testing.MaterializedResult) ConnectorTableMetadata(io.prestosql.spi.connector.ConnectorTableMetadata)

Aggregations

StorageFormat (io.prestosql.plugin.hive.metastore.StorageFormat) 23
ConnectorSession (io.prestosql.spi.connector.ConnectorSession) 19
Path (org.apache.hadoop.fs.Path) 19
ImmutableMap (com.google.common.collect.ImmutableMap) 17
PrestoException (io.prestosql.spi.PrestoException) 17
Type (io.prestosql.spi.type.Type) 17
IOException (java.io.IOException) 17
List (java.util.List) 17
Objects.requireNonNull (java.util.Objects.requireNonNull) 17
Optional (java.util.Optional) 17
Collectors.toList (java.util.stream.Collectors.toList) 17
ImmutableList (com.google.common.collect.ImmutableList) 15
FileSystem (org.apache.hadoop.fs.FileSystem) 15
Column (io.prestosql.plugin.hive.metastore.Column) 13
Table (io.prestosql.plugin.hive.metastore.Table) 13
Preconditions.checkArgument (com.google.common.base.Preconditions.checkArgument) 12
ImmutableMap.toImmutableMap (com.google.common.collect.ImmutableMap.toImmutableMap) 12
BIGINT (io.prestosql.spi.type.BigintType.BIGINT) 12
INTEGER (io.prestosql.spi.type.IntegerType.INTEGER) 12
RowType (io.prestosql.spi.type.RowType) 12
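
For reference, the StorageFormat these examples revolve around is typically obtained in one of two ways. This is a minimal sketch; the factory methods below match the io.prestosql hive plugin's API as used in hetu-core, but verify the signatures against your checkout.

import io.prestosql.plugin.hive.HiveStorageFormat;
import io.prestosql.plugin.hive.metastore.StorageFormat;

public final class StorageFormatSketch {
    private StorageFormatSketch() {}

    public static void main(String[] args) {
        // derive serde/input/output classes from a well-known Hive format
        StorageFormat orc = StorageFormat.fromHiveStorageFormat(HiveStorageFormat.ORC);
        // or spell them out explicitly, exactly as the metastore stores them
        StorageFormat explicit = StorageFormat.create(
                "org.apache.hadoop.hive.ql.io.orc.OrcSerde",
                "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
                "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
        System.out.println(orc.getInputFormat());
        System.out.println(explicit.getSerDe());
    }
}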