Search in sources:

Example 1 with Partition

Use of com.facebook.presto.hive.metastore.Partition in project presto by prestodb.

The class AbstractTestHiveClient, method doInsertIntoNewPartition:

private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    // creating the table
    doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS_PARTITIONED);
    // insert the data
    String queryId = insertData(tableName, CREATE_TABLE_PARTITIONED_DATA);
    Set<String> existingFiles;
    try (Transaction transaction = newTransaction()) {
        // verify partitions were created
        List<String> partitionNames = transaction.getMetastore(tableName.getSchemaName()).getPartitionNames(tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new PrestoException(HIVE_METASTORE_ERROR, "Partition metadata not available"));
        assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream().map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)).collect(toList()));
        // verify the node versions in partitions
        Map<String, Optional<Partition>> partitions = getMetastoreClient(tableName.getSchemaName()).getPartitionsByNames(tableName.getSchemaName(), tableName.getTableName(), partitionNames);
        assertEquals(partitions.size(), partitionNames.size());
        for (String partitionName : partitionNames) {
            Partition partition = partitions.get(partitionName).get();
            assertEquals(partition.getParameters().get(HiveMetadata.PRESTO_VERSION_NAME), TEST_SERVER_VERSION);
            assertEquals(partition.getParameters().get(HiveMetadata.PRESTO_QUERY_ID_NAME), queryId);
        }
        // load the new table
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        // verify the data
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows());
        // test rollback
        existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertFalse(existingFiles.isEmpty());
    }
    Path stagingPathRoot;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        // "stage" insert data
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
        stagingPathRoot = getStagingPathRoot(insertTableHandle);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(CREATE_TABLE_PARTITIONED_DATA_2ND.toPage());
        getFutureValue(sink.finish());
        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
        // verify all temp files start with the unique prefix
        Set<String> tempFiles = listAllDataFiles(getStagingPathRoot(insertTableHandle));
        assertTrue(!tempFiles.isEmpty());
        for (String filePath : tempFiles) {
            assertTrue(new Path(filePath).getName().startsWith(getFilePrefix(insertTableHandle)));
        }
        // rollback insert
        transaction.rollback();
    }
    // verify the data is unchanged
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, newSession(), TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows());
        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
        // verify temp directory is empty
        assertTrue(listAllDataFiles(stagingPathRoot).isEmpty());
    }
}
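The parameter assertions above show the key use of Partition in this test: every partition written by Presto carries the engine version and the writing query id in its parameters. As a minimal sketch of how that check could be factored out (the helper name is hypothetical; it assumes the same imports and the TEST_SERVER_VERSION constant from the test class above):

private static void assertPartitionProvenance(Partition partition, String expectedQueryId) {
    // HiveMetadata records the writing engine's version and query id in the partition parameters
    assertEquals(partition.getParameters().get(HiveMetadata.PRESTO_VERSION_NAME), TEST_SERVER_VERSION);
    assertEquals(partition.getParameters().get(HiveMetadata.PRESTO_QUERY_ID_NAME), expectedQueryId);
}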

Example 2 with Partition

Use of com.facebook.presto.hive.metastore.Partition in project presto by prestodb.

The class HiveMetadata, method finishCreateTable:

@Override
public Optional<ConnectorOutputMetadata> finishCreateTable(ConnectorSession session, ConnectorOutputTableHandle tableHandle, Collection<Slice> fragments) {
    HiveOutputTableHandle handle = (HiveOutputTableHandle) tableHandle;
    List<PartitionUpdate> partitionUpdates = fragments.stream().map(Slice::getBytes).map(partitionUpdateCodec::fromJson).collect(toList());
    Path targetPath = locationService.targetPathRoot(handle.getLocationHandle());
    Path writePath = locationService.writePathRoot(handle.getLocationHandle()).get();
    Table table = buildTableObject(session.getQueryId(), handle.getSchemaName(), handle.getTableName(), handle.getTableOwner(), handle.getInputColumns(), handle.getTableStorageFormat(), handle.getPartitionedBy(), handle.getBucketProperty(), handle.getAdditionalTableParameters(), targetPath, false, prestoVersion);
    PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(handle.getTableOwner());
    partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates);
    if (handle.getBucketProperty().isPresent()) {
        ImmutableList<PartitionUpdate> partitionUpdatesForMissingBuckets = computePartitionUpdatesForMissingBuckets(handle, table, partitionUpdates);
        // replace partitionUpdates before creating the empty files so that those files will be cleaned up if we end up rolling back
        partitionUpdates = PartitionUpdate.mergePartitionUpdates(Iterables.concat(partitionUpdates, partitionUpdatesForMissingBuckets));
        for (PartitionUpdate partitionUpdate : partitionUpdatesForMissingBuckets) {
            Optional<Partition> partition = table.getPartitionColumns().isEmpty() ? Optional.empty() : Optional.of(buildPartitionObject(session.getQueryId(), table, partitionUpdate));
            createEmptyFile(partitionUpdate.getWritePath(), table, partition, partitionUpdate.getFileNames());
        }
    }
    metastore.createTable(session, table, principalPrivileges, Optional.of(writePath));
    if (!handle.getPartitionedBy().isEmpty()) {
        if (respectTableFormat) {
            Verify.verify(handle.getPartitionStorageFormat() == handle.getTableStorageFormat());
        }
        partitionUpdates.forEach(partitionUpdate -> metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), buildPartitionObject(session.getQueryId(), table, partitionUpdate), partitionUpdate.getWritePath()));
    }
    return Optional.of(new HiveWrittenPartitions(partitionUpdates.stream().map(PartitionUpdate::getName).collect(Collectors.toList())));
}
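For bucketed tables, Presto creates an empty file for every bucket that received no rows, so each partition contains a file per declared bucket; computePartitionUpdatesForMissingBuckets determines which buckets those are before createEmptyFile fills the gaps. A simplified, self-contained sketch of that idea (illustrative only, not the actual Presto implementation; the method and file-name pattern are assumptions, using plain java.util collections):

static List<String> missingBucketFileNames(int bucketCount, Set<Integer> bucketsWithData, String filePrefix) {
    // every bucket number from 0 to bucketCount - 1 must have a file; synthesize names for the gaps
    List<String> missing = new ArrayList<>();
    for (int bucket = 0; bucket < bucketCount; bucket++) {
        if (!bucketsWithData.contains(bucket)) {
            // illustrative naming only; the real names come from the Hive writer
            missing.add(String.format("%s_%05d", filePrefix, bucket));
        }
    }
    return missing;
}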

Example 3 with Partition

Use of com.facebook.presto.hive.metastore.Partition in project presto by prestodb.

The class ManifestPartitionLoader, method createInternalHiveSplitFactory:

private InternalHiveSplitFactory createInternalHiveSplitFactory(Table table, HivePartitionMetadata partition, ConnectorSession session, Optional<Domain> pathDomain, HdfsEnvironment hdfsEnvironment, HdfsContext hdfsContext, boolean schedulerUsesHostAddresses) throws IOException {
    String partitionName = partition.getHivePartition().getPartitionId();
    Storage storage = partition.getPartition().map(Partition::getStorage).orElse(table.getStorage());
    String inputFormatName = storage.getStorageFormat().getInputFormat();
    int partitionDataColumnCount = partition.getPartition().map(p -> p.getColumns().size()).orElse(table.getDataColumns().size());
    List<HivePartitionKey> partitionKeys = getPartitionKeys(table, partition.getPartition(), partitionName);
    Path path = new Path(getPartitionLocation(table, partition.getPartition()));
    Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path);
    InputFormat<?, ?> inputFormat = getInputFormat(configuration, inputFormatName, false);
    ExtendedFileSystem fileSystem = hdfsEnvironment.getFileSystem(hdfsContext, path);
    return new InternalHiveSplitFactory(fileSystem, inputFormat, pathDomain, getNodeSelectionStrategy(session), getMaxInitialSplitSize(session), false, new HiveSplitPartitionInfo(storage, path.toUri(), partitionKeys, partitionName, partitionDataColumnCount, partition.getTableToPartitionMapping(), Optional.empty(), partition.getRedundantColumnDomains()), schedulerUsesHostAddresses, partition.getEncryptionInformation());
}

Example 4 with Partition

Use of com.facebook.presto.hive.metastore.Partition in project presto by prestodb.

The class StoragePartitionLoader, method getBucketedSplits:

private List<InternalHiveSplit> getBucketedSplits(Path path, ExtendedFileSystem fileSystem, InternalHiveSplitFactory splitFactory, BucketSplitInfo bucketSplitInfo, Optional<HiveSplit.BucketConversion> bucketConversion, String partitionName, boolean splittable, PathFilter pathFilter) {
    int readBucketCount = bucketSplitInfo.getReadBucketCount();
    int tableBucketCount = bucketSplitInfo.getTableBucketCount();
    int partitionBucketCount = bucketConversion.map(HiveSplit.BucketConversion::getPartitionBucketCount).orElse(tableBucketCount);
    int bucketCount = max(readBucketCount, partitionBucketCount);
    checkState(readBucketCount <= tableBucketCount, "readBucketCount(%s) should be less than or equal to tableBucketCount(%s)", readBucketCount, tableBucketCount);
    // list all files in the partition
    List<HiveFileInfo> fileInfos = new ArrayList<>(partitionBucketCount);
    try {
        Iterators.addAll(fileInfos, directoryLister.list(fileSystem, table, path, namenodeStats, pathFilter, new HiveDirectoryContext(FAIL, isUseListDirectoryCache(session))));
    } catch (HiveFileIterator.NestedDirectoryNotAllowedException e) {
        // Fail here to be on the safe side. This seems to be the same as what Hive does
        throw new PrestoException(HIVE_INVALID_BUCKET_FILES, format("Hive table '%s' is corrupt. Found sub-directory in bucket directory for partition: %s", table.getSchemaTableName(), partitionName));
    }
    ListMultimap<Integer, HiveFileInfo> bucketToFileInfo = ArrayListMultimap.create();
    if (!shouldCreateFilesForMissingBuckets(table, session)) {
        fileInfos.stream().forEach(fileInfo -> {
            String fileName = fileInfo.getPath().getName();
            OptionalInt bucket = getBucketNumber(fileName);
            if (bucket.isPresent()) {
                bucketToFileInfo.put(bucket.getAsInt(), fileInfo);
            } else {
                throw new PrestoException(HIVE_INVALID_BUCKET_FILES, format("invalid hive bucket file name: %s", fileName));
            }
        });
    } else {
        // build mapping of file name to bucket
        for (HiveFileInfo file : fileInfos) {
            String fileName = file.getPath().getName();
            OptionalInt bucket = getBucketNumber(fileName);
            if (bucket.isPresent()) {
                bucketToFileInfo.put(bucket.getAsInt(), file);
                continue;
            }
            // legacy mode requires exactly one file per bucket
            if (fileInfos.size() != partitionBucketCount) {
                throw new PrestoException(HIVE_INVALID_BUCKET_FILES, format("Hive table '%s' is corrupt. File '%s' does not match the standard naming pattern, and the number " + "of files in the directory (%s) does not match the declared bucket count (%s) for partition: %s", table.getSchemaTableName(), fileName, fileInfos.size(), partitionBucketCount, partitionName));
            }
            if (fileInfos.get(0).getPath().getName().matches("\\d+")) {
                try {
                    // File names are integer if they are created when file_renaming_enabled is set to true
                    fileInfos.sort(Comparator.comparingInt(fileInfo -> Integer.parseInt(fileInfo.getPath().getName())));
                } catch (NumberFormatException e) {
                    throw new PrestoException(HIVE_INVALID_FILE_NAMES, format("Hive table '%s' is corrupt. Some of the filenames in the partition: %s are not integers", new SchemaTableName(table.getDatabaseName(), table.getTableName()), partitionName));
                }
            } else {
                // Sort FileStatus objects (instead of, e.g., fileStatus.getPath().toString). This matches org.apache.hadoop.hive.ql.metadata.Table.getSortedPaths
                fileInfos.sort(null);
            }
            // Use position in sorted list as the bucket number
            bucketToFileInfo.clear();
            for (int i = 0; i < fileInfos.size(); i++) {
                bucketToFileInfo.put(i, fileInfos.get(i));
            }
            break;
        }
    }
    // convert files to internal splits
    List<InternalHiveSplit> splitList = new ArrayList<>();
    for (int bucketNumber = 0; bucketNumber < bucketCount; bucketNumber++) {
        // Physical bucket #. This determines the file name. It also determines the order of splits in the result.
        int partitionBucketNumber = bucketNumber % partitionBucketCount;
        if (!bucketToFileInfo.containsKey(partitionBucketNumber)) {
            continue;
        }
        // Logical bucket #. Each logical bucket corresponds to a "bucket" from the engine's perspective.
        int readBucketNumber = bucketNumber % readBucketCount;
        boolean containsIneligibleTableBucket = false;
        List<Integer> eligibleTableBucketNumbers = new ArrayList<>();
        for (int tableBucketNumber = bucketNumber % tableBucketCount; tableBucketNumber < tableBucketCount; tableBucketNumber += bucketCount) {
            // table bucket number: this is used for evaluating "$bucket" filters.
            if (bucketSplitInfo.isTableBucketEnabled(tableBucketNumber)) {
                eligibleTableBucketNumbers.add(tableBucketNumber);
            } else {
                containsIneligibleTableBucket = true;
            }
        }
        if (!eligibleTableBucketNumbers.isEmpty() && containsIneligibleTableBucket) {
            throw new PrestoException(NOT_SUPPORTED, "The bucket filter cannot be satisfied. There are restrictions on the bucket filter when all the following is true: " + "1. a table has a different buckets count as at least one of its partitions that is read in this query; " + "2. the table has a different but compatible bucket number with another table in the query; " + "3. some buckets of the table is filtered out from the query, most likely using a filter on \"$bucket\". " + "(table name: " + table.getTableName() + ", table bucket count: " + tableBucketCount + ", " + "partition bucket count: " + partitionBucketCount + ", effective reading bucket count: " + readBucketCount + ")");
        }
        if (!eligibleTableBucketNumbers.isEmpty()) {
            for (HiveFileInfo fileInfo : bucketToFileInfo.get(partitionBucketNumber)) {
                eligibleTableBucketNumbers.stream().map(tableBucketNumber -> splitFactory.createInternalHiveSplit(fileInfo, readBucketNumber, tableBucketNumber, splittable)).forEach(optionalSplit -> optionalSplit.ifPresent(splitList::add));
            }
        }
    }
    return splitList;
}
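The split loop above derives three numbers from each physical bucket: partitionBucketNumber selects the files to read, readBucketNumber is the bucket the engine sees, and the inner loop collects the table bucket numbers checked against "$bucket" filters. A small self-contained illustration of that arithmetic, using an assumed configuration of 4 table buckets, 2 partition buckets, and 2 read buckets:

public static void main(String[] args) {
    int readBucketCount = 2;
    int tableBucketCount = 4;
    int partitionBucketCount = 2;
    // same derivation as getBucketedSplits: the physical bucket count is the larger of read and partition counts
    int bucketCount = Math.max(readBucketCount, partitionBucketCount);
    for (int bucketNumber = 0; bucketNumber < bucketCount; bucketNumber++) {
        int partitionBucketNumber = bucketNumber % partitionBucketCount;
        int readBucketNumber = bucketNumber % readBucketCount;
        StringBuilder tableBuckets = new StringBuilder();
        for (int tableBucketNumber = bucketNumber % tableBucketCount; tableBucketNumber < tableBucketCount; tableBucketNumber += bucketCount) {
            tableBuckets.append(tableBucketNumber).append(' ');
        }
        // prints: physical=0 partition=0 read=0 table=[0 2] and physical=1 partition=1 read=1 table=[1 3]
        System.out.printf("physical=%d partition=%d read=%d table=[%s]%n", bucketNumber, partitionBucketNumber, readBucketNumber, tableBuckets.toString().trim());
    }
}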

Example 5 with Partition

Use of com.facebook.presto.hive.metastore.Partition in project presto by prestodb.

The class AbstractTestHiveClient, method testStorePartitionWithStatistics:

protected void testStorePartitionWithStatistics(List<ColumnMetadata> columns, PartitionStatistics statsForAllColumns1, PartitionStatistics statsForAllColumns2, PartitionStatistics statsForSubsetOfColumns, PartitionStatistics emptyStatistics, Duration delayBetweenAlters) throws Exception {
    SchemaTableName tableName = temporaryTable("store_partition_with_statistics");
    try {
        doCreateEmptyTable(tableName, ORC, columns);
        ExtendedHiveMetastore metastoreClient = getMetastoreClient();
        Table table = metastoreClient.getTable(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new TableNotFoundException(tableName));
        List<String> partitionValues = ImmutableList.of("2016-01-01");
        String partitionName = makePartName(ImmutableList.of("ds"), partitionValues);
        Partition partition = createDummyPartition(table, partitionName);
        // create partition with stats for all columns
        metastoreClient.addPartitions(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of(new PartitionWithStatistics(partition, partitionName, statsForAllColumns1)));
        assertEquals(metastoreClient.getPartition(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), fromHiveStorageFormat(ORC));
        assertThat(metastoreClient.getPartitionStatistics(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))).isEqualTo(ImmutableMap.of(partitionName, statsForAllColumns1));
        sleep(delayBetweenAlters.toMillis());
        // alter the partition into one with other stats
        Partition modifiedPartition = Partition.builder(partition).withStorage(storage -> storage.setStorageFormat(fromHiveStorageFormat(DWRF)).setLocation(partitionTargetPath(tableName, partitionName))).build();
        metastoreClient.alterPartition(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForAllColumns2));
        assertEquals(metastoreClient.getPartition(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), fromHiveStorageFormat(DWRF));
        assertThat(metastoreClient.getPartitionStatistics(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))).isEqualTo(ImmutableMap.of(partitionName, statsForAllColumns2));
        sleep(delayBetweenAlters.toMillis());
        // alter the partition into one with stats for only subset of columns
        modifiedPartition = Partition.builder(partition).withStorage(storage -> storage.setStorageFormat(fromHiveStorageFormat(TEXTFILE)).setLocation(partitionTargetPath(tableName, partitionName))).build();
        metastoreClient.alterPartition(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForSubsetOfColumns));
        assertThat(metastoreClient.getPartitionStatistics(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))).isEqualTo(ImmutableMap.of(partitionName, statsForSubsetOfColumns));
        sleep(delayBetweenAlters.toMillis());
        // alter the partition into one without stats
        modifiedPartition = Partition.builder(partition).withStorage(storage -> storage.setStorageFormat(fromHiveStorageFormat(TEXTFILE)).setLocation(partitionTargetPath(tableName, partitionName))).build();
        metastoreClient.alterPartition(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, emptyStatistics));
        assertThat(metastoreClient.getPartitionStatistics(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))).isEqualTo(ImmutableMap.of(partitionName, emptyStatistics));
    } finally {
        dropTable(tableName);
    }
}
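Each alter step above follows the same pattern: call alterPartition with a PartitionWithStatistics, then assert that getPartitionStatistics returns exactly what was stored. As a minimal sketch (the helper name is an assumption; it relies on the same constants and imports as the test class above), the pattern could be factored into a helper:

private static void alterPartitionAndVerifyStatistics(ExtendedHiveMetastore metastoreClient, SchemaTableName tableName, Partition partition, String partitionName, PartitionStatistics statistics) {
    // store the new statistics for the partition
    metastoreClient.alterPartition(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(partition, partitionName, statistics));
    // confirm the metastore reports exactly the statistics that were stored
    assertThat(metastoreClient.getPartitionStatistics(METASTORE_CONTEXT, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName)))
            .isEqualTo(ImmutableMap.of(partitionName, statistics));
}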

Aggregations

Partition (com.facebook.presto.hive.metastore.Partition): 40
Table (com.facebook.presto.hive.metastore.Table): 29
PrestoException (com.facebook.presto.spi.PrestoException): 25
Optional (java.util.Optional): 19
SchemaTableName (com.facebook.presto.spi.SchemaTableName): 18
ImmutableMap (com.google.common.collect.ImmutableMap): 18
ImmutableList (com.google.common.collect.ImmutableList): 17
List (java.util.List): 17
Map (java.util.Map): 17
Path (org.apache.hadoop.fs.Path): 17
MetastoreContext (com.facebook.presto.hive.metastore.MetastoreContext): 14
Objects.requireNonNull (java.util.Objects.requireNonNull): 14
Domain (com.facebook.presto.common.predicate.Domain): 13
PartitionStatistics (com.facebook.presto.hive.metastore.PartitionStatistics): 13
ConnectorSession (com.facebook.presto.spi.ConnectorSession): 13
TableNotFoundException (com.facebook.presto.spi.TableNotFoundException): 12
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 12
IOException (java.io.IOException): 12
ArrayList (java.util.ArrayList): 12
Column (com.facebook.presto.hive.metastore.Column): 11