Example 1 with HIVE_CANNOT_OPEN_SPLIT

Use of io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT in the project boostkit-bigdata by kunpengcompute.

From the class ParquetPageSourceFactory, the method createParquetPageSource:

public static ParquetPageSource createParquetPageSource(HdfsEnvironment hdfsEnvironment, String user, Configuration configuration, Path path, long start, long length, long fileSize, Properties schema, List<HiveColumnHandle> columns, boolean useParquetColumnNames, boolean failOnCorruptedParquetStatistics, DataSize maxReadBlockSize, TypeManager typeManager, TupleDomain<HiveColumnHandle> effectivePredicate, FileFormatDataSourceStats stats, DateTimeZone timeZone) {
    AggregatedMemoryContext systemMemoryContext = newSimpleAggregatedMemoryContext();
    ParquetDataSource dataSource = null;
    DateTimeZone readerTimeZone = timeZone;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(user, path, configuration);
        FSDataInputStream inputStream = hdfsEnvironment.doAs(user, () -> fileSystem.open(path));
        ParquetMetadata parquetMetadata = MetadataReader.readFooter(inputStream, path, fileSize);
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        dataSource = buildHdfsParquetDataSource(inputStream, path, fileSize, stats);
        String writerTimeZoneId = fileMetaData.getKeyValueMetaData().get(WRITER_TIME_ZONE_KEY);
        if (writerTimeZoneId != null && !writerTimeZoneId.equalsIgnoreCase(readerTimeZone.getID())) {
            readerTimeZone = DateTimeZone.forID(writerTimeZoneId);
        }
        List<org.apache.parquet.schema.Type> fields = columns.stream().filter(column -> column.getColumnType() == REGULAR).map(column -> getParquetType(column, fileSchema, useParquetColumnNames)).filter(Objects::nonNull).collect(toList());
        MessageType requestedSchema = new MessageType(fileSchema.getName(), fields);
        ImmutableList.Builder<BlockMetaData> footerBlocks = ImmutableList.builder();
        for (BlockMetaData block : parquetMetadata.getBlocks()) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                footerBlocks.add(block);
            }
        }
        Map<List<String>, RichColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, requestedSchema);
        TupleDomain<ColumnDescriptor> parquetTupleDomain = getParquetTupleDomain(descriptorsByPath, effectivePredicate);
        Predicate parquetPredicate = buildPredicate(requestedSchema, parquetTupleDomain, descriptorsByPath);
        final ParquetDataSource finalDataSource = dataSource;
        ImmutableList.Builder<BlockMetaData> blocks = ImmutableList.builder();
        for (BlockMetaData block : footerBlocks.build()) {
            if (predicateMatches(parquetPredicate, block, finalDataSource, descriptorsByPath, parquetTupleDomain, failOnCorruptedParquetStatistics)) {
                blocks.add(block);
            }
        }
        MessageColumnIO messageColumnIO = getColumnIO(fileSchema, requestedSchema);
        ParquetReader parquetReader = new ParquetReader(Optional.ofNullable(fileMetaData.getCreatedBy()), messageColumnIO, blocks.build(), dataSource, readerTimeZone, systemMemoryContext, maxReadBlockSize);
        return new ParquetPageSource(parquetReader, fileSchema, messageColumnIO, typeManager, schema, columns, effectivePredicate, useParquetColumnNames);
    } catch (Exception e) {
        try {
            if (dataSource != null) {
                dataSource.close();
            }
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        if (e instanceof ParquetCorruptionException) {
            throw new PrestoException(HIVE_BAD_DATA, e);
        }
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e instanceof BlockMissingException) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
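The catch block above funnels every failure through one mapping: corrupted Parquet footers become HIVE_BAD_DATA, missing HDFS blocks become HIVE_MISSING_DATA, and "Filesystem closed" or FileNotFoundException, like any other failure, becomes HIVE_CANNOT_OPEN_SPLIT. A minimal standalone sketch of that pattern, with a hypothetical helper name (SplitErrors.toPrestoException is not part of either project; the logic simply mirrors the catch block):

import static com.google.common.base.Strings.nullToEmpty;
import static io.prestosql.plugin.hive.HiveErrorCode.HIVE_BAD_DATA;
import static io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT;
import static io.prestosql.plugin.hive.HiveErrorCode.HIVE_MISSING_DATA;
import static java.lang.String.format;

import java.io.FileNotFoundException;

import io.prestosql.parquet.ParquetCorruptionException;
import io.prestosql.spi.PrestoException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockMissingException;

final class SplitErrors
{
    private SplitErrors() {}

    // Mirrors the catch block of createParquetPageSource: classify the failure and
    // wrap it in a PrestoException carrying the matching Hive error code.
    static PrestoException toPrestoException(Exception e, Path path, long start, long length)
    {
        if (e instanceof PrestoException) {
            return (PrestoException) e;
        }
        if (e instanceof ParquetCorruptionException) {
            return new PrestoException(HIVE_BAD_DATA, e);
        }
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            return new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e instanceof BlockMissingException) {
            return new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        return new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}

With such a helper, the catch block reduces to closing the data source and then throw SplitErrors.toPrestoException(e, path, start, length).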

Example 2 with HIVE_CANNOT_OPEN_SPLIT

Use of io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT in the project hetu-core by openlookeng.

From the class ParquetPageSourceFactory, the method createParquetPageSource:

public static ParquetPageSource createParquetPageSource(HdfsEnvironment hdfsEnvironment, String user, Configuration configuration, Path path, long start, long length, long fileSize, Properties schema, List<HiveColumnHandle> columns, boolean useParquetColumnNames, boolean failOnCorruptedParquetStatistics, DataSize maxReadBlockSize, TypeManager typeManager, TupleDomain<HiveColumnHandle> effectivePredicate, FileFormatDataSourceStats stats, DateTimeZone timeZone) {
    AggregatedMemoryContext systemMemoryContext = newSimpleAggregatedMemoryContext();
    ParquetDataSource dataSource = null;
    DateTimeZone readerTimeZone = timeZone;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(user, path, configuration);
        FSDataInputStream inputStream = hdfsEnvironment.doAs(user, () -> fileSystem.open(path));
        ParquetMetadata parquetMetadata = MetadataReader.readFooter(inputStream, path, fileSize);
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        dataSource = buildHdfsParquetDataSource(inputStream, path, fileSize, stats);
        String writerTimeZoneId = fileMetaData.getKeyValueMetaData().get(WRITER_TIME_ZONE_KEY);
        if (writerTimeZoneId != null && !writerTimeZoneId.equalsIgnoreCase(readerTimeZone.getID())) {
            readerTimeZone = DateTimeZone.forID(writerTimeZoneId);
        }
        List<org.apache.parquet.schema.Type> fields = columns.stream().filter(column -> column.getColumnType() == REGULAR).map(column -> getParquetType(column, fileSchema, useParquetColumnNames)).filter(Objects::nonNull).collect(toList());
        MessageType requestedSchema = new MessageType(fileSchema.getName(), fields);
        ImmutableList.Builder<BlockMetaData> footerBlocks = ImmutableList.builder();
        for (BlockMetaData block : parquetMetadata.getBlocks()) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                footerBlocks.add(block);
            }
        }
        Map<List<String>, RichColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, requestedSchema);
        TupleDomain<ColumnDescriptor> parquetTupleDomain = getParquetTupleDomain(descriptorsByPath, effectivePredicate);
        Predicate parquetPredicate = buildPredicate(requestedSchema, parquetTupleDomain, descriptorsByPath);
        final ParquetDataSource finalDataSource = dataSource;
        ImmutableList.Builder<BlockMetaData> blocks = ImmutableList.builder();
        for (BlockMetaData block : footerBlocks.build()) {
            if (predicateMatches(parquetPredicate, block, finalDataSource, descriptorsByPath, parquetTupleDomain, failOnCorruptedParquetStatistics)) {
                blocks.add(block);
            }
        }
        MessageColumnIO messageColumnIO = getColumnIO(fileSchema, requestedSchema);
        ParquetReader parquetReader = new ParquetReader(Optional.ofNullable(fileMetaData.getCreatedBy()), messageColumnIO, blocks.build(), dataSource, readerTimeZone, systemMemoryContext, maxReadBlockSize);
        return new ParquetPageSource(parquetReader, fileSchema, messageColumnIO, typeManager, schema, columns, effectivePredicate, useParquetColumnNames);
    } catch (Exception e) {
        try {
            if (dataSource != null) {
                dataSource.close();
            }
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        if (e instanceof ParquetCorruptionException) {
            throw new PrestoException(HIVE_BAD_DATA, e);
        }
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e instanceof BlockMissingException) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
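The loop over parquetMetadata.getBlocks() above is how a split claims its row groups: a row group belongs to this split when the first data page offset of its first column chunk falls inside [start, start + length), and the surviving groups are then filtered again by predicateMatches. A small sketch of just the offset-based selection (the class name RowGroupSelector is illustrative, not from either project):

import java.util.List;

import com.google.common.collect.ImmutableList;
import org.apache.parquet.hadoop.metadata.BlockMetaData;

final class RowGroupSelector
{
    private RowGroupSelector() {}

    // A row group is processed by the split whose byte range contains the first
    // data page of the group's first column chunk, so each group is read exactly once
    // even when splits partition the file at arbitrary offsets.
    static List<BlockMetaData> blocksForSplit(List<BlockMetaData> allBlocks, long start, long length)
    {
        ImmutableList.Builder<BlockMetaData> selected = ImmutableList.builder();
        for (BlockMetaData block : allBlocks) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                selected.add(block);
            }
        }
        return selected.build();
    }
}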

Example 3 with HIVE_CANNOT_OPEN_SPLIT

Use of io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT in the project hetu-core by openlookeng.

From the class OrcPageSourceFactory, the method createOrcPageSource:

public static OrcPageSource createOrcPageSource(HdfsEnvironment hdfsEnvironment, String sessionUser, Configuration configuration, Path path, long start, long length, long fileSize, List<HiveColumnHandle> columns, boolean useOrcColumnNames, boolean isFullAcid, TupleDomain<HiveColumnHandle> effectivePredicate, DateTimeZone legacyFileTimeZone, TypeManager typeManager, DataSize maxMergeDistance, DataSize maxBufferSize, DataSize streamBufferSize, DataSize tinyStripeThreshold, DataSize maxReadBlockSize, boolean lazyReadSmallRanges, boolean orcBloomFiltersEnabled, FileFormatDataSourceStats stats, Optional<DynamicFilterSupplier> dynamicFilters, Optional<DeleteDeltaLocations> deleteDeltaLocations, Optional<Long> startRowOffsetOfFile, Optional<List<IndexMetadata>> indexes, SplitMetadata splitMetadata, OrcCacheStore orcCacheStore, OrcCacheProperties orcCacheProperties, int domainCompactionThreshold, boolean pageMetadataEnabled, long dataSourceLastModifiedTime) {
    for (HiveColumnHandle column : columns) {
        checkArgument(column.getColumnType() == HiveColumnHandle.ColumnType.REGULAR || column.getHiveColumnIndex() == HiveColumnHandle.ROW_ID__COLUMN_INDEX, "column type must be regular: %s", column);
    }
    checkArgument(!effectivePredicate.isNone());
    OrcDataSource orcDataSource;
    try {
        // Always create a lazy Stream. HDFS stream opened only when required.
        FSDataInputStream inputStream = new FSDataInputStream(new LazyFSInputStream(() -> {
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
            return hdfsEnvironment.doAs(sessionUser, () -> fileSystem.open(path));
        }));
        orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, maxMergeDistance, maxBufferSize, streamBufferSize, lazyReadSmallRanges, inputStream, stats, dataSourceLastModifiedTime);
    } catch (Exception e) {
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
    }
    AggregatedMemoryContext systemMemoryUsage = newSimpleAggregatedMemoryContext();
    try {
        OrcDataSource readerLocalDataSource = OrcReader.wrapWithCacheIfTiny(orcDataSource, tinyStripeThreshold);
        OrcFileTail fileTail;
        if (orcCacheProperties.isFileTailCacheEnabled()) {
            try {
                OrcDataSourceIdWithTimeStamp orcDataSourceIdWithTimeStamp = new OrcDataSourceIdWithTimeStamp(readerLocalDataSource.getId(), readerLocalDataSource.getLastModifiedTime());
                fileTail = orcCacheStore.getFileTailCache().get(new OrcFileTailCacheKey(orcDataSourceIdWithTimeStamp), () -> OrcPageSourceFactory.createFileTail(orcDataSource));
            } catch (UncheckedExecutionException | ExecutionException executionException) {
                handleCacheLoadException(executionException);
                log.debug(executionException.getCause(), "Error while caching the Orc file tail. Falling back to default flow");
                fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
            }
        } else {
            fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
        }
        OrcReader reader = new OrcReader(readerLocalDataSource, fileTail, maxMergeDistance, tinyStripeThreshold, maxReadBlockSize);
        List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
        List<OrcColumn> fileReadColumns = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        List<Type> fileReadTypes = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        ImmutableList<String> acidColumnNames = null;
        List<ColumnAdaptation> columnAdaptations = new ArrayList<>(columns.size());
        // Only Hive ACID files will begin with bucket_
        boolean fileNameContainsBucket = path.getName().contains("bucket");
        if (isFullAcid && fileNameContainsBucket) {
            // Skip the acid schema check in case of non-ACID files
            acidColumnNames = ImmutableList.<String>builder().add(ACID_COLUMN_ORIGINAL_TRANSACTION, ACID_COLUMN_BUCKET, ACID_COLUMN_ROW_ID, ACID_COLUMN_CURRENT_TRANSACTION, ACID_COLUMN_OPERATION).build();
            verifyAcidSchema(reader, path);
            Map<String, OrcColumn> acidColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
            if (AcidUtils.isDeleteDelta(path.getParent())) {
                // Avoid reading column data from delete_delta files.
                // Call will come here in case of Minor VACUUM where all delete_delta files are merge together.
                fileColumns = ImmutableList.of();
            } else {
                fileColumns = ensureColumnNameConsistency(acidColumnsByName.get(ACID_COLUMN_ROW_STRUCT).getNestedColumns(), columns);
            }
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ORIGINAL_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_BUCKET.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ROW_ID.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_CURRENT_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_OPERATION.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
        }
        Map<String, OrcColumn> fileColumnsByName = ImmutableMap.of();
        if (useOrcColumnNames || isFullAcid) {
            verifyFileHasColumnNames(fileColumns, path);
            // Convert column names read from ORC files to lower case to be consistent with those stored in Hive Metastore
            fileColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
        }
        TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder().setBloomFiltersEnabled(orcBloomFiltersEnabled);
        Map<HiveColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains().orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));
        for (HiveColumnHandle column : columns) {
            OrcColumn orcColumn = null;
            if (useOrcColumnNames || isFullAcid) {
                orcColumn = fileColumnsByName.get(column.getName());
            } else if (column.getHiveColumnIndex() >= 0 && column.getHiveColumnIndex() < fileColumns.size()) {
                orcColumn = fileColumns.get(column.getHiveColumnIndex());
            }
            Type readType = typeManager.getType(column.getTypeSignature());
            if (orcColumn != null) {
                int sourceIndex = fileReadColumns.size();
                columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
                fileReadColumns.add(orcColumn);
                fileReadTypes.add(readType);
                Domain domain = effectivePredicateDomains.get(column);
                if (domain != null) {
                    predicateBuilder.addColumn(orcColumn.getColumnId(), domain);
                }
            } else if (isFullAcid && readType instanceof RowType && column.getName().equalsIgnoreCase(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME)) {
                HiveType hiveType = column.getHiveType();
                StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType.getTypeInfo();
                ImmutableList.Builder<ColumnAdaptation> builder = new ImmutableList.Builder<>();
                ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
                List<ColumnAdaptation> adaptations = fieldNames.stream().map(acidColumnNames::indexOf).map(c -> ColumnAdaptation.sourceColumn(c, false)).collect(Collectors.toList());
                columnAdaptations.add(ColumnAdaptation.structColumn(structTypeInfo, adaptations));
            } else {
                columnAdaptations.add(ColumnAdaptation.nullColumn(readType));
            }
        }
        Map<String, Domain> domains = effectivePredicate.getDomains().get().entrySet().stream().collect(toMap(e -> e.getKey().getName(), Map.Entry::getValue));
        OrcRecordReader recordReader = reader.createRecordReader(fileReadColumns, fileReadTypes, predicateBuilder.build(), start, length, legacyFileTimeZone, systemMemoryUsage, INITIAL_BATCH_SIZE, exception -> handleException(orcDataSource.getId(), exception), indexes, splitMetadata, domains, orcCacheStore, orcCacheProperties, pageMetadataEnabled);
        OrcDeletedRows deletedRows = new OrcDeletedRows(path.getName(), deleteDeltaLocations, new OrcDeleteDeltaPageSourceFactory(sessionUser, configuration, hdfsEnvironment, maxMergeDistance, maxBufferSize, streamBufferSize, maxReadBlockSize, tinyStripeThreshold, lazyReadSmallRanges, orcBloomFiltersEnabled, stats), sessionUser, configuration, hdfsEnvironment, startRowOffsetOfFile);
        boolean eagerload = false;
        if (indexes.isPresent()) {
            eagerload = indexes.get().stream().anyMatch(indexMetadata -> EAGER_LOAD_INDEX_ID.contains(indexMetadata.getIndex().getId()));
        }
        return new OrcPageSource(recordReader, columnAdaptations, orcDataSource, deletedRows, eagerload, systemMemoryUsage, stats);
    } catch (Exception e) {
        try {
            orcDataSource.close();
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = splitError(e, path, start, length);
        if (e instanceof BlockMissingException) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
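The ORC variant wraps its FSDataInputStream around a LazyFSInputStream so the HDFS file is opened only when the reader first touches the stream, not when the page source is created. LazyFSInputStream itself is not shown in this snippet; the following is a minimal sketch of that idea under the assumption that it simply defers the open call (the class and supplier names are illustrative). FSDataInputStream requires the wrapped stream to implement Seekable and PositionedReadable, so the sketch implements both by delegating to the lazily opened stream:

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;

class LazySeekableInputStream
        extends InputStream
        implements Seekable, PositionedReadable
{
    interface StreamSupplier
    {
        FSDataInputStream open() throws IOException;
    }

    private final StreamSupplier supplier;
    private FSDataInputStream delegate;

    LazySeekableInputStream(StreamSupplier supplier)
    {
        this.supplier = supplier;
    }

    // The underlying HDFS stream is opened on first use.
    private FSDataInputStream stream() throws IOException
    {
        if (delegate == null) {
            delegate = supplier.open();
        }
        return delegate;
    }

    @Override public int read() throws IOException { return stream().read(); }
    @Override public int read(byte[] b, int off, int len) throws IOException { return stream().read(b, off, len); }
    @Override public void seek(long pos) throws IOException { stream().seek(pos); }
    @Override public long getPos() throws IOException { return stream().getPos(); }
    @Override public boolean seekToNewSource(long targetPos) throws IOException { return stream().seekToNewSource(targetPos); }
    @Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { return stream().read(position, buffer, offset, length); }
    @Override public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { stream().readFully(position, buffer, offset, length); }
    @Override public void readFully(long position, byte[] buffer) throws IOException { stream().readFully(position, buffer); }

    @Override
    public void close() throws IOException
    {
        if (delegate != null) {
            delegate.close();
        }
    }
}

Usage would mirror the example above: new FSDataInputStream(new LazySeekableInputStream(() -> fileSystem.open(path))).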

Example 4 with HIVE_CANNOT_OPEN_SPLIT

Use of io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT in the project boostkit-bigdata by kunpengcompute.

From the class OrcPageSourceFactory, the method createOrcPageSource:

public static OrcPageSource createOrcPageSource(HdfsEnvironment hdfsEnvironment, String sessionUser, Configuration configuration, Path path, long start, long length, long fileSize, List<HiveColumnHandle> columns, boolean useOrcColumnNames, boolean isFullAcid, TupleDomain<HiveColumnHandle> effectivePredicate, DateTimeZone legacyFileTimeZone, TypeManager typeManager, DataSize maxMergeDistance, DataSize maxBufferSize, DataSize streamBufferSize, DataSize tinyStripeThreshold, DataSize maxReadBlockSize, boolean lazyReadSmallRanges, boolean orcBloomFiltersEnabled, FileFormatDataSourceStats stats, Optional<DynamicFilterSupplier> dynamicFilters, Optional<DeleteDeltaLocations> deleteDeltaLocations, Optional<Long> startRowOffsetOfFile, Optional<List<IndexMetadata>> indexes, SplitMetadata splitMetadata, OrcCacheStore orcCacheStore, OrcCacheProperties orcCacheProperties, int domainCompactionThreshold, boolean pageMetadataEnabled, long dataSourceLastModifiedTime) {
    for (HiveColumnHandle column : columns) {
        checkArgument(column.getColumnType() == HiveColumnHandle.ColumnType.REGULAR || column.getHiveColumnIndex() == HiveColumnHandle.ROW_ID__COLUMN_INDEX, "column type must be regular: %s", column);
    }
    checkArgument(!effectivePredicate.isNone());
    OrcDataSource orcDataSource;
    try {
        // Always create a lazy Stream. HDFS stream opened only when required.
        FSDataInputStream inputStream = new FSDataInputStream(new LazyFSInputStream(() -> {
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
            return hdfsEnvironment.doAs(sessionUser, () -> fileSystem.open(path));
        }));
        orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, maxMergeDistance, maxBufferSize, streamBufferSize, lazyReadSmallRanges, inputStream, stats, dataSourceLastModifiedTime);
    } catch (Exception e) {
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
    }
    AggregatedMemoryContext systemMemoryUsage = newSimpleAggregatedMemoryContext();
    try {
        OrcDataSource readerLocalDataSource = OrcReader.wrapWithCacheIfTiny(orcDataSource, tinyStripeThreshold);
        OrcFileTail fileTail;
        if (orcCacheProperties.isFileTailCacheEnabled()) {
            try {
                OrcDataSourceIdWithTimeStamp orcDataSourceIdWithTimeStamp = new OrcDataSourceIdWithTimeStamp(readerLocalDataSource.getId(), readerLocalDataSource.getLastModifiedTime());
                fileTail = orcCacheStore.getFileTailCache().get(new OrcFileTailCacheKey(orcDataSourceIdWithTimeStamp), () -> OrcPageSourceFactory.createFileTail(orcDataSource));
            } catch (UncheckedExecutionException | ExecutionException executionException) {
                handleCacheLoadException(executionException);
                log.debug(executionException.getCause(), "Error while caching the Orc file tail. Falling back to default flow");
                fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
            }
        } else {
            fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
        }
        OrcReader reader = new OrcReader(readerLocalDataSource, fileTail, maxMergeDistance, tinyStripeThreshold, maxReadBlockSize);
        List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
        List<OrcColumn> fileReadColumns = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        List<Type> fileReadTypes = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        ImmutableList<String> acidColumnNames = null;
        List<ColumnAdaptation> columnAdaptations = new ArrayList<>(columns.size());
        // Only Hive ACID files will begin with bucket_
        boolean fileNameContainsBucket = path.getName().contains("bucket");
        if (isFullAcid && fileNameContainsBucket) {
            // Skip the acid schema check in case of non-ACID files
            acidColumnNames = ImmutableList.<String>builder().add(ACID_COLUMN_ORIGINAL_TRANSACTION, ACID_COLUMN_BUCKET, ACID_COLUMN_ROW_ID, ACID_COLUMN_CURRENT_TRANSACTION, ACID_COLUMN_OPERATION).build();
            verifyAcidSchema(reader, path);
            Map<String, OrcColumn> acidColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
            if (AcidUtils.isDeleteDelta(path.getParent())) {
                // Avoid reading column data from delete_delta files.
                // Call will come here in case of Minor VACUUM where all delete_delta files are merge together.
                fileColumns = ImmutableList.of();
            } else {
                fileColumns = ensureColumnNameConsistency(acidColumnsByName.get(ACID_COLUMN_ROW_STRUCT).getNestedColumns(), columns);
            }
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ORIGINAL_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_BUCKET.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ROW_ID.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_CURRENT_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_OPERATION.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
        }
        Map<String, OrcColumn> fileColumnsByName = ImmutableMap.of();
        if (useOrcColumnNames || isFullAcid) {
            verifyFileHasColumnNames(fileColumns, path);
            // Convert column names read from ORC files to lower case to be consistent with those stored in Hive Metastore
            fileColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
        }
        TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder().setBloomFiltersEnabled(orcBloomFiltersEnabled);
        Map<HiveColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains().orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));
        for (HiveColumnHandle column : columns) {
            OrcColumn orcColumn = null;
            if (useOrcColumnNames || isFullAcid) {
                orcColumn = fileColumnsByName.get(column.getName());
            } else if (column.getHiveColumnIndex() >= 0 && column.getHiveColumnIndex() < fileColumns.size()) {
                orcColumn = fileColumns.get(column.getHiveColumnIndex());
            }
            Type readType = typeManager.getType(column.getTypeSignature());
            if (orcColumn != null) {
                int sourceIndex = fileReadColumns.size();
                columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
                fileReadColumns.add(orcColumn);
                fileReadTypes.add(readType);
                Domain domain = effectivePredicateDomains.get(column);
                if (domain != null) {
                    predicateBuilder.addColumn(orcColumn.getColumnId(), domain);
                }
            } else if (isFullAcid && readType instanceof RowType && column.getName().equalsIgnoreCase(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME)) {
                HiveType hiveType = column.getHiveType();
                StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType.getTypeInfo();
                ImmutableList.Builder<ColumnAdaptation> builder = new ImmutableList.Builder<>();
                ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
                List<ColumnAdaptation> adaptations = fieldNames.stream().map(acidColumnNames::indexOf).map(c -> ColumnAdaptation.sourceColumn(c, false)).collect(Collectors.toList());
                columnAdaptations.add(ColumnAdaptation.structColumn(structTypeInfo, adaptations));
            } else {
                columnAdaptations.add(ColumnAdaptation.nullColumn(readType));
            }
        }
        Map<String, Domain> domains = effectivePredicate.getDomains().get().entrySet().stream().collect(toMap(e -> e.getKey().getName(), Map.Entry::getValue));
        OrcRecordReader recordReader = reader.createRecordReader(fileReadColumns, fileReadTypes, predicateBuilder.build(), start, length, legacyFileTimeZone, systemMemoryUsage, INITIAL_BATCH_SIZE, exception -> handleException(orcDataSource.getId(), exception), indexes, splitMetadata, domains, orcCacheStore, orcCacheProperties, pageMetadataEnabled);
        OrcDeletedRows deletedRows = new OrcDeletedRows(path.getName(), deleteDeltaLocations, new OrcDeleteDeltaPageSourceFactory(sessionUser, configuration, hdfsEnvironment, maxMergeDistance, maxBufferSize, streamBufferSize, maxReadBlockSize, tinyStripeThreshold, lazyReadSmallRanges, orcBloomFiltersEnabled, stats), sessionUser, configuration, hdfsEnvironment, startRowOffsetOfFile);
        boolean eagerload = false;
        if (indexes.isPresent()) {
            eagerload = indexes.get().stream().anyMatch(indexMetadata -> EAGER_LOAD_INDEX_ID.contains(indexMetadata.getIndex().getId()));
        }
        return new OrcPageSource(recordReader, columnAdaptations, orcDataSource, deletedRows, eagerload, systemMemoryUsage, stats);
    } catch (Exception e) {
        try {
            orcDataSource.close();
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = splitError(e, path, start, length);
        if (e instanceof BlockMissingException) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
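splitError(...) is referenced in both ORC examples but its body is not part of these snippets. Judging from the analogous message built in the Parquet examples, a plausible minimal sketch is the following (an assumption, not the actual hetu-core or boostkit-bigdata implementation):

import static java.lang.String.format;

import org.apache.hadoop.fs.Path;

final class OrcSplitErrorMessages
{
    private OrcSplitErrorMessages() {}

    // Assumed formatting, modeled on the Parquet example's
    // "Error opening Hive split %s (offset=%s, length=%s): %s" message.
    static String splitError(Throwable t, Path path, long start, long length)
    {
        return format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, t.getMessage());
    }
}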
Also used : OrcReader(io.prestosql.orc.OrcReader) DateTimeZone(org.joda.time.DateTimeZone) LONG(io.prestosql.orc.metadata.OrcType.OrcTypeKind.LONG) TupleDomainOrcPredicate(io.prestosql.orc.TupleDomainOrcPredicate) FileSystem(org.apache.hadoop.fs.FileSystem) HiveSessionProperties.isOrcStripeFooterCacheEnabled(io.prestosql.plugin.hive.HiveSessionProperties.isOrcStripeFooterCacheEnabled) Maps.uniqueIndex(com.google.common.collect.Maps.uniqueIndex) HiveSessionProperties.isOrcRowDataCacheEnabled(io.prestosql.plugin.hive.HiveSessionProperties.isOrcRowDataCacheEnabled) BlockMissingException(org.apache.hadoop.hdfs.BlockMissingException) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) AggregatedMemoryContext(io.prestosql.memory.context.AggregatedMemoryContext) RowType(io.prestosql.spi.type.RowType) HiveSessionProperties.getOrcStreamBufferSize(io.prestosql.plugin.hive.HiveSessionProperties.getOrcStreamBufferSize) Predicate(com.huawei.boostkit.omnidata.model.Predicate) StructTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo) ENGLISH(java.util.Locale.ENGLISH) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) DataReaderFactory(com.huawei.boostkit.omnidata.reader.DataReaderFactory) ColumnAdaptation(io.prestosql.plugin.hive.orc.OrcPageSource.ColumnAdaptation) PageSourceUtil.buildPushdownContext(io.prestosql.plugin.hive.util.PageSourceUtil.buildPushdownContext) FileFormatDataSourceStats(io.prestosql.plugin.hive.FileFormatDataSourceStats) DataSource(com.huawei.boostkit.omnidata.model.datasource.DataSource) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) HiveOffloadExpression(io.prestosql.plugin.hive.HiveOffloadExpression) ConnectorPageSource(io.prestosql.spi.connector.ConnectorPageSource) HIVE_BAD_DATA(io.prestosql.plugin.hive.HiveErrorCode.HIVE_BAD_DATA) OrcCacheProperties(io.prestosql.orc.OrcCacheProperties) Domain(io.prestosql.spi.predicate.Domain) SplitMetadata(io.prestosql.spi.heuristicindex.SplitMetadata) HiveSessionProperties.getOrcMaxBufferSize(io.prestosql.plugin.hive.HiveSessionProperties.getOrcMaxBufferSize) TupleDomainOrcPredicateBuilder(io.prestosql.orc.TupleDomainOrcPredicate.TupleDomainOrcPredicateBuilder) ArrayList(java.util.ArrayList) INITIAL_BATCH_SIZE(io.prestosql.orc.OrcReader.INITIAL_BATCH_SIZE) HdfsEnvironment(io.prestosql.plugin.hive.HdfsEnvironment) DynamicFilterSupplier(io.prestosql.spi.dynamicfilter.DynamicFilterSupplier) HiveSessionProperties.getOrcMaxReadBlockSize(io.prestosql.plugin.hive.HiveSessionProperties.getOrcMaxReadBlockSize) HiveSessionProperties.isOrcFileTailCacheEnabled(io.prestosql.plugin.hive.HiveSessionProperties.isOrcFileTailCacheEnabled) HivePageSourceFactory(io.prestosql.plugin.hive.HivePageSourceFactory) Properties(java.util.Properties) DeleteDeltaLocations(io.prestosql.plugin.hive.DeleteDeltaLocations) TypeManager(io.prestosql.spi.type.TypeManager) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) HiveSessionProperties.isOrcBloomFiltersEnabled(io.prestosql.plugin.hive.HiveSessionProperties.isOrcBloomFiltersEnabled) OrcPageSource.handleException(io.prestosql.plugin.hive.orc.OrcPageSource.handleException) STRUCT(io.prestosql.orc.metadata.OrcType.OrcTypeKind.STRUCT) HiveSessionProperties.getOrcLazyReadSmallRanges(io.prestosql.plugin.hive.HiveSessionProperties.getOrcLazyReadSmallRanges) OrcRecordReader(io.prestosql.orc.OrcRecordReader) HivePartitionKey(io.prestosql.plugin.hive.HivePartitionKey) 
HiveColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle) DataReader(com.huawei.boostkit.omnidata.reader.DataReader) OrcSerde(org.apache.hadoop.hive.ql.io.orc.OrcSerde) Seekable(org.apache.hadoop.fs.Seekable) HiveSessionProperties.getOrcTinyStripeThreshold(io.prestosql.plugin.hive.HiveSessionProperties.getOrcTinyStripeThreshold) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) Collectors.toMap(java.util.stream.Collectors.toMap) HiveConfig(io.prestosql.plugin.hive.HiveConfig) OrcDataSourceIdWithTimeStamp(io.prestosql.orc.OrcDataSourceIdWithTimeStamp) Path(org.apache.hadoop.fs.Path) OMNIDATA_CLIENT_TARGET_LIST(com.huawei.boostkit.omnidata.transfer.OmniDataProperty.OMNIDATA_CLIENT_TARGET_LIST) Type(io.prestosql.spi.type.Type) BIGINT(io.prestosql.spi.type.BigintType.BIGINT) PrestoException(io.prestosql.spi.PrestoException) OrcFileTail(io.prestosql.orc.OrcFileTail) OrcTypeKind(io.prestosql.orc.metadata.OrcType.OrcTypeKind) ImmutableMap(com.google.common.collect.ImmutableMap) INT(io.prestosql.orc.metadata.OrcType.OrcTypeKind.INT) AcidUtils.isFullAcidTable(org.apache.hadoop.hive.ql.io.AcidUtils.isFullAcidTable) HIVE_FILE_MISSING_COLUMN_NAMES(io.prestosql.plugin.hive.HiveErrorCode.HIVE_FILE_MISSING_COLUMN_NAMES) HiveSessionProperties.isOrcBloomFiltersCacheEnabled(io.prestosql.plugin.hive.HiveSessionProperties.isOrcBloomFiltersCacheEnabled) HivePushDownPageSource(io.prestosql.plugin.hive.HivePushDownPageSource) OrcDataSource(io.prestosql.orc.OrcDataSource) HiveType(io.prestosql.plugin.hive.HiveType) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) String.format(java.lang.String.format) OrcColumn(io.prestosql.orc.OrcColumn) OrcFileTailCacheKey(io.prestosql.orc.OrcFileTailCacheKey) DataSize(io.airlift.units.DataSize) List(java.util.List) HiveSessionProperties.getOrcMaxMergeDistance(io.prestosql.plugin.hive.HiveSessionProperties.getOrcMaxMergeDistance) Function.identity(java.util.function.Function.identity) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) IndexMetadata(io.prestosql.spi.heuristicindex.IndexMetadata) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) AggregatedMemoryContext.newSimpleAggregatedMemoryContext(io.prestosql.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext) HIVE_CANNOT_OPEN_SPLIT(io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT) Logger(io.airlift.log.Logger) FixedPageSource(io.prestosql.spi.connector.FixedPageSource) Strings.nullToEmpty(com.google.common.base.Strings.nullToEmpty) HiveSessionProperties(io.prestosql.plugin.hive.HiveSessionProperties) HashMap(java.util.HashMap) INTEGER(io.prestosql.spi.type.IntegerType.INTEGER) OptionalInt(java.util.OptionalInt) TaskSource(com.huawei.boostkit.omnidata.model.TaskSource) Inject(javax.inject.Inject) HIVE_MISSING_DATA(io.prestosql.plugin.hive.HiveErrorCode.HIVE_MISSING_DATA) ImmutableList(com.google.common.collect.ImmutableList) UncheckedExecutionException(com.google.common.util.concurrent.UncheckedExecutionException) Objects.requireNonNull(java.util.Objects.requireNonNull) PositionedReadable(org.apache.hadoop.fs.PositionedReadable) HiveSessionProperties.isOrcRowIndexCacheEnabled(io.prestosql.plugin.hive.HiveSessionProperties.isOrcRowIndexCacheEnabled) HiveUtil(io.prestosql.plugin.hive.HiveUtil) TupleDomain(io.prestosql.spi.predicate.TupleDomain) Page(io.prestosql.spi.Page) Maps(com.google.common.collect.Maps) 
OrcCacheStore(io.prestosql.orc.OrcCacheStore) OpenLooKengDeserializer(com.huawei.boostkit.omnidata.decode.impl.OpenLooKengDeserializer) OrcDataSourceId(io.prestosql.orc.OrcDataSourceId) OrcReader.handleCacheLoadException(io.prestosql.orc.OrcReader.handleCacheLoadException) InputStream(java.io.InputStream)
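The list above pairs AggregatedMemoryContext with its newSimpleAggregatedMemoryContext factory, which the page source factories use to track reader memory. A minimal, hypothetical sketch of that pattern (the class name and the "example-reader" allocation tag are illustrative, not taken from the project), assuming only the io.prestosql.memory.context API:

import io.prestosql.memory.context.AggregatedMemoryContext;
import io.prestosql.memory.context.LocalMemoryContext;

import static io.prestosql.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext;

public class MemoryContextSketch
{
    public static void main(String[] args)
    {
        // Root context owned by a single page source; child contexts roll their usage up into it.
        AggregatedMemoryContext systemMemoryContext = newSimpleAggregatedMemoryContext();

        // Hypothetical allocation tag; each reader component would normally create its own child context.
        LocalMemoryContext readerContext = systemMemoryContext.newLocalMemoryContext("example-reader");
        readerContext.setBytes(64 * 1024);

        // The root context reports the sum of all child reservations.
        System.out.println("tracked bytes: " + systemMemoryContext.getBytes());

        // Release the reservation when the reader is closed.
        readerContext.close();
    }
}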

Aggregations

Preconditions.checkArgument (com.google.common.base.Preconditions.checkArgument): 4
Strings.nullToEmpty (com.google.common.base.Strings.nullToEmpty): 4
ImmutableList (com.google.common.collect.ImmutableList): 4
ImmutableMap (com.google.common.collect.ImmutableMap): 4
DataSize (io.airlift.units.DataSize): 4
AggregatedMemoryContext (io.prestosql.memory.context.AggregatedMemoryContext): 4
AggregatedMemoryContext.newSimpleAggregatedMemoryContext (io.prestosql.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext): 4
DeleteDeltaLocations (io.prestosql.plugin.hive.DeleteDeltaLocations): 4
FileFormatDataSourceStats (io.prestosql.plugin.hive.FileFormatDataSourceStats): 4
HdfsEnvironment (io.prestosql.plugin.hive.HdfsEnvironment): 4
HiveColumnHandle (io.prestosql.plugin.hive.HiveColumnHandle): 4
HiveConfig (io.prestosql.plugin.hive.HiveConfig): 4
HIVE_BAD_DATA (io.prestosql.plugin.hive.HiveErrorCode.HIVE_BAD_DATA): 4
HIVE_CANNOT_OPEN_SPLIT (io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT): 4
HIVE_MISSING_DATA (io.prestosql.plugin.hive.HiveErrorCode.HIVE_MISSING_DATA): 4
HivePageSourceFactory (io.prestosql.plugin.hive.HivePageSourceFactory): 4
PrestoException (io.prestosql.spi.PrestoException): 3
ConnectorPageSource (io.prestosql.spi.connector.ConnectorPageSource): 3
ConnectorSession (io.prestosql.spi.connector.ConnectorSession): 3
DynamicFilterSupplier (io.prestosql.spi.dynamicfilter.DynamicFilterSupplier): 3
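HIVE_CANNOT_OPEN_SPLIT and PrestoException recur together in these aggregations because split-open failures are surfaced as a PrestoException carrying that error code. A minimal, hypothetical sketch of the pattern (the helper name and message text are illustrative, not copied from the project):

import io.prestosql.spi.PrestoException;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

import static io.prestosql.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT;
import static java.lang.String.format;

public class SplitOpenErrorSketch
{
    // Hypothetical helper: wraps an I/O failure in a PrestoException tagged with HIVE_CANNOT_OPEN_SPLIT
    // so the engine can classify and report the failed split.
    static PrestoException cannotOpenSplit(Path path, long start, long length, IOException cause)
    {
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s",
                path, start, length, cause.getMessage());
        return new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, cause);
    }
}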