
Example 1 with StatisticsKind

Use of org.apache.drill.metastore.statistics.StatisticsKind in the Apache Drill project.

From the class MetadataControllerBatch, the method getColumnStatistics:

private Map<SchemaPath, ColumnStatistics<?>> getColumnStatistics(TupleReader reader, TupleMetadata columnMetadata, Long rowCount) {
    Multimap<String, StatisticsHolder<?>> columnStatistics = ArrayListMultimap.create();
    Map<String, TypeProtos.MinorType> columnTypes = new HashMap<>();
    for (ColumnMetadata column : columnMetadata) {
        if (AnalyzeColumnUtils.isColumnStatisticsField(column.name())) {
            String fieldName = AnalyzeColumnUtils.getColumnName(column.name());
            StatisticsKind<?> statisticsKind = AnalyzeColumnUtils.getStatisticsKind(column.name());
            columnStatistics.put(fieldName, new StatisticsHolder<>(getConvertedColumnValue(reader.column(column.name())), statisticsKind));
            if (statisticsKind.getName().equalsIgnoreCase(ColumnStatisticsKind.MIN_VALUE.getName())
                || statisticsKind.getName().equalsIgnoreCase(ColumnStatisticsKind.MAX_VALUE.getName())) {
                columnTypes.putIfAbsent(fieldName, column.type());
            }
        }
    }
    // derives NULLS_COUNT from the row count and NON_NULL_VALUES_COUNT so it can be used during filter pushdown
    if (rowCount != null) {
        Map<String, StatisticsHolder<?>> nullsCountColumnStatistics = new HashMap<>();
        columnStatistics.asMap().forEach((key, value) -> value.stream()
            .filter(statisticsHolder -> statisticsHolder.getStatisticsKind() == ColumnStatisticsKind.NON_NULL_VALUES_COUNT)
            .findAny()
            .map(statisticsHolder -> (Long) statisticsHolder.getStatisticsValue())
            .ifPresent(nonNullCount -> nullsCountColumnStatistics.put(key,
                new StatisticsHolder<>(rowCount - nonNullCount, ColumnStatisticsKind.NULLS_COUNT))));
        nullsCountColumnStatistics.forEach(columnStatistics::put);
    }
    Map<SchemaPath, ColumnStatistics<?>> resultingStats = new HashMap<>();
    columnStatistics.asMap().forEach((fieldName, statisticsHolders) ->
        resultingStats.put(SchemaPath.parseFromString(fieldName),
            new ColumnStatistics<>(statisticsHolders, columnTypes.get(fieldName))));
    return resultingStats;
}
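
For illustration, the following is a minimal, hypothetical sketch (not taken from Drill's sources) that uses the same metastore statistics API as getColumnStatistics above to assemble ColumnStatistics for a single made-up column; the column name, values, and row count are invented, and the same imports as in the example are assumed.

// Sketch only: build ColumnStatistics for one hypothetical INT column "n_nationkey",
// mirroring the StatisticsHolder / ColumnStatisticsKind usage in getColumnStatistics above.
List<StatisticsHolder<?>> holders = new ArrayList<>();
holders.add(new StatisticsHolder<>(0, ColumnStatisticsKind.MIN_VALUE));
holders.add(new StatisticsHolder<>(24, ColumnStatisticsKind.MAX_VALUE));
holders.add(new StatisticsHolder<>(25L, ColumnStatisticsKind.NON_NULL_VALUES_COUNT));
// derive NULLS_COUNT from an assumed total row count, as the rowCount branch above does
long rowCount = 25L;
holders.add(new StatisticsHolder<>(rowCount - 25L, ColumnStatisticsKind.NULLS_COUNT));
ColumnStatistics<?> stats = new ColumnStatistics<>(holders, TypeProtos.MinorType.INT);
Map<SchemaPath, ColumnStatistics<?>> result = new HashMap<>();
result.put(SchemaPath.parseFromString("n_nationkey"), stats);

The resulting map has the same shape as the value returned by getColumnStatistics.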

Example 2 with StatisticsKind

Use of org.apache.drill.metastore.statistics.StatisticsKind in the Apache Drill project.

From the class ConvertMetadataAggregateToDirectScanRule, the method populateRecords:

/**
 * Populates the records list with row group metadata and builds a DirectGroupScan over it.
 */
private DirectGroupScan populateRecords(Collection<SchemaPath> interestingColumns, Map<String, Class<?>> schema, DrillScanRel scan, ColumnNamesOptions columnNamesOptions) throws IOException {
    ParquetGroupScan parquetGroupScan = (ParquetGroupScan) scan.getGroupScan();
    DrillTable drillTable = Utilities.getDrillTable(scan.getTable());
    Multimap<Path, RowGroupMetadata> rowGroupsMetadataMap = parquetGroupScan.getMetadataProvider().getRowGroupsMetadataMap();
    Table<String, Integer, Object> recordsTable = HashBasedTable.create();
    FormatSelection selection = (FormatSelection) drillTable.getSelection();
    List<String> partitionColumnNames = ColumnExplorer.getPartitionColumnNames(selection.getSelection(), columnNamesOptions);
    FileSystem rawFs = selection.getSelection().getSelectionRoot().getFileSystem(new Configuration());
    DrillFileSystem fileSystem = ImpersonationUtil.createFileSystem(ImpersonationUtil.getProcessUserName(), rawFs.getConf());
    int rowIndex = 0;
    for (Map.Entry<Path, RowGroupMetadata> rgEntry : rowGroupsMetadataMap.entries()) {
        Path path = rgEntry.getKey();
        RowGroupMetadata rowGroupMetadata = rgEntry.getValue();
        List<String> partitionValues = ColumnExplorer.listPartitionValues(path, selection.getSelection().getSelectionRoot(), false);
        for (int i = 0; i < partitionValues.size(); i++) {
            String partitionColumnName = partitionColumnNames.get(i);
            recordsTable.put(partitionColumnName, rowIndex, partitionValues.get(i));
        }
        recordsTable.put(MetastoreAnalyzeConstants.LOCATION_FIELD, rowIndex, ImplicitFileColumns.FQN.getValue(path));
        recordsTable.put(columnNamesOptions.rowGroupIndex(), rowIndex, String.valueOf(rowGroupMetadata.getRowGroupIndex()));
        if (interestingColumns == null) {
            interestingColumns = rowGroupMetadata.getColumnsStatistics().keySet();
        }
        // populates record list with row group column metadata
        for (SchemaPath schemaPath : interestingColumns) {
            ColumnStatistics<?> columnStatistics = rowGroupMetadata.getColumnsStatistics().get(schemaPath);
            // do not gather statistics for array columns as they are not supported by the Metastore
            if (containsArrayColumn(rowGroupMetadata.getSchema(), schemaPath)) {
                continue;
            }
            if (IsPredicate.isNullOrEmpty(columnStatistics)) {
                logger.debug("Statistics for {} column wasn't found within {} row group.", schemaPath, path);
                return null;
            }
            for (StatisticsKind<?> statisticsKind : AnalyzeColumnUtils.COLUMN_STATISTICS_FUNCTIONS.keySet()) {
                Object statsValue;
                if (statisticsKind.getName().equalsIgnoreCase(TableStatisticsKind.ROW_COUNT.getName())) {
                    statsValue = TableStatisticsKind.ROW_COUNT.getValue(rowGroupMetadata);
                } else if (statisticsKind.getName().equalsIgnoreCase(ColumnStatisticsKind.NON_NULL_VALUES_COUNT.getName())) {
                    statsValue = TableStatisticsKind.ROW_COUNT.getValue(rowGroupMetadata) - ColumnStatisticsKind.NULLS_COUNT.getFrom(columnStatistics);
                } else {
                    statsValue = columnStatistics.get(statisticsKind);
                }
                String columnStatisticsFieldName = AnalyzeColumnUtils.getColumnStatisticsFieldName(schemaPath.toExpr(), statisticsKind);
                if (statsValue != null) {
                    schema.putIfAbsent(columnStatisticsFieldName, statsValue.getClass());
                    recordsTable.put(columnStatisticsFieldName, rowIndex, statsValue);
                } else {
                    recordsTable.put(columnStatisticsFieldName, rowIndex, BaseParquetMetadataProvider.NULL_VALUE);
                }
            }
        }
        // populates record list with row group metadata
        for (StatisticsKind<?> statisticsKind : AnalyzeColumnUtils.META_STATISTICS_FUNCTIONS.keySet()) {
            String metadataStatisticsFieldName = AnalyzeColumnUtils.getMetadataStatisticsFieldName(statisticsKind);
            Object statisticsValue = rowGroupMetadata.getStatistic(statisticsKind);
            if (statisticsValue != null) {
                schema.putIfAbsent(metadataStatisticsFieldName, statisticsValue.getClass());
                recordsTable.put(metadataStatisticsFieldName, rowIndex, statisticsValue);
            } else {
                recordsTable.put(metadataStatisticsFieldName, rowIndex, BaseParquetMetadataProvider.NULL_VALUE);
            }
        }
        // populates record list internal columns
        recordsTable.put(MetastoreAnalyzeConstants.SCHEMA_FIELD, rowIndex, rowGroupMetadata.getSchema().jsonString());
        recordsTable.put(columnNamesOptions.rowGroupStart(), rowIndex, Long.toString(rowGroupMetadata.getStatistic(() -> ExactStatisticsConstants.START)));
        recordsTable.put(columnNamesOptions.rowGroupLength(), rowIndex, Long.toString(rowGroupMetadata.getStatistic(() -> ExactStatisticsConstants.LENGTH)));
        recordsTable.put(columnNamesOptions.lastModifiedTime(), rowIndex, String.valueOf(fileSystem.getFileStatus(path).getModificationTime()));
        rowIndex++;
    }
    // DynamicPojoRecordReader requires a LinkedHashMap whose field order
    // corresponds to the value positions in the record list.
    LinkedHashMap<String, Class<?>> orderedSchema = new LinkedHashMap<>();
    for (String s : recordsTable.rowKeySet()) {
        Class<?> clazz = schema.get(s);
        if (clazz != null) {
            orderedSchema.put(s, clazz);
        } else {
            return null;
        }
    }
    IntFunction<List<Object>> collectRecord = currentIndex -> orderedSchema.keySet().stream()
        .map(column -> recordsTable.get(column, currentIndex))
        .map(value -> value != BaseParquetMetadataProvider.NULL_VALUE ? value : null)
        .collect(Collectors.toList());
    List<List<Object>> records = IntStream.range(0, rowIndex).mapToObj(collectRecord).collect(Collectors.toList());
    DynamicPojoRecordReader<?> reader = new DynamicPojoRecordReader<>(orderedSchema, records);
    ScanStats scanStats = new ScanStats(ScanStats.GroupScanProperty.EXACT_ROW_COUNT, records.size(), 1, schema.size());
    return new DirectGroupScan(reader, scanStats);
}
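
To make the last few lines more concrete, here is a small, hypothetical sketch (not from Drill's sources) that hand-builds a two-column ordered schema and record list and wraps them into a DirectGroupScan the same way populateRecords does at the end; the column names, paths, and counts are invented, and the same imports as in the example (plus java.util.Arrays) are assumed.

// Sketch only: feed hand-built records into DynamicPojoRecordReader and wrap them
// into a DirectGroupScan, mirroring the tail of populateRecords above.
LinkedHashMap<String, Class<?>> orderedSchema = new LinkedHashMap<>();
orderedSchema.put("location", String.class);   // hypothetical column
orderedSchema.put("rowCount", Long.class);     // hypothetical column
List<List<Object>> records = new ArrayList<>();
records.add(Arrays.<Object>asList("/data/file1.parquet", 100L));
records.add(Arrays.<Object>asList("/data/file2.parquet", 250L));
DynamicPojoRecordReader<?> reader = new DynamicPojoRecordReader<>(orderedSchema, records);
// EXACT_ROW_COUNT: the record count is known precisely at planning time
ScanStats scanStats = new ScanStats(ScanStats.GroupScanProperty.EXACT_ROW_COUNT,
    records.size(), 1, orderedSchema.size());
DirectGroupScan scan = new DirectGroupScan(reader, scanStats);

The DirectGroupScan built this way is what ConvertMetadataAggregateToDirectScanRule substitutes for the original scan when the rule fires.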
