Use of org.apache.drill.metastore.statistics.ColumnStatistics in project drill by apache.
From the class MetadataControllerBatch, method getTableMetadata.
private BaseTableMetadata getTableMetadata(TupleReader reader, List<StatisticsHolder<?>> metadataStatistics, Map<SchemaPath, ColumnStatistics<?>> columnStatistics) {
List<StatisticsHolder<?>> updatedMetaStats = new ArrayList<>(metadataStatistics);
updatedMetaStats.add(new StatisticsHolder<>(popConfig.getContext().analyzeMetadataLevel(), TableStatisticsKind.ANALYZE_METADATA_LEVEL));
MetadataInfo metadataInfo = MetadataInfo.builder().type(MetadataType.TABLE).key(MetadataInfo.GENERAL_INFO_KEY).build();
BaseTableMetadata tableMetadata = BaseTableMetadata.builder()
    .tableInfo(tableInfo)
    .metadataInfo(metadataInfo)
    .columnsStatistics(columnStatistics)
    .metadataStatistics(updatedMetaStats)
    .partitionKeys(Collections.emptyMap())
    .interestingColumns(popConfig.getContext().interestingColumns())
    .location(popConfig.getContext().location())
    .lastModifiedTime(Long.parseLong(reader.column(columnNamesOptions.lastModifiedTime()).scalar().getString()))
    .schema(TupleMetadata.of(reader.column(MetastoreAnalyzeConstants.SCHEMA_FIELD).scalar().getString()))
    .build();
if (context.getOptions().getOption(PlannerSettings.STATISTICS_USE)) {
DrillStatsTable statistics = new DrillStatsTable(statisticsCollector.getStatistics());
Map<SchemaPath, ColumnStatistics<?>> tableColumnStatistics = ParquetTableMetadataUtils.getColumnStatistics(tableMetadata.getSchema(), statistics);
tableMetadata = tableMetadata.cloneWithStats(tableColumnStatistics, DrillStatsTable.getEstimatedTableStats(statistics));
}
return tableMetadata;
}
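As a minimal sketch (not part of Drill, assuming the same imports and slf4j logger as the surrounding class), the per-column statistics map handed to getTableMetadata can be read back with the same accessors used elsewhere in these examples; the column name is illustrative:

private static void logOrderKeyStats(Map<SchemaPath, ColumnStatistics<?>> columnStatistics) {
  ColumnStatistics<?> stats = columnStatistics.get(SchemaPath.getSimplePath("o_orderkey")); // hypothetical column
  if (stats != null) {
    Long nullsCount = ColumnStatisticsKind.NULLS_COUNT.getFrom(stats); // typed accessor for an exact statistic
    Object minValue = stats.get(ColumnStatisticsKind.MIN_VALUE);       // raw lookup by statistics kind
    logger.debug("o_orderkey: min={}, nulls={}", minValue, nullsCount);
  }
}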
Use of org.apache.drill.metastore.statistics.ColumnStatistics in project drill by apache.
From the class MetadataControllerBatch, method getMetadataUnits.
private List<TableMetadataUnit> getMetadataUnits(TupleReader reader, int nestingLevel) {
List<TableMetadataUnit> metadataUnits = new ArrayList<>();
TupleMetadata columnMetadata = reader.tupleSchema();
ObjectReader metadataColumnReader = reader.column(MetastoreAnalyzeConstants.METADATA_TYPE);
Preconditions.checkNotNull(metadataColumnReader, "metadataType column wasn't found");
ObjectReader underlyingMetadataReader = reader.column(MetastoreAnalyzeConstants.COLLECTED_MAP_FIELD);
if (underlyingMetadataReader != null) {
if (!underlyingMetadataReader.schema().isArray()) {
throw new IllegalStateException("Incoming vector with name `collected_map` should be repeated map");
}
// current row contains information about underlying metadata
ArrayReader array = underlyingMetadataReader.array();
while (array.next()) {
metadataUnits.addAll(getMetadataUnits(array.tuple(), nestingLevel + 1));
}
}
List<StatisticsHolder<?>> metadataStatistics = getMetadataStatistics(reader, columnMetadata);
Long rowCount = (Long) metadataStatistics.stream()
    .filter(statisticsHolder -> statisticsHolder.getStatisticsKind() == TableStatisticsKind.ROW_COUNT)
    .findAny()
    .map(StatisticsHolder::getStatisticsValue)
    .orElse(null);
Map<SchemaPath, ColumnStatistics<?>> columnStatistics = getColumnStatistics(reader, columnMetadata, rowCount);
MetadataType metadataType = MetadataType.valueOf(metadataColumnReader.scalar().getString());
BaseMetadata metadata;
switch(metadataType) {
case TABLE:
{
metadata = getTableMetadata(reader, metadataStatistics, columnStatistics);
break;
}
case SEGMENT:
{
metadata = getSegmentMetadata(reader, metadataStatistics, columnStatistics, nestingLevel);
break;
}
case PARTITION:
{
metadata = getPartitionMetadata(reader, metadataStatistics, columnStatistics, nestingLevel);
break;
}
case FILE:
{
metadata = getFileMetadata(reader, metadataStatistics, columnStatistics, nestingLevel);
break;
}
case ROW_GROUP:
{
metadata = getRowGroupMetadata(reader, metadataStatistics, columnStatistics, nestingLevel);
break;
}
default:
throw new UnsupportedOperationException("Unsupported metadata type: " + metadataType);
}
metadataUnits.add(metadata.toMetadataUnit());
return metadataUnits;
}
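The ROW_COUNT lookup above compares statistics kinds by identity, which works because kinds such as TableStatisticsKind.ROW_COUNT are singleton constants. A hypothetical helper (not part of Drill) doing the same lookup by kind name, composed only of accessors that appear in these examples:

private static Object findStatistic(List<StatisticsHolder<?>> holders, StatisticsKind<?> kind) {
  return holders.stream()
      .filter(holder -> holder.getStatisticsKind().getName().equalsIgnoreCase(kind.getName()))
      .findAny()
      .map(StatisticsHolder::getStatisticsValue)
      .orElse(null);
}
// usage: Long rowCount = (Long) findStatistic(metadataStatistics, TableStatisticsKind.ROW_COUNT);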
Use of org.apache.drill.metastore.statistics.ColumnStatistics in project drill by apache.
From the class MetadataControllerBatch, method getColumnStatistics.
private Map<SchemaPath, ColumnStatistics<?>> getColumnStatistics(TupleReader reader, TupleMetadata columnMetadata, Long rowCount) {
Multimap<String, StatisticsHolder<?>> columnStatistics = ArrayListMultimap.create();
Map<String, TypeProtos.MinorType> columnTypes = new HashMap<>();
for (ColumnMetadata column : columnMetadata) {
if (AnalyzeColumnUtils.isColumnStatisticsField(column.name())) {
String fieldName = AnalyzeColumnUtils.getColumnName(column.name());
StatisticsKind<?> statisticsKind = AnalyzeColumnUtils.getStatisticsKind(column.name());
columnStatistics.put(fieldName, new StatisticsHolder<>(getConvertedColumnValue(reader.column(column.name())), statisticsKind));
if (statisticsKind.getName().equalsIgnoreCase(ColumnStatisticsKind.MIN_VALUE.getName())
    || statisticsKind.getName().equalsIgnoreCase(ColumnStatisticsKind.MAX_VALUE.getName())) {
columnTypes.putIfAbsent(fieldName, column.type());
}
}
}
// derives NULLS_COUNT from ROW_COUNT and NON_NULL_VALUES_COUNT so it can be used during filter pushdown
if (rowCount != null) {
Map<String, StatisticsHolder<?>> nullsCountColumnStatistics = new HashMap<>();
columnStatistics.asMap().forEach((key, value) -> value.stream()
    .filter(statisticsHolder -> statisticsHolder.getStatisticsKind() == ColumnStatisticsKind.NON_NULL_VALUES_COUNT)
    .findAny()
    .map(statisticsHolder -> (Long) statisticsHolder.getStatisticsValue())
    .ifPresent(nonNullCount ->
        nullsCountColumnStatistics.put(key, new StatisticsHolder<>(rowCount - nonNullCount, ColumnStatisticsKind.NULLS_COUNT))));
nullsCountColumnStatistics.forEach(columnStatistics::put);
}
Map<SchemaPath, ColumnStatistics<?>> resultingStats = new HashMap<>();
columnStatistics.asMap().forEach((fieldName, statisticsHolders) ->
    resultingStats.put(SchemaPath.parseFromString(fieldName),
        new ColumnStatistics<>(statisticsHolders, columnTypes.get(fieldName))));
return resultingStats;
}
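For reference, one entry of the resulting map can be built by hand with the same ColumnStatistics constructor the method uses; a sketch with an illustrative column name, values, and type (assumes the usual Drill imports):

List<StatisticsHolder<?>> holders = Arrays.asList(
    new StatisticsHolder<>(0L, ColumnStatisticsKind.MIN_VALUE),
    new StatisticsHolder<>(100L, ColumnStatisticsKind.MAX_VALUE),
    new StatisticsHolder<>(5L, ColumnStatisticsKind.NULLS_COUNT));
Map<SchemaPath, ColumnStatistics<?>> byColumn = new HashMap<>();
byColumn.put(SchemaPath.getSimplePath("o_orderkey"),               // hypothetical column
    new ColumnStatistics<>(holders, TypeProtos.MinorType.BIGINT)); // same constructor as above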
Use of org.apache.drill.metastore.statistics.ColumnStatistics in project drill by apache.
From the class ConvertMetadataAggregateToDirectScanRule, method populateRecords.
/**
* Populates records list with row group metadata.
*/
private DirectGroupScan populateRecords(Collection<SchemaPath> interestingColumns, Map<String, Class<?>> schema, DrillScanRel scan, ColumnNamesOptions columnNamesOptions) throws IOException {
ParquetGroupScan parquetGroupScan = (ParquetGroupScan) scan.getGroupScan();
DrillTable drillTable = Utilities.getDrillTable(scan.getTable());
Multimap<Path, RowGroupMetadata> rowGroupsMetadataMap = parquetGroupScan.getMetadataProvider().getRowGroupsMetadataMap();
Table<String, Integer, Object> recordsTable = HashBasedTable.create();
FormatSelection selection = (FormatSelection) drillTable.getSelection();
List<String> partitionColumnNames = ColumnExplorer.getPartitionColumnNames(selection.getSelection(), columnNamesOptions);
FileSystem rawFs = selection.getSelection().getSelectionRoot().getFileSystem(new Configuration());
DrillFileSystem fileSystem = ImpersonationUtil.createFileSystem(ImpersonationUtil.getProcessUserName(), rawFs.getConf());
int rowIndex = 0;
for (Map.Entry<Path, RowGroupMetadata> rgEntry : rowGroupsMetadataMap.entries()) {
Path path = rgEntry.getKey();
RowGroupMetadata rowGroupMetadata = rgEntry.getValue();
List<String> partitionValues = ColumnExplorer.listPartitionValues(path, selection.getSelection().getSelectionRoot(), false);
for (int i = 0; i < partitionValues.size(); i++) {
String partitionColumnName = partitionColumnNames.get(i);
recordsTable.put(partitionColumnName, rowIndex, partitionValues.get(i));
}
recordsTable.put(MetastoreAnalyzeConstants.LOCATION_FIELD, rowIndex, ImplicitFileColumns.FQN.getValue(path));
recordsTable.put(columnNamesOptions.rowGroupIndex(), rowIndex, String.valueOf(rowGroupMetadata.getRowGroupIndex()));
if (interestingColumns == null) {
interestingColumns = rowGroupMetadata.getColumnsStatistics().keySet();
}
// populates record list with row group column metadata
for (SchemaPath schemaPath : interestingColumns) {
ColumnStatistics<?> columnStatistics = rowGroupMetadata.getColumnsStatistics().get(schemaPath);
// do not gather statistics for array columns as it is not supported by Metastore
if (containsArrayColumn(rowGroupMetadata.getSchema(), schemaPath)) {
continue;
}
if (IsPredicate.isNullOrEmpty(columnStatistics)) {
logger.debug("Statistics for {} column wasn't found within {} row group.", schemaPath, path);
return null;
}
for (StatisticsKind<?> statisticsKind : AnalyzeColumnUtils.COLUMN_STATISTICS_FUNCTIONS.keySet()) {
Object statsValue;
if (statisticsKind.getName().equalsIgnoreCase(TableStatisticsKind.ROW_COUNT.getName())) {
statsValue = TableStatisticsKind.ROW_COUNT.getValue(rowGroupMetadata);
} else if (statisticsKind.getName().equalsIgnoreCase(ColumnStatisticsKind.NON_NULL_VALUES_COUNT.getName())) {
statsValue = TableStatisticsKind.ROW_COUNT.getValue(rowGroupMetadata) - ColumnStatisticsKind.NULLS_COUNT.getFrom(columnStatistics);
} else {
statsValue = columnStatistics.get(statisticsKind);
}
String columnStatisticsFieldName = AnalyzeColumnUtils.getColumnStatisticsFieldName(schemaPath.toExpr(), statisticsKind);
if (statsValue != null) {
schema.putIfAbsent(columnStatisticsFieldName, statsValue.getClass());
recordsTable.put(columnStatisticsFieldName, rowIndex, statsValue);
} else {
recordsTable.put(columnStatisticsFieldName, rowIndex, BaseParquetMetadataProvider.NULL_VALUE);
}
}
}
// populates record list with row group metadata
for (StatisticsKind<?> statisticsKind : AnalyzeColumnUtils.META_STATISTICS_FUNCTIONS.keySet()) {
String metadataStatisticsFieldName = AnalyzeColumnUtils.getMetadataStatisticsFieldName(statisticsKind);
Object statisticsValue = rowGroupMetadata.getStatistic(statisticsKind);
if (statisticsValue != null) {
schema.putIfAbsent(metadataStatisticsFieldName, statisticsValue.getClass());
recordsTable.put(metadataStatisticsFieldName, rowIndex, statisticsValue);
} else {
recordsTable.put(metadataStatisticsFieldName, rowIndex, BaseParquetMetadataProvider.NULL_VALUE);
}
}
// populates record list internal columns
recordsTable.put(MetastoreAnalyzeConstants.SCHEMA_FIELD, rowIndex, rowGroupMetadata.getSchema().jsonString());
recordsTable.put(columnNamesOptions.rowGroupStart(), rowIndex, Long.toString(rowGroupMetadata.getStatistic(() -> ExactStatisticsConstants.START)));
recordsTable.put(columnNamesOptions.rowGroupLength(), rowIndex, Long.toString(rowGroupMetadata.getStatistic(() -> ExactStatisticsConstants.LENGTH)));
recordsTable.put(columnNamesOptions.lastModifiedTime(), rowIndex, String.valueOf(fileSystem.getFileStatus(path).getModificationTime()));
rowIndex++;
}
// DynamicPojoRecordReader requires a LinkedHashMap whose field order
// corresponds to the value positions in the record list.
LinkedHashMap<String, Class<?>> orderedSchema = new LinkedHashMap<>();
for (String s : recordsTable.rowKeySet()) {
Class<?> clazz = schema.get(s);
if (clazz != null) {
orderedSchema.put(s, clazz);
} else {
return null;
}
}
IntFunction<List<Object>> collectRecord = currentIndex -> orderedSchema.keySet().stream()
    .map(column -> recordsTable.get(column, currentIndex))
    .map(value -> value != BaseParquetMetadataProvider.NULL_VALUE ? value : null)
    .collect(Collectors.toList());
List<List<Object>> records = IntStream.range(0, rowIndex)
    .mapToObj(collectRecord)
    .collect(Collectors.toList());
DynamicPojoRecordReader<?> reader = new DynamicPojoRecordReader<>(orderedSchema, records);
ScanStats scanStats = new ScanStats(ScanStats.GroupScanProperty.EXACT_ROW_COUNT, records.size(), 1, schema.size());
return new DirectGroupScan(reader, scanStats);
}
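The ordered-schema handling above follows the DynamicPojoRecordReader contract: the LinkedHashMap key order must match the position of values in every record. A small hand-built sketch with made-up column names and values (assuming the same imports as the rule class):

LinkedHashMap<String, Class<?>> orderedSchema = new LinkedHashMap<>();
orderedSchema.put("fqn", String.class);      // key order defines the value position in each record
orderedSchema.put("rowCount", Long.class);
List<List<Object>> records = Arrays.asList(
    Arrays.asList("/data/part-0.parquet", 1000L),
    Arrays.asList("/data/part-1.parquet", 2500L));
DynamicPojoRecordReader<?> reader = new DynamicPojoRecordReader<>(orderedSchema, records);
ScanStats scanStats = new ScanStats(ScanStats.GroupScanProperty.EXACT_ROW_COUNT, records.size(), 1, orderedSchema.size());
DirectGroupScan scan = new DirectGroupScan(reader, scanStats);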
Use of org.apache.drill.metastore.statistics.ColumnStatistics in project drill by apache.
From the class AbstractParquetScanBatchCreator, method getBatch.
protected ScanBatch getBatch(ExecutorFragmentContext context, AbstractParquetRowGroupScan rowGroupScan, OperatorContext oContext) throws ExecutionSetupException {
final ColumnExplorer columnExplorer = new ColumnExplorer(context.getOptions(), rowGroupScan.getColumns());
if (!columnExplorer.isStarQuery()) {
rowGroupScan = rowGroupScan.copy(columnExplorer.getTableColumns());
rowGroupScan.setOperatorId(rowGroupScan.getOperatorId());
}
AbstractDrillFileSystemManager fsManager = getDrillFileSystemCreator(oContext, context.getOptions());
// keep footers in a map to avoid re-reading them
Map<Path, ParquetMetadata> footers = new HashMap<>();
List<CommonParquetRecordReader> readers = new LinkedList<>();
List<Map<String, String>> implicitColumns = new ArrayList<>();
Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();
ParquetReaderConfig readerConfig = rowGroupScan.getReaderConfig();
// the first row group and its footer are kept, to be scanned in case ALL row groups are pruned out
RowGroupReadEntry firstRowGroup = null;
ParquetMetadata firstFooter = null;
// for stats
long rowGroupsPruned = 0;
try {
LogicalExpression filterExpr = rowGroupScan.getFilter();
// was a filter given, and is it not just a "TRUE" predicate?
boolean doRuntimePruning = filterExpr != null
    && !((filterExpr instanceof ValueExpressions.BooleanExpression) && ((ValueExpressions.BooleanExpression) filterExpr).getBoolean());
// Runtime pruning: Avoid recomputing metadata objects for each row-group in case they use the same file
// by keeping the following objects computed earlier (relies on same file being in consecutive rowgroups)
Path prevRowGroupPath = null;
Metadata_V4.ParquetTableMetadata_v4 tableMetadataV4 = null;
Metadata_V4.ParquetFileAndRowCountMetadata fileMetadataV4 = null;
FilterPredicate<?> filterPredicate = null;
Set<SchemaPath> schemaPathsInExpr = null;
Set<SchemaPath> columnsInExpr = null;
// for debug/info logging
long totalPruneTime = 0;
long totalRowGroups = rowGroupScan.getRowGroupReadEntries().size();
Stopwatch pruneTimer = Stopwatch.createUnstarted();
// If pruning - Prepare the predicate and the columns before the FOR LOOP
if (doRuntimePruning) {
filterPredicate = AbstractGroupScanWithMetadata.getFilterPredicate(filterExpr, context,
    context.getFunctionRegistry(), context.getOptions(),
    true, true /* supports file implicit columns */,
    rowGroupScan.getSchema());
// Extract only the relevant columns from the filter (sans implicit columns, if any)
schemaPathsInExpr = filterExpr.accept(FilterEvaluatorUtils.FieldReferenceFinder.INSTANCE, null);
columnsInExpr = new HashSet<>();
String partitionColumnLabel = context.getOptions().getOption(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL).string_val;
for (SchemaPath path : schemaPathsInExpr) {
if (rowGroupScan.supportsFileImplicitColumns() && path.toString().matches(partitionColumnLabel + "\\d+")) {
// skip implicit columns like dir0, dir1
continue;
}
columnsInExpr.add(SchemaPath.getSimplePath(path.getRootSegmentPath()));
}
// just in case: if no columns - cancel pruning
doRuntimePruning = !columnsInExpr.isEmpty();
}
for (RowGroupReadEntry rowGroup : rowGroupScan.getRowGroupReadEntries()) {
/*
Here we could store a map from file names to footers, to prevent re-reading the footer for each row group in a file
TODO - to prevent reading the footer again in the parquet record reader (it is read earlier in the ParquetStorageEngine)
we should add more information to the RowGroupInfo that will be populated upon the first read to
provide the reader with all of the file meta-data it needs
These fields will be added to the constructor below
*/
Stopwatch timer = logger.isTraceEnabled() ? Stopwatch.createUnstarted() : null;
DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(rowGroup), rowGroup.getPath());
if (!footers.containsKey(rowGroup.getPath())) {
if (timer != null) {
timer.start();
}
ParquetMetadata footer = readFooter(fs.getConf(), rowGroup.getPath(), readerConfig);
if (timer != null) {
long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}", "", rowGroup.getPath(), "", 0, 0, 0, timeToRead);
}
footers.put(rowGroup.getPath(), footer);
}
ParquetMetadata footer = footers.get(rowGroup.getPath());
if (doRuntimePruning) { // skip when no filter or filter is TRUE
pruneTimer.start();
//
// Perform the Run-Time Pruning - i.e. Skip/prune this row group if the match fails
//
// default (in case of exception) - do not prune this row group
RowsMatch matchResult = RowsMatch.ALL;
if (rowGroup.isEmpty()) {
matchResult = RowsMatch.NONE;
} else {
int rowGroupIndex = rowGroup.getRowGroupIndex();
long footerRowCount = footer.getBlocks().get(rowGroupIndex).getRowCount();
// When starting a new file, or at the first time - Initialize the path specific metadata
if (!rowGroup.getPath().equals(prevRowGroupPath)) {
// Create a table metadata (V4)
tableMetadataV4 = new Metadata_V4.ParquetTableMetadata_v4();
// The file status for this file
FileStatus fileStatus = fs.getFileStatus(rowGroup.getPath());
// The file metadata (only for the columns used in the filter)
fileMetadataV4 = Metadata.getParquetFileMetadata_v4(tableMetadataV4, footer, fileStatus, fs, false, true, columnsInExpr, readerConfig);
// for next time
prevRowGroupPath = rowGroup.getPath();
}
MetadataBase.RowGroupMetadata rowGroupMetadata = fileMetadataV4.getFileMetadata().getRowGroups().get(rowGroup.getRowGroupIndex());
Map<SchemaPath, ColumnStatistics<?>> columnsStatistics = ParquetTableMetadataUtils.getRowGroupColumnStatistics(tableMetadataV4, rowGroupMetadata);
try {
Map<SchemaPath, TypeProtos.MajorType> intermediateColumns = ParquetTableMetadataUtils.getIntermediateFields(tableMetadataV4, rowGroupMetadata);
Map<SchemaPath, TypeProtos.MajorType> rowGroupFields = ParquetTableMetadataUtils.getRowGroupFields(tableMetadataV4, rowGroupMetadata);
TupleMetadata rowGroupSchema = new TupleSchema();
rowGroupFields.forEach((schemaPath, majorType) -> SchemaPathUtils.addColumnMetadata(rowGroupSchema, schemaPath, majorType, intermediateColumns));
// updates filter predicate to add required casts for the case when row group schema differs from the table schema
if (!rowGroupSchema.isEquivalent(rowGroupScan.getSchema())) {
filterPredicate = AbstractGroupScanWithMetadata.getFilterPredicate(filterExpr, context,
    context.getFunctionRegistry(), context.getOptions(),
    true, true /* supports file implicit columns */,
    rowGroupSchema);
}
matchResult = FilterEvaluatorUtils.matches(filterPredicate, columnsStatistics, footerRowCount, rowGroupSchema, schemaPathsInExpr);
// collect logging info
long timeToRead = pruneTimer.elapsed(TimeUnit.MICROSECONDS);
totalPruneTime += timeToRead;
// trace each single row group
logger.trace("Run-time pruning: {} row-group {} (RG index: {} row count: {}), took {} usec",
    matchResult == RowsMatch.NONE ? "Excluded" : "Included",
    rowGroup.getPath(), rowGroupIndex, footerRowCount, timeToRead);
} catch (Exception e) {
// in case some unexpected exception is raised
logger.warn("Run-time pruning check failed - {}. Skip pruning rowgroup - {}", e.getMessage(), rowGroup.getPath());
logger.debug("Failure during run-time pruning: {}", e.getMessage(), e);
}
}
pruneTimer.stop();
pruneTimer.reset();
// If this row group failed the match - skip it (i.e., no reader for this rowgroup)
if (matchResult == RowsMatch.NONE) {
// one more RG was pruned
rowGroupsPruned++;
if (firstRowGroup == null) {
// keep the first RG, to be used in case all row groups are pruned
firstRowGroup = rowGroup;
firstFooter = footer;
}
// This Row group does not comply with the filter - prune it out and check the next Row Group
continue;
}
}
mapWithMaxColumns = createReaderAndImplicitColumns(context, rowGroupScan, oContext, columnExplorer,
    readers, implicitColumns, mapWithMaxColumns, rowGroup, fs, footer, false);
}
// in case all row groups were pruned out - create a single reader for the first one (so that the schema could be returned)
if (readers.isEmpty() && firstRowGroup != null) {
DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(firstRowGroup), firstRowGroup.getPath());
mapWithMaxColumns = createReaderAndImplicitColumns(context, rowGroupScan, oContext, columnExplorer,
    readers, implicitColumns, mapWithMaxColumns, firstRowGroup, fs, firstFooter, true);
}
// do some logging, if relevant
if (totalPruneTime > 0) {
logger.info("Finished parquet_runtime_pruning in {} usec. Out of given {} rowgroups, {} were pruned. {}", totalPruneTime, totalRowGroups, rowGroupsPruned, totalRowGroups == rowGroupsPruned ? "ALL_PRUNED !!" : "");
}
// Update stats (same in every reader - the others would just overwrite the stats)
for (CommonParquetRecordReader rr : readers) {
rr.updateRowGroupsStats(totalRowGroups, rowGroupsPruned);
}
} catch (IOException | InterruptedException e) {
throw new ExecutionSetupException(e);
}
// all readers should have the same number of implicit columns, add missing ones with value null
Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant(null));
for (Map<String, String> map : implicitColumns) {
map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
}
return new ScanBatch(context, oContext, readers, implicitColumns);
}
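The closing normalization uses a Guava idiom to give every reader's implicit-column map the same key set. A standalone sketch of just that idiom, with made-up directory columns:

Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();
mapWithMaxColumns.put("dir0", "2024");
mapWithMaxColumns.put("dir1", "01");
Map<String, String> partial = new HashMap<>();
partial.put("dir0", "2023");
// keys present in the widest map but missing here are added with a null value
Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant(null));
partial.putAll(Maps.difference(partial, diff).entriesOnlyOnRight());
// partial now holds dir0=2023 and dir1=null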