Example 51 with TableScan

use of org.apache.calcite.rel.core.TableScan in project drill by apache.

the class FileMetadataInfoCollector method init.

private void init(FormatSelection selection, PlannerSettings settings, Supplier<TableScan> tableScanSupplier, List<SchemaPath> interestingColumns, int segmentColumnsCount) throws IOException {
    List<SchemaPath> metastoreInterestingColumns = Optional.ofNullable(basicRequests.interestingColumnsAndPartitionKeys(tableInfo).interestingColumns())
        .map(metastoreInterestingColumnNames -> metastoreInterestingColumnNames.stream()
            .map(SchemaPath::parseFromString)
            .collect(Collectors.toList()))
        .orElse(null);
    Map<String, Long> filesNamesLastModifiedTime = basicRequests.filesLastModifiedTime(tableInfo, null, null);
    List<String> newFiles = new ArrayList<>();
    List<String> updatedFiles = new ArrayList<>();
    List<String> removedFiles = new ArrayList<>(filesNamesLastModifiedTime.keySet());
    List<String> allFiles = new ArrayList<>();
    for (FileStatus fileStatus : getFileStatuses(selection)) {
        String path = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toUri().getPath();
        Long lastModificationTime = filesNamesLastModifiedTime.get(path);
        if (lastModificationTime == null) {
            newFiles.add(path);
        } else if (lastModificationTime < fileStatus.getModificationTime()) {
            updatedFiles.add(path);
        }
        removedFiles.remove(path);
        allFiles.add(path);
    }
    String selectionRoot = selection.getSelection().getSelectionRoot().toUri().getPath();
    if (!Objects.equals(metastoreInterestingColumns, interestingColumns)
        && metastoreInterestingColumns != null
        && (interestingColumns == null || !metastoreInterestingColumns.containsAll(interestingColumns))
        || TableStatisticsKind.ANALYZE_METADATA_LEVEL.getValue(basicRequests.tableMetadata(tableInfo)).compareTo(metadataLevel) != 0) {
        // do not update table scan and lists of segments / files / row groups,
        // metadata should be recalculated
        tableScan = tableScanSupplier.get();
        metadataToRemove.addAll(getMetadataInfoList(selectionRoot, removedFiles, MetadataType.SEGMENT, 0));
        return;
    }
    // checks whether there are any new, updated or removed files
    if (!newFiles.isEmpty() || !updatedFiles.isEmpty() || !removedFiles.isEmpty()) {
        List<String> scanFiles = new ArrayList<>(newFiles);
        scanFiles.addAll(updatedFiles);
        // updates scan to read updated / new files
        tableScan = getTableScan(settings, tableScanSupplier.get(), scanFiles);
        // iterates from the end;
        // takes deepest updated segments;
        // finds their parents:
        // - fetches all segments for parent level;
        // - filters segments to leave parents only;
        // obtains all child segments;
        // filters child segments for filtered parent segments
        int lastSegmentIndex = segmentColumnsCount - 1;
        List<String> scanAndRemovedFiles = new ArrayList<>(scanFiles);
        scanAndRemovedFiles.addAll(removedFiles);
        // 1. Obtain files info for files from the same folder without removed files
        // 2. Get segments for obtained files + segments for removed files
        // 3. Get parent segments
        // 4. Get other segments for the same parent segment
        // 5. Remove segments which have only removed files (matched for removedFileInfo and don't match to filesInfo)
        // 6. Do the same for parent segments
        List<MetadataInfo> allFilesInfo = getMetadataInfoList(selectionRoot, allFiles, MetadataType.FILE, 0);
        // first pass: collect updated segments even without files, they will be removed later
        List<MetadataInfo> leafSegments = getMetadataInfoList(selectionRoot, scanAndRemovedFiles, MetadataType.SEGMENT, lastSegmentIndex);
        List<MetadataInfo> removedFilesMetadata = getMetadataInfoList(selectionRoot, removedFiles, MetadataType.FILE, 0);
        List<MetadataInfo> scanFilesInfo = getMetadataInfoList(selectionRoot, scanAndRemovedFiles, MetadataType.FILE, 0);
        // files from scan + files from the same folder without removed files
        filesInfo = leafSegments.stream()
            .filter(parent -> scanFilesInfo.stream()
                .anyMatch(child -> MetadataIdentifierUtils.isMetadataKeyParent(parent.identifier(), child.identifier())))
            .flatMap(parent -> allFilesInfo.stream()
                .filter(child -> MetadataIdentifierUtils.isMetadataKeyParent(parent.identifier(), child.identifier())))
            .collect(Collectors.toList());
        Multimap<Integer, MetadataInfo> allSegments = populateSegments(removedFiles, allFiles, selectionRoot, lastSegmentIndex, leafSegments, removedFilesMetadata);
        List<MetadataInfo> allRowGroupsInfo = getAllRowGroupsMetadataInfos(allFiles);
        rowGroupsInfo = allRowGroupsInfo.stream()
            .filter(child -> filesInfo.stream()
                .map(MetadataInfo::identifier)
                .anyMatch(parent -> MetadataIdentifierUtils.isMetadataKeyParent(parent, child.identifier())))
            .collect(Collectors.toList());
        List<MetadataInfo> segmentsToUpdate = getMetadataInfoList(selectionRoot, scanAndRemovedFiles, MetadataType.SEGMENT, 0);
        allMetaToHandle = Streams.concat(allSegments.values().stream(), allFilesInfo.stream(), allRowGroupsInfo.stream())
            .filter(child -> segmentsToUpdate.stream()
                .anyMatch(parent -> MetadataIdentifierUtils.isMetadataKeyParent(parent.identifier(), child.identifier())))
            .filter(parent -> removedFilesMetadata.stream()
                .noneMatch(child -> MetadataIdentifierUtils.isMetadataKeyParent(parent.identifier(), child.identifier()))
                || filesInfo.stream()
                    .anyMatch(child -> MetadataIdentifierUtils.isMetadataKeyParent(parent.identifier(), child.identifier())))
            .collect(Collectors.toList());
        // removed top-level segments are handled separately since their metadata is not overwritten when writing to the Metastore
        List<MetadataInfo> removedTopSegments = getMetadataInfoList(selectionRoot, removedFiles, MetadataType.SEGMENT, 0).stream()
            .filter(parent -> removedFilesMetadata.stream()
                .anyMatch(child -> MetadataIdentifierUtils.isMetadataKeyParent(parent.identifier(), child.identifier()))
                && allFilesInfo.stream()
                    .noneMatch(child -> MetadataIdentifierUtils.isMetadataKeyParent(parent.identifier(), child.identifier())))
            .collect(Collectors.toList());
        metadataToRemove.addAll(removedTopSegments);
        segmentsToUpdate.stream().filter(segment -> !removedTopSegments.contains(segment)).forEach(allMetaToHandle::add);
    } else {
        // table metadata may still be up-to-date
        outdated = false;
    }
}
Also used : SchemalessScan(org.apache.drill.exec.physical.base.SchemalessScan) MetadataType(org.apache.drill.metastore.metadata.MetadataType) TableScan(org.apache.calcite.rel.core.TableScan) Arrays(java.util.Arrays) TableInfo(org.apache.drill.metastore.metadata.TableInfo) MetastoreColumn(org.apache.drill.metastore.MetastoreColumn) FileSystem(org.apache.hadoop.fs.FileSystem) DrillRel(org.apache.drill.exec.planner.logical.DrillRel) Streams(org.apache.drill.shaded.guava.com.google.common.collect.Streams) DrillScanRel(org.apache.drill.exec.planner.logical.DrillScanRel) MetadataInfo(org.apache.drill.metastore.metadata.MetadataInfo) DrillFileSystem(org.apache.drill.exec.store.dfs.DrillFileSystem) ArrayListMultimap(org.apache.drill.shaded.guava.com.google.common.collect.ArrayListMultimap) FileStatus(org.apache.hadoop.fs.FileStatus) DrillTable(org.apache.drill.exec.planner.logical.DrillTable) ColumnExplorer(org.apache.drill.exec.store.ColumnExplorer) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) FormatSelection(org.apache.drill.exec.store.dfs.FormatSelection) ImpersonationUtil(org.apache.drill.exec.util.ImpersonationUtil) Path(org.apache.hadoop.fs.Path) FileSelection(org.apache.drill.exec.store.dfs.FileSelection) Multimap(org.apache.drill.shaded.guava.com.google.common.collect.Multimap) TableStatisticsKind(org.apache.drill.metastore.statistics.TableStatisticsKind) PartitionLocation(org.apache.drill.exec.planner.PartitionLocation) BasicTablesRequests(org.apache.drill.metastore.components.tables.BasicTablesRequests) Collection(java.util.Collection) SchemaPath(org.apache.drill.common.expression.SchemaPath) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) DrillFileSystemUtil(org.apache.drill.exec.util.DrillFileSystemUtil) List(java.util.List) Lists(org.apache.drill.shaded.guava.com.google.common.collect.Lists) FileSystemPartitionDescriptor(org.apache.drill.exec.planner.FileSystemPartitionDescriptor) PlannerSettings(org.apache.drill.exec.planner.physical.PlannerSettings) Optional(java.util.Optional) Collections(java.util.Collections)
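
For reference, the file diff at the top of init can be illustrated in isolation. Below is a minimal, self-contained sketch (Java 16+ for records) of the same new / updated / removed classification against the Metastore's last-modified map; the FileDiffSketch class, FileInfo record, paths and timestamps are invented for this illustration and are not part of Drill's API.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class FileDiffSketch {

    // invented stand-in for Hadoop's FileStatus: a path plus its modification time
    record FileInfo(String path, long modificationTime) {}

    public static void main(String[] args) {
        // what the Metastore remembers: path -> last known modification time
        Map<String, Long> knownTimes = new LinkedHashMap<>();
        knownTimes.put("/tbl/a.parquet", 100L); // unchanged on disk
        knownTimes.put("/tbl/b.parquet", 100L); // newer on disk -> updated
        knownTimes.put("/tbl/c.parquet", 100L); // gone from disk -> removed

        // what a directory listing currently reports
        List<FileInfo> current = List.of(
            new FileInfo("/tbl/a.parquet", 100L),
            new FileInfo("/tbl/b.parquet", 200L),
            new FileInfo("/tbl/d.parquet", 150L)); // unknown to the Metastore -> new

        List<String> newFiles = new ArrayList<>();
        List<String> updatedFiles = new ArrayList<>();
        // start from everything the Metastore knows; files still present are
        // crossed off, so the survivors are exactly the removed files
        List<String> removedFiles = new ArrayList<>(knownTimes.keySet());
        for (FileInfo file : current) {
            Long knownTime = knownTimes.get(file.path());
            if (knownTime == null) {
                newFiles.add(file.path());
            } else if (knownTime < file.modificationTime()) {
                updatedFiles.add(file.path());
            }
            removedFiles.remove(file.path());
        }
        // prints: new=[/tbl/d.parquet] updated=[/tbl/b.parquet] removed=[/tbl/c.parquet]
        System.out.println("new=" + newFiles
            + " updated=" + updatedFiles
            + " removed=" + removedFiles);
    }
}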

Example 52 with TableScan

use of org.apache.calcite.rel.core.TableScan in project drill by apache.

the class FileMetadataInfoCollector method getTableScan.

private TableScan getTableScan(PlannerSettings settings, TableScan scanRel, List<String> scanFiles) {
    FileSystemPartitionDescriptor descriptor = new FileSystemPartitionDescriptor(settings, scanRel);
    List<PartitionLocation> newPartitions = Lists.newArrayList(descriptor.iterator()).stream()
        .flatMap(Collection::stream)
        .flatMap(p -> p.getPartitionLocationRecursive().stream())
        .filter(p -> scanFiles.contains(p.getEntirePartitionLocation().toUri().getPath()))
        .collect(Collectors.toList());
    try {
        if (!newPartitions.isEmpty()) {
            return descriptor.createTableScan(newPartitions, false);
        } else {
            DrillTable drillTable = descriptor.getTable();
            SchemalessScan scan = new SchemalessScan(drillTable.getUserName(),
                ((FormatSelection) descriptor.getTable().getSelection()).getSelection().getSelectionRoot());
            return new DrillScanRel(scanRel.getCluster(),
                scanRel.getTraitSet().plus(DrillRel.DRILL_LOGICAL),
                scanRel.getTable(), scan, scanRel.getRowType(),
                DrillScanRel.getProjectedColumns(scanRel.getTable(), true), true);
        }
    } catch (Exception e) {
        throw new RuntimeException("Error happened during recreation of pruned scan", e);
    }
}
Also used : SchemalessScan(org.apache.drill.exec.physical.base.SchemalessScan) MetadataType(org.apache.drill.metastore.metadata.MetadataType) TableScan(org.apache.calcite.rel.core.TableScan) Arrays(java.util.Arrays) TableInfo(org.apache.drill.metastore.metadata.TableInfo) MetastoreColumn(org.apache.drill.metastore.MetastoreColumn) FileSystem(org.apache.hadoop.fs.FileSystem) DrillRel(org.apache.drill.exec.planner.logical.DrillRel) Streams(org.apache.drill.shaded.guava.com.google.common.collect.Streams) DrillScanRel(org.apache.drill.exec.planner.logical.DrillScanRel) MetadataInfo(org.apache.drill.metastore.metadata.MetadataInfo) DrillFileSystem(org.apache.drill.exec.store.dfs.DrillFileSystem) ArrayListMultimap(org.apache.drill.shaded.guava.com.google.common.collect.ArrayListMultimap) FileStatus(org.apache.hadoop.fs.FileStatus) DrillTable(org.apache.drill.exec.planner.logical.DrillTable) ColumnExplorer(org.apache.drill.exec.store.ColumnExplorer) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) FormatSelection(org.apache.drill.exec.store.dfs.FormatSelection) ImpersonationUtil(org.apache.drill.exec.util.ImpersonationUtil) Path(org.apache.hadoop.fs.Path) FileSelection(org.apache.drill.exec.store.dfs.FileSelection) Multimap(org.apache.drill.shaded.guava.com.google.common.collect.Multimap) TableStatisticsKind(org.apache.drill.metastore.statistics.TableStatisticsKind) PartitionLocation(org.apache.drill.exec.planner.PartitionLocation) BasicTablesRequests(org.apache.drill.metastore.components.tables.BasicTablesRequests) Collection(java.util.Collection) SchemaPath(org.apache.drill.common.expression.SchemaPath) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) DrillFileSystemUtil(org.apache.drill.exec.util.DrillFileSystemUtil) List(java.util.List) Lists(org.apache.drill.shaded.guava.com.google.common.collect.Lists) FileSystemPartitionDescriptor(org.apache.drill.exec.planner.FileSystemPartitionDescriptor) PlannerSettings(org.apache.drill.exec.planner.physical.PlannerSettings) Optional(java.util.Optional) Collections(java.util.Collections)
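
The pruning shape of getTableScan can likewise be sketched without the Drill planner types: flatten the recursive partition locations, keep only those whose path appears in the scan file list, and treat an empty result as the signal to fall back to a schemaless (catch-all) scan. The Partition interface and every name below are hypothetical stand-ins for PartitionLocation and FileSystemPartitionDescriptor, not actual Drill interfaces.

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

public class PartitionPruneSketch {

    // invented stand-in for Drill's PartitionLocation
    interface Partition {
        String path();              // file system location of this partition
        List<Partition> children(); // nested sub-partitions, empty for leaves
    }

    // depth-first expansion, mirroring getPartitionLocationRecursive()
    static List<Partition> flatten(Partition p) {
        List<Partition> out = new ArrayList<>();
        out.add(p);
        p.children().forEach(child -> out.addAll(flatten(child)));
        return out;
    }

    // keep only partitions whose path is among the files the scan should read;
    // an empty result is the caller's cue to fall back to a schemaless scan
    static List<Partition> prune(List<Partition> roots, List<String> scanFiles) {
        return roots.stream()
            .flatMap(root -> flatten(root).stream())
            .filter(p -> scanFiles.contains(p.path()))
            .collect(Collectors.toList());
    }
}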

Example 53 with TableScan

use of org.apache.calcite.rel.core.TableScan in project drill by apache.

the class ParquetPruneScanRule method getFilterOnProjectParquet.

public static RelOptRule getFilterOnProjectParquet(OptimizerRulesContext optimizerRulesContext) {
    return new PruneScanRule(
        RelOptHelper.some(DrillFilterRel.class,
            RelOptHelper.some(DrillProjectRel.class,
                RelOptHelper.any(DrillScanRel.class))),
        "PruneScanRule:Filter_On_Project_Parquet", optimizerRulesContext) {

        @Override
        public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
            return new ParquetPartitionDescriptor(settings, (DrillScanRel) scanRel);
        }

        @Override
        public boolean matches(RelOptRuleCall call) {
            final DrillScanRel scan = call.rel(2);
            GroupScan groupScan = scan.getGroupScan();
            // this rule is applicable only for parquet based partition pruning
            if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
                return groupScan instanceof AbstractParquetGroupScan && groupScan.supportsPartitionFilterPushdown() && !scan.partitionFilterPushdown();
            } else {
                return groupScan instanceof AbstractParquetGroupScan && groupScan.supportsPartitionFilterPushdown();
            }
        }

        @Override
        public void onMatch(RelOptRuleCall call) {
            final DrillFilterRel filterRel = call.rel(0);
            final DrillProjectRel projectRel = call.rel(1);
            final DrillScanRel scanRel = call.rel(2);
            doOnMatch(call, filterRel, projectRel, scanRel);
        }
    };
}
Also used : AbstractParquetGroupScan(org.apache.drill.exec.store.parquet.AbstractParquetGroupScan) GroupScan(org.apache.drill.exec.physical.base.GroupScan) TableScan(org.apache.calcite.rel.core.TableScan) DrillScanRel(org.apache.drill.exec.planner.logical.DrillScanRel) PlannerSettings(org.apache.drill.exec.planner.physical.PlannerSettings) DrillProjectRel(org.apache.drill.exec.planner.logical.DrillProjectRel) DrillFilterRel(org.apache.drill.exec.planner.logical.DrillFilterRel) RelOptRuleCall(org.apache.calcite.plan.RelOptRuleCall) ParquetPartitionDescriptor(org.apache.drill.exec.planner.ParquetPartitionDescriptor)
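
For comparison, the same Filter-on-Project-on-Scan operand pattern can be written directly against Calcite's classic RelOptRule API (the pre-RelRule style that Drill's RelOptHelper wraps). This is a bare sketch: the rule name is made up and the transformation body is left as a comment.

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.core.Filter;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.core.TableScan;

public class FilterProjectScanRuleSketch {

    // matches Filter(Project(TableScan)); call.rel(i) indexes the operands
    // in the same outer-to-inner order used in onMatch above
    public static final RelOptRule INSTANCE = new RelOptRule(
        RelOptRule.operand(Filter.class,
            RelOptRule.operand(Project.class,
                RelOptRule.operand(TableScan.class, RelOptRule.any()))),
        "Sketch:Filter_On_Project_Scan") {

        @Override
        public void onMatch(RelOptRuleCall call) {
            final Filter filter = call.rel(0);
            final Project project = call.rel(1);
            final TableScan scan = call.rel(2);
            // a real rule would build a pruned scan here and hand it back
            // via call.transformTo(newRel)
        }
    };
}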

Aggregations

TableScan (org.apache.calcite.rel.core.TableScan): 51 usages
RelNode (org.apache.calcite.rel.RelNode): 19 usages
ArrayList (java.util.ArrayList): 14 usages
PlannerSettings (org.apache.drill.exec.planner.physical.PlannerSettings): 13 usages
Project (org.apache.calcite.rel.core.Project): 12 usages
DrillScanRel (org.apache.drill.exec.planner.logical.DrillScanRel): 11 usages
Filter (org.apache.calcite.rel.core.Filter): 10 usages
IOException (java.io.IOException): 9 usages
RexNode (org.apache.calcite.rex.RexNode): 9 usages
GroupScan (org.apache.drill.exec.physical.base.GroupScan): 9 usages
RelOptRuleCall (org.apache.calcite.plan.RelOptRuleCall): 8 usages
DrillFilterRel (org.apache.drill.exec.planner.logical.DrillFilterRel): 8 usages
Aggregate (org.apache.calcite.rel.core.Aggregate): 7 usages
LogicalProject (org.apache.calcite.rel.logical.LogicalProject): 7 usages
RexBuilder (org.apache.calcite.rex.RexBuilder): 6 usages
RelOptCluster (org.apache.calcite.plan.RelOptCluster): 5 usages
RelShuttleImpl (org.apache.calcite.rel.RelShuttleImpl): 5 usages
LogicalJoin (org.apache.calcite.rel.logical.LogicalJoin): 5 usages
RelDataType (org.apache.calcite.rel.type.RelDataType): 5 usages
SchemaPath (org.apache.drill.common.expression.SchemaPath): 5 usages