
Example 1 with ParquetFileAndRowCountMetadata

use of org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileAndRowCountMetadata in project drill by apache.

In the class Metadata, method createMetaFilesRecursively:

/**
 * Create the parquet metadata files for the directory at the given path and for any subdirectories.
 * Metadata cache files written to the disk contain relative paths. Returned Pair of metadata contains absolute paths.
 *
 * @param path to the directory of the parquet table
 * @param fs file system
 * @param allColumnsInteresting if set, store column metadata for all the columns
 * @param columnSet Set of columns for which column metadata has to be stored
 * @return Pair of parquet metadata. The left element is the parquet metadata for the table. The right element is
 *         the metadata for all subdirectories (if they are present and there are no parquet files in the
 *         {@code path} directory).
 * @throws IOException if parquet metadata can't be serialized and written to the json file
 */
private Pair<ParquetTableMetadata_v4, ParquetTableMetadataDirs> createMetaFilesRecursively(Path path, FileSystem fs, boolean allColumnsInteresting, Set<SchemaPath> columnSet) throws IOException {
    Stopwatch timer = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
    List<ParquetFileMetadata_v4> metaDataList = Lists.newArrayList();
    List<Path> directoryList = Lists.newArrayList();
    ConcurrentHashMap<ColumnTypeMetadata_v4.Key, ColumnTypeMetadata_v4> columnTypeInfoSet = new ConcurrentHashMap<>();
    FileStatus fileStatus = fs.getFileStatus(path);
    long dirTotalRowCount = 0;
    assert fileStatus.isDirectory() : "Expected directory";
    final Map<FileStatus, FileSystem> childFiles = new LinkedHashMap<>();
    for (final FileStatus file : DrillFileSystemUtil.listAll(fs, path, false)) {
        if (file.isDirectory()) {
            ParquetTableMetadata_v4 subTableMetadata = (createMetaFilesRecursively(file.getPath(), fs, allColumnsInteresting, columnSet)).getLeft();
            ConcurrentHashMap<ColumnTypeMetadata_v4.Key, ColumnTypeMetadata_v4> subTableColumnTypeInfo = subTableMetadata.getColumnTypeInfoMap();
            metaDataList.addAll((List<ParquetFileMetadata_v4>) subTableMetadata.getFiles());
            directoryList.addAll(subTableMetadata.getDirectories());
            directoryList.add(file.getPath());
            // TODO: We need a merge method that merges two columns with the same name but different types
            if (columnTypeInfoSet.isEmpty()) {
                columnTypeInfoSet.putAll(subTableColumnTypeInfo);
            } else {
                for (ColumnTypeMetadata_v4.Key key : subTableColumnTypeInfo.keySet()) {
                    ColumnTypeMetadata_v4 columnTypeMetadata_v4 = columnTypeInfoSet.get(key);
                    if (columnTypeMetadata_v4 == null) {
                        columnTypeMetadata_v4 = subTableColumnTypeInfo.get(key);
                    } else {
                        // If either null count is unknown (negative), mark the merged null count as unknown
                        if (subTableColumnTypeInfo.get(key).totalNullCount < 0 || columnTypeMetadata_v4.totalNullCount < 0) {
                            columnTypeMetadata_v4.totalNullCount = NULL_COUNT_NOT_EXISTS;
                        } else {
                            columnTypeMetadata_v4.totalNullCount = columnTypeMetadata_v4.totalNullCount + subTableColumnTypeInfo.get(key).totalNullCount;
                        }
                    }
                    columnTypeInfoSet.put(key, columnTypeMetadata_v4);
                }
            }
            dirTotalRowCount = dirTotalRowCount + subTableMetadata.getTotalRowCount();
        } else {
            childFiles.put(file, fs);
        }
    }
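    // Build the metadata summary and the table metadata object for this directory level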
    Metadata_V4.MetadataSummary metadataSummary = new Metadata_V4.MetadataSummary(SUPPORTED_VERSIONS.last().toString(), DrillVersionInfo.getVersion(), allColumnsInteresting || columnSet == null);
    ParquetTableMetadata_v4 parquetTableMetadata = new ParquetTableMetadata_v4(metadataSummary);
    if (childFiles.size() > 0) {
        List<ParquetFileAndRowCountMetadata> childFileAndRowCountMetadata = getParquetFileMetadata_v4(parquetTableMetadata, childFiles, allColumnsInteresting, columnSet);
        // If the columnTypeInfoSet is empty, add the columnTypeInfo from the parquetTableMetadata
        if (columnTypeInfoSet.isEmpty()) {
            columnTypeInfoSet.putAll(parquetTableMetadata.getColumnTypeInfoMap());
        }
        for (ParquetFileAndRowCountMetadata parquetFileAndRowCountMetadata : childFileAndRowCountMetadata) {
            metaDataList.add(parquetFileAndRowCountMetadata.getFileMetadata());
            dirTotalRowCount = dirTotalRowCount + parquetFileAndRowCountMetadata.getFileRowCount();
            Map<ColumnTypeMetadata_v4.Key, Long> totalNullCountMap = parquetFileAndRowCountMetadata.getTotalNullCountMap();
            for (ColumnTypeMetadata_v4.Key column : totalNullCountMap.keySet()) {
                ColumnTypeMetadata_v4 columnTypeMetadata_v4 = columnTypeInfoSet.get(column);
                // If the column is not present in columnTypeInfoSet, get it from parquetTableMetadata
                if (columnTypeMetadata_v4 == null) {
                    columnTypeMetadata_v4 = parquetTableMetadata.getColumnTypeInfoMap().get(column);
                }
                // If either null count is unknown (negative), mark the merged null count as unknown
                if (columnTypeMetadata_v4.totalNullCount < 0 || totalNullCountMap.get(column) < 0) {
                    columnTypeMetadata_v4.totalNullCount = NULL_COUNT_NOT_EXISTS;
                } else {
                    columnTypeMetadata_v4.totalNullCount += totalNullCountMap.get(column);
                }
                columnTypeInfoSet.put(column, columnTypeMetadata_v4);
            }
        }
    }
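    // Populate the summary: directory list, merged column type info, interesting-columns flag and total row count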
    metadataSummary.directories = directoryList;
    parquetTableMetadata.assignFiles(metaDataList);
    // TODO: We need a merge method that merges two columns with the same name but different types
    if (metadataSummary.columnTypeInfo == null) {
        metadataSummary.columnTypeInfo = new ConcurrentHashMap<>();
    }
    metadataSummary.columnTypeInfo.putAll(columnTypeInfoSet);
    metadataSummary.allColumnsInteresting = allColumnsInteresting;
    metadataSummary.totalRowCount = dirTotalRowCount;
    parquetTableMetadata.metadataSummary = metadataSummary;
    for (String oldName : OLD_METADATA_FILENAMES) {
        fs.delete(new Path(path, oldName), false);
    }
    // relative paths in the metadata are only necessary for meta cache files.
    ParquetTableMetadata_v4 metadataTableWithRelativePaths = MetadataPathUtils.createMetadataWithRelativePaths(parquetTableMetadata, path);
    writeFile(metadataTableWithRelativePaths.fileMetadata, new Path(path, METADATA_FILENAME), fs);
    writeFile(metadataTableWithRelativePaths.getSummary(), new Path(path, METADATA_SUMMARY_FILENAME), fs);
    Metadata_V4.MetadataSummary metadataSummaryWithRelativePaths = metadataTableWithRelativePaths.getSummary();
    // Directories list will be empty at the leaf level directories. For sub-directories with both files and directories,
    // only the directories will be included in the list.
    writeFile(new ParquetTableMetadataDirs(metadataSummaryWithRelativePaths.directories), new Path(path, METADATA_DIRECTORIES_FILENAME), fs);
    if (timer != null) {
        logger.debug("Creating metadata files recursively took {} ms", timer.elapsed(TimeUnit.MILLISECONDS));
        timer.stop();
    }
    return Pair.of(parquetTableMetadata, new ParquetTableMetadataDirs(directoryList));
}
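
The null-count merging above (for subdirectories and for child files alike) follows one simple rule: a negative count means "statistics unknown", and an unknown count makes the merged result unknown; otherwise the counts are summed. Below is a minimal standalone sketch of that rule, assuming NULL_COUNT_NOT_EXISTS is the negative sentinel used by the Metadata class; the helper class and method names are illustrative, not Drill API.

// Illustrative sketch only, not Drill code. Shows the null-count merge rule used above:
// any negative (unknown) count makes the merged result unknown, otherwise counts are added.
public class NullCountMergeSketch {

    // Assumed to match the sentinel Metadata uses for "no statistics available"
    private static final long NULL_COUNT_NOT_EXISTS = -1;

    static long mergeNullCounts(long left, long right) {
        if (left < 0 || right < 0) {
            return NULL_COUNT_NOT_EXISTS; // at least one side has no statistics
        }
        return left + right;
    }

    public static void main(String[] args) {
        System.out.println(mergeNullCounts(10, 5));                     // 15
        System.out.println(mergeNullCounts(10, NULL_COUNT_NOT_EXISTS)); // -1 (unknown)
    }
}
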
Also used: ColumnTypeMetadata_v4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ColumnTypeMetadata_v4), FileStatus (org.apache.hadoop.fs.FileStatus), Stopwatch (org.apache.drill.shaded.guava.com.google.common.base.Stopwatch), LinkedHashMap (java.util.LinkedHashMap), MetadataSummary (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.MetadataSummary), FileSystem (org.apache.hadoop.fs.FileSystem), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), Path (org.apache.hadoop.fs.Path), SchemaPath (org.apache.drill.common.expression.SchemaPath), ParquetFileAndRowCountMetadata (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileAndRowCountMetadata), ParquetTableMetadata_v4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetTableMetadata_v4), ParquetFileMetadata_v4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileMetadata_v4)

Example 2 with ParquetFileAndRowCountMetadata

use of org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileAndRowCountMetadata in project drill by apache.

In the class Metadata, method getParquetTableMetadata:

/**
 * Get the parquet metadata for the given parquet files
 *
 * @param fileStatusMap file statuses and corresponding file systems
 * @return parquet table metadata object
 * @throws IOException if parquet file metadata can't be obtained
 */
private ParquetTableMetadata_v4 getParquetTableMetadata(Map<FileStatus, FileSystem> fileStatusMap) throws IOException {
    Metadata_V4.MetadataSummary tableMetadataSummary = new Metadata_V4.MetadataSummary(SUPPORTED_VERSIONS.last().toString(), DrillVersionInfo.getVersion(), new ArrayList<>(), true);
    ParquetTableMetadata_v4 tableMetadata = new ParquetTableMetadata_v4(tableMetadataSummary);
    List<ParquetFileAndRowCountMetadata> parquetFileAndRowCountMetadata = getParquetFileMetadata_v4(tableMetadata, fileStatusMap, true, null);
    List<ParquetFileMetadata_v4> parquetFileMetadata = new ArrayList<>();
    for (ParquetFileAndRowCountMetadata fileAndGlobalMetadata : parquetFileAndRowCountMetadata) {
        parquetFileMetadata.add(fileAndGlobalMetadata.getFileMetadata());
    }
    tableMetadata.assignFiles(parquetFileMetadata);
    return tableMetadata;
}
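
The fileStatusMap argument has to be assembled by the caller: each parquet file's FileStatus paired with the FileSystem it was listed from. The following rough sketch builds such a map with standard Hadoop calls; the directory path and the .parquet suffix filter are placeholders, and since getParquetTableMetadata is private the final call is shown only as a comment.

// Illustrative sketch only: assemble the Map<FileStatus, FileSystem> input that
// getParquetTableMetadata(...) expects, using standard Hadoop FileSystem calls.
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusMapSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tableDir = new Path("/data/parquet_table"); // placeholder path
        Map<FileStatus, FileSystem> fileStatusMap = new LinkedHashMap<>();
        for (FileStatus status : fs.listStatus(tableDir)) {
            // keep only plain parquet files; subdirectories are handled by the recursive variant above
            if (!status.isDirectory() && status.getPath().getName().endsWith(".parquet")) {
                fileStatusMap.put(status, fs);
            }
        }
        System.out.println("Collected " + fileStatusMap.size() + " parquet files");
        // ParquetTableMetadata_v4 tableMetadata = getParquetTableMetadata(fileStatusMap);
    }
}
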
Also used: ParquetFileAndRowCountMetadata (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileAndRowCountMetadata), MetadataSummary (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.MetadataSummary), ArrayList (java.util.ArrayList), ParquetTableMetadata_v4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetTableMetadata_v4), ParquetFileMetadata_v4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileMetadata_v4)

Example 3 with ParquetFileAndRowCountMetadata

use of org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileAndRowCountMetadata in project drill by apache.

In the class AbstractParquetScanBatchCreator, method getBatch:

protected ScanBatch getBatch(ExecutorFragmentContext context, AbstractParquetRowGroupScan rowGroupScan, OperatorContext oContext) throws ExecutionSetupException {
    final ColumnExplorer columnExplorer = new ColumnExplorer(context.getOptions(), rowGroupScan.getColumns());
    if (!columnExplorer.isStarQuery()) {
        rowGroupScan = rowGroupScan.copy(columnExplorer.getTableColumns());
        rowGroupScan.setOperatorId(rowGroupScan.getOperatorId());
    }
    AbstractDrillFileSystemManager fsManager = getDrillFileSystemCreator(oContext, context.getOptions());
    // keep footers in a map to avoid re-reading them
    Map<Path, ParquetMetadata> footers = new HashMap<>();
    List<CommonParquetRecordReader> readers = new LinkedList<>();
    List<Map<String, String>> implicitColumns = new ArrayList<>();
    Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();
    ParquetReaderConfig readerConfig = rowGroupScan.getReaderConfig();
    // to be scanned in case ALL row groups are pruned out
    RowGroupReadEntry firstRowGroup = null;
    ParquetMetadata firstFooter = null;
    // for stats
    long rowGroupsPruned = 0;
    try {
        LogicalExpression filterExpr = rowGroupScan.getFilter();
        // Was a filter given? And is it not just a "TRUE" predicate?
        boolean doRuntimePruning = filterExpr != null
            && !((filterExpr instanceof ValueExpressions.BooleanExpression)
                && ((ValueExpressions.BooleanExpression) filterExpr).getBoolean());
        // Runtime pruning: Avoid recomputing metadata objects for each row-group in case they use the same file
        // by keeping the following objects computed earlier (relies on same file being in consecutive rowgroups)
        Path prevRowGroupPath = null;
        Metadata_V4.ParquetTableMetadata_v4 tableMetadataV4 = null;
        Metadata_V4.ParquetFileAndRowCountMetadata fileMetadataV4 = null;
        FilterPredicate<?> filterPredicate = null;
        Set<SchemaPath> schemaPathsInExpr = null;
        Set<SchemaPath> columnsInExpr = null;
        // for debug/info logging
        long totalPruneTime = 0;
        long totalRowGroups = rowGroupScan.getRowGroupReadEntries().size();
        Stopwatch pruneTimer = Stopwatch.createUnstarted();
        // If pruning - Prepare the predicate and the columns before the FOR LOOP
        if (doRuntimePruning) {
            filterPredicate = AbstractGroupScanWithMetadata.getFilterPredicate(filterExpr, context,
                context.getFunctionRegistry(), context.getOptions(), true,
                true /* supports file implicit columns */, rowGroupScan.getSchema());
            // Extract only the relevant columns from the filter (sans implicit columns, if any)
            schemaPathsInExpr = filterExpr.accept(FilterEvaluatorUtils.FieldReferenceFinder.INSTANCE, null);
            columnsInExpr = new HashSet<>();
            String partitionColumnLabel = context.getOptions().getOption(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL).string_val;
            for (SchemaPath path : schemaPathsInExpr) {
                if (rowGroupScan.supportsFileImplicitColumns() && path.toString().matches(partitionColumnLabel + "\\d+")) {
                    // skip implicit columns like dir0, dir1
                    continue;
                }
                columnsInExpr.add(SchemaPath.getSimplePath(path.getRootSegmentPath()));
            }
            // just in case: if no columns - cancel pruning
            doRuntimePruning = !columnsInExpr.isEmpty();
        }
        for (RowGroupReadEntry rowGroup : rowGroupScan.getRowGroupReadEntries()) {
            /*
              Here we could store a map from file names to footers, to prevent re-reading the footer for each
              row group in a file.
              TODO - to prevent reading the footer again in the parquet record reader (it is read earlier in the
              ParquetStorageEngine) we should add more information to the RowGroupInfo that will be populated upon
              the first read to provide the reader with all of the file meta-data it needs.
              These fields will be added to the constructor below.
            */
            Stopwatch timer = logger.isTraceEnabled() ? Stopwatch.createUnstarted() : null;
            DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(rowGroup), rowGroup.getPath());
            if (!footers.containsKey(rowGroup.getPath())) {
                if (timer != null) {
                    timer.start();
                }
                ParquetMetadata footer = readFooter(fs.getConf(), rowGroup.getPath(), readerConfig);
                if (timer != null) {
                    long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
                    logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}", "", rowGroup.getPath(), "", 0, 0, 0, timeToRead);
                }
                footers.put(rowGroup.getPath(), footer);
            }
            ParquetMetadata footer = footers.get(rowGroup.getPath());
            if (doRuntimePruning) { // skip when no filter or filter is TRUE
                pruneTimer.start();
                // Perform the Run-Time Pruning - i.e. Skip/prune this row group if the match fails
                // default (in case of exception) - do not prune this row group
                RowsMatch matchResult = RowsMatch.ALL;
                if (rowGroup.isEmpty()) {
                    matchResult = RowsMatch.NONE;
                } else {
                    int rowGroupIndex = rowGroup.getRowGroupIndex();
                    long footerRowCount = footer.getBlocks().get(rowGroupIndex).getRowCount();
                    // When starting a new file, or at the first time - Initialize the path specific metadata
                    if (!rowGroup.getPath().equals(prevRowGroupPath)) {
                        // Create a table metadata (V4)
                        tableMetadataV4 = new Metadata_V4.ParquetTableMetadata_v4();
                        // The file status for this file
                        FileStatus fileStatus = fs.getFileStatus(rowGroup.getPath());
                        // The file metadata (only for the columns used in the filter)
                        fileMetadataV4 = Metadata.getParquetFileMetadata_v4(tableMetadataV4, footer, fileStatus, fs, false, true, columnsInExpr, readerConfig);
                        // for next time
                        prevRowGroupPath = rowGroup.getPath();
                    }
                    MetadataBase.RowGroupMetadata rowGroupMetadata = fileMetadataV4.getFileMetadata().getRowGroups().get(rowGroup.getRowGroupIndex());
                    Map<SchemaPath, ColumnStatistics<?>> columnsStatistics = ParquetTableMetadataUtils.getRowGroupColumnStatistics(tableMetadataV4, rowGroupMetadata);
                    try {
                        Map<SchemaPath, TypeProtos.MajorType> intermediateColumns = ParquetTableMetadataUtils.getIntermediateFields(tableMetadataV4, rowGroupMetadata);
                        Map<SchemaPath, TypeProtos.MajorType> rowGroupFields = ParquetTableMetadataUtils.getRowGroupFields(tableMetadataV4, rowGroupMetadata);
                        TupleMetadata rowGroupSchema = new TupleSchema();
                        rowGroupFields.forEach((schemaPath, majorType) -> SchemaPathUtils.addColumnMetadata(rowGroupSchema, schemaPath, majorType, intermediateColumns));
                        // updates filter predicate to add required casts for the case when row group schema differs from the table schema
                        if (!rowGroupSchema.isEquivalent(rowGroupScan.getSchema())) {
                            filterPredicate = AbstractGroupScanWithMetadata.getFilterPredicate(filterExpr, context,
                                context.getFunctionRegistry(), context.getOptions(), true,
                                true /* supports file implicit columns */, rowGroupSchema);
                        }
                        matchResult = FilterEvaluatorUtils.matches(filterPredicate, columnsStatistics, footerRowCount, rowGroupSchema, schemaPathsInExpr);
                        // collect logging info
                        long timeToRead = pruneTimer.elapsed(TimeUnit.MICROSECONDS);
                        totalPruneTime += timeToRead;
                        // trace each single row group
                        logger.trace("Run-time pruning: {} row-group {} (RG index: {} row count: {}), took {} usec",
                            matchResult == RowsMatch.NONE ? "Excluded" : "Included",
                            rowGroup.getPath(), rowGroupIndex, footerRowCount, timeToRead);
                    } catch (Exception e) {
                        // in case some unexpected exception is raised
                        logger.warn("Run-time pruning check failed - {}. Skip pruning rowgroup - {}", e.getMessage(), rowGroup.getPath());
                        logger.debug("Failure during run-time pruning: {}", e.getMessage(), e);
                    }
                }
                pruneTimer.stop();
                pruneTimer.reset();
                // If this row group failed the match - skip it (i.e., no reader for this rowgroup)
                if (matchResult == RowsMatch.NONE) {
                    // one more RG was pruned
                    rowGroupsPruned++;
                    if (firstRowGroup == null) {
                        // keep the first RG, to be used in case all row groups are pruned
                        firstRowGroup = rowGroup;
                        firstFooter = footer;
                    }
                    // This Row group does not comply with the filter - prune it out and check the next Row Group
                    continue;
                }
            }
            mapWithMaxColumns = createReaderAndImplicitColumns(context, rowGroupScan, oContext, columnExplorer, readers, implicitColumns, mapWithMaxColumns, rowGroup, fs, footer, false);
        }
        // in case all row groups were pruned out - create a single reader for the first one (so that the schema could be returned)
        if (readers.isEmpty() && firstRowGroup != null) {
            DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(firstRowGroup), firstRowGroup.getPath());
            mapWithMaxColumns = createReaderAndImplicitColumns(context, rowGroupScan, oContext, columnExplorer, readers, implicitColumns, mapWithMaxColumns, firstRowGroup, fs, firstFooter, true);
        }
        // do some logging, if relevant
        if (totalPruneTime > 0) {
            logger.info("Finished parquet_runtime_pruning in {} usec. Out of given {} rowgroups, {} were pruned. {}", totalPruneTime, totalRowGroups, rowGroupsPruned, totalRowGroups == rowGroupsPruned ? "ALL_PRUNED !!" : "");
        }
        // Update stats (same in every reader - the others would just overwrite the stats)
        for (CommonParquetRecordReader rr : readers) {
            rr.updateRowGroupsStats(totalRowGroups, rowGroupsPruned);
        }
    } catch (IOException | InterruptedException e) {
        throw new ExecutionSetupException(e);
    }
    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant(null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }
    return new ScanBatch(context, oContext, readers, implicitColumns);
}
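
The last few lines of getBatch() pad every reader's implicit-column map to the key set of the widest map, filling the missing entries with null, via Guava's Maps.transformValues and Maps.difference. Below is a small self-contained sketch of that padding step, using plain Guava rather than Drill's shaded copy, with made-up directory column names.

// Illustrative only: pad a narrower implicit-column map to the key set of the widest one,
// inserting null for the missing columns - same idea as the end of getBatch().
import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Functions;
import com.google.common.collect.Maps;

public class ImplicitColumnPaddingSketch {
    public static void main(String[] args) {
        Map<String, String> mapWithMaxColumns = new HashMap<>();
        mapWithMaxColumns.put("dir0", "2024");
        mapWithMaxColumns.put("dir1", "01");

        Map<String, String> readerColumns = new HashMap<>();
        readerColumns.put("dir0", "2023");

        // Template with the same keys as the widest map, every value nulled out
        Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant(null));
        // Copy over only the keys this reader's map is missing
        readerColumns.putAll(Maps.difference(readerColumns, diff).entriesOnlyOnRight());

        System.out.println(readerColumns); // prints dir0=2023 and dir1=null (iteration order may vary)
    }
}
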
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ArrayList (java.util.ArrayList), Stopwatch (org.apache.drill.shaded.guava.com.google.common.base.Stopwatch), Metadata_V4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4), SchemaPath (org.apache.drill.common.expression.SchemaPath), ScanBatch (org.apache.drill.exec.physical.impl.ScanBatch), LinkedList (java.util.LinkedList), ColumnExplorer (org.apache.drill.exec.store.ColumnExplorer), TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata), MetadataBase (org.apache.drill.exec.store.parquet.metadata.MetadataBase), Map (java.util.Map), ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException), ParquetMetadata (org.apache.parquet.hadoop.metadata.ParquetMetadata), ValueExpressions (org.apache.drill.common.expression.ValueExpressions), TupleSchema (org.apache.drill.exec.record.metadata.TupleSchema), LogicalExpression (org.apache.drill.common.expression.LogicalExpression), DrillFileSystem (org.apache.drill.exec.store.dfs.DrillFileSystem), Path (org.apache.hadoop.fs.Path), ColumnStatistics (org.apache.drill.metastore.statistics.ColumnStatistics), IOException (java.io.IOException), RowsMatch (org.apache.drill.exec.expr.stat.RowsMatch), CommonParquetRecordReader (org.apache.drill.exec.store.CommonParquetRecordReader)

Aggregations

ArrayList (java.util.ArrayList) 2
LinkedHashMap (java.util.LinkedHashMap) 2
SchemaPath (org.apache.drill.common.expression.SchemaPath) 2
MetadataSummary (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.MetadataSummary) 2
ParquetFileAndRowCountMetadata (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileAndRowCountMetadata) 2
ParquetFileMetadata_v4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetFileMetadata_v4) 2
ParquetTableMetadata_v4 (org.apache.drill.exec.store.parquet.metadata.Metadata_V4.ParquetTableMetadata_v4) 2
Stopwatch (org.apache.drill.shaded.guava.com.google.common.base.Stopwatch) 2
FileStatus (org.apache.hadoop.fs.FileStatus) 2
Path (org.apache.hadoop.fs.Path) 2
IOException (java.io.IOException) 1
HashMap (java.util.HashMap) 1
LinkedList (java.util.LinkedList) 1
Map (java.util.Map) 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 1
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException) 1
LogicalExpression (org.apache.drill.common.expression.LogicalExpression) 1
ValueExpressions (org.apache.drill.common.expression.ValueExpressions) 1
RowsMatch (org.apache.drill.exec.expr.stat.RowsMatch) 1
ScanBatch (org.apache.drill.exec.physical.impl.ScanBatch) 1