
Example 21 with VectorContainer

Use of org.apache.drill.exec.record.VectorContainer in project drill by apache.

The class HashJoinBatch, method buildSchema:

@Override
protected void buildSchema() throws SchemaChangeException {
    leftUpstream = next(left);
    rightUpstream = next(right);
    if (leftUpstream == IterOutcome.STOP || rightUpstream == IterOutcome.STOP) {
        state = BatchState.STOP;
        return;
    }
    if (leftUpstream == IterOutcome.OUT_OF_MEMORY || rightUpstream == IterOutcome.OUT_OF_MEMORY) {
        state = BatchState.OUT_OF_MEMORY;
        return;
    }
    // Initialize the hash join helper context
    hjHelper = new HashJoinHelper(context, oContext.getAllocator());
    try {
        rightSchema = right.getSchema();
        final VectorContainer vectors = new VectorContainer(oContext);
        for (final VectorWrapper<?> w : right) {
            vectors.addOrGet(w.getField());
        }
        vectors.buildSchema(SelectionVectorMode.NONE);
        vectors.setRecordCount(0);
        hyperContainer = new ExpandableHyperContainer(vectors);
        hjHelper.addNewBatch(0);
        buildBatchIndex++;
        setupHashTable();
        hashJoinProbe = setupHashJoinProbe();
        // Build the container schema and set the counts
        for (final VectorWrapper<?> w : container) {
            w.getValueVector().allocateNew();
        }
        container.buildSchema(BatchSchema.SelectionVectorMode.NONE);
        container.setRecordCount(outputRecords);
    } catch (IOException | ClassTransformationException e) {
        throw new SchemaChangeException(e);
    }
}
Also used : ExpandableHyperContainer(org.apache.drill.exec.record.ExpandableHyperContainer) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) ClassTransformationException(org.apache.drill.exec.exception.ClassTransformationException) IOException(java.io.IOException) VectorContainer(org.apache.drill.exec.record.VectorContainer)
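
The pattern in buildSchema() can be reduced to a small helper: copy each incoming field into a fresh VectorContainer, build an empty NONE-mode schema, and only then hand the container to downstream structures such as ExpandableHyperContainer. The sketch below is illustrative only; buildEmptyContainer, the oContext parameter (assumed to be the operator's OperatorContext), and the fields argument are assumptions for the example, and it uses only the VectorContainer calls seen above.

// Minimal sketch (hypothetical helper): mirror a known set of fields in an empty container.
// 'oContext' and 'fields' are assumed inputs, not part of the original HashJoinBatch code.
private VectorContainer buildEmptyContainer(OperatorContext oContext, Iterable<MaterializedField> fields) {
    final VectorContainer vectors = new VectorContainer(oContext);
    for (final MaterializedField field : fields) {
        vectors.addOrGet(field);                    // create or reuse one value vector per field
    }
    vectors.buildSchema(SelectionVectorMode.NONE);  // plain batch, no selection vector
    vectors.setRecordCount(0);                      // schema only; no rows yet
    return vectors;
}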

Example 22 with VectorContainer

Use of org.apache.drill.exec.record.VectorContainer in project drill by apache.

The class DumpCat, method doQuery:

/**
   * Query mode:
   * $drill-dumpcat --file=local:///tmp/drilltrace/[queryid]_[tag]_[majorid]_[minor]_[operator]
   *   Batches: 135
   *   Records: 53,214/53,214 // the first number is the count of selected records; the second is the total record count.
   *   Selected Records: 53,214
   *   Average Record Size: 74 bytes
   *   Total Data Size: 12,345 bytes
   *   Number of Empty Batches: 1
   *   Schema changes: 1
   *   Schema change batch indices: 0
   * @throws Exception
   */
protected void doQuery(FileInputStream input) throws Exception {
    int batchNum = 0;
    int emptyBatchNum = 0;
    BatchSchema prevSchema = null;
    final List<Integer> schemaChangeIdx = Lists.newArrayList();
    final BatchMetaInfo aggBatchMetaInfo = new BatchMetaInfo();
    while (input.available() > 0) {
        final VectorAccessibleSerializable vcSerializable = new VectorAccessibleSerializable(DumpCat.allocator);
        vcSerializable.readFromStream(input);
        final VectorContainer vectorContainer = (VectorContainer) vcSerializable.get();
        aggBatchMetaInfo.add(getBatchMetaInfo(vcSerializable));
        if (vectorContainer.getRecordCount() == 0) {
            emptyBatchNum++;
        }
        if (prevSchema != null && !vectorContainer.getSchema().equals(prevSchema)) {
            schemaChangeIdx.add(batchNum);
        }
        prevSchema = vectorContainer.getSchema();
        batchNum++;
        vectorContainer.zeroVectors();
    }
    /* output the summary stat */
    System.out.println(String.format("Total # of batches: %d", batchNum));
    //output: rows, selectedRows, avg rec size, total data size.
    System.out.println(aggBatchMetaInfo.toString());
    System.out.println(String.format("Empty batch : %d", emptyBatchNum));
    System.out.println(String.format("Schema changes : %d", schemaChangeIdx.size()));
    System.out.println(String.format("Schema change batch index : %s", schemaChangeIdx.toString()));
}
Also used : VectorAccessibleSerializable(org.apache.drill.exec.cache.VectorAccessibleSerializable) BatchSchema(org.apache.drill.exec.record.BatchSchema) VectorContainer(org.apache.drill.exec.record.VectorContainer)
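
Each iteration of the loop above follows the same read protocol: construct a VectorAccessibleSerializable over the allocator, read exactly one serialized batch from the stream, and unwrap it as a VectorContainer whose schema and record count can be inspected before its buffers are released. A minimal per-batch sketch, assuming only an allocator and an open input stream and using only the calls already present in doQuery():

// Minimal sketch: deserialize and inspect one batch, then release its buffers.
// 'allocator' and 'input' are assumed inputs.
final VectorAccessibleSerializable vcSerializable = new VectorAccessibleSerializable(allocator);
vcSerializable.readFromStream(input);                              // one serialized batch per call
final VectorContainer batch = (VectorContainer) vcSerializable.get();
System.out.println(String.format("records=%d, schema=%s", batch.getRecordCount(), batch.getSchema()));
batch.zeroVectors();                                               // free the batch's buffers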

Example 23 with VectorContainer

Use of org.apache.drill.exec.record.VectorContainer in project drill by apache.

The class BatchGroup, method getBatch:

private VectorContainer getBatch() throws IOException {
    assert fs != null;
    assert path != null;
    if (inputStream == null) {
        inputStream = fs.open(path);
    }
    VectorAccessibleSerializable vas = new VectorAccessibleSerializable(allocator);
    Stopwatch watch = Stopwatch.createStarted();
    vas.readFromStream(inputStream);
    VectorContainer c = vas.get();
    if (schema != null) {
        c = SchemaUtil.coerceContainer(c, schema, context);
    }
    logger.trace("Took {} us to read {} records", watch.elapsed(TimeUnit.MICROSECONDS), c.getRecordCount());
    spilledBatches--;
    currentContainer.zeroVectors();
    Iterator<VectorWrapper<?>> wrapperIterator = c.iterator();
    for (VectorWrapper<?> w : currentContainer) {
        TransferPair pair = wrapperIterator.next().getValueVector().makeTransferPair(w.getValueVector());
        pair.transfer();
    }
    currentContainer.setRecordCount(c.getRecordCount());
    c.zeroVectors();
    return c;
}
Also used : TransferPair(org.apache.drill.exec.record.TransferPair) VectorAccessibleSerializable(org.apache.drill.exec.cache.VectorAccessibleSerializable) VectorWrapper(org.apache.drill.exec.record.VectorWrapper) Stopwatch(com.google.common.base.Stopwatch) VectorContainer(org.apache.drill.exec.record.VectorContainer)
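
The core of getBatch() is the transfer step: once both containers expose the same schema, each source vector is moved (not copied) into its destination counterpart through a TransferPair, and the destination's record count is set from the source. A hedged sketch of just that step, assuming 'source' and 'destination' are VectorContainers with identical schemas:

// Minimal sketch: move all vectors from 'source' into 'destination' (identical schemas assumed).
final Iterator<VectorWrapper<?>> sourceWrappers = source.iterator();
for (final VectorWrapper<?> w : destination) {
    final TransferPair pair = sourceWrappers.next().getValueVector().makeTransferPair(w.getValueVector());
    pair.transfer();                                   // ownership of the buffers moves; no data copy
}
destination.setRecordCount(source.getRecordCount());
source.zeroVectors();                                  // source now holds empty vectors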

Example 24 with VectorContainer

Use of org.apache.drill.exec.record.VectorContainer in project drill by apache.

The class ExternalSortBatch, method innerNext:

@SuppressWarnings("resource")
@Override
public IterOutcome innerNext() {
    if (schema != null) {
        if (spillCount == 0) {
            return (getSelectionVector4().next()) ? IterOutcome.OK : IterOutcome.NONE;
        } else {
            Stopwatch w = Stopwatch.createStarted();
            int count = copier.next(targetRecordCount);
            if (count > 0) {
                long t = w.elapsed(TimeUnit.MICROSECONDS);
                logger.debug("Took {} us to merge {} records", t, count);
                container.setRecordCount(count);
                return IterOutcome.OK;
            } else {
                logger.debug("copier returned 0 records");
                return IterOutcome.NONE;
            }
        }
    }
    int totalCount = 0;
    // total number of batches received so far
    int totalBatches = 0;
    try {
        container.clear();
        outer: while (true) {
            IterOutcome upstream;
            if (first) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
            } else {
                upstream = next(incoming);
            }
            if (upstream == IterOutcome.OK && sorter == null) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
            }
            switch(upstream) {
                case NONE:
                    if (first) {
                        return upstream;
                    }
                    break outer;
                case NOT_YET:
                    throw new UnsupportedOperationException();
                case STOP:
                    return upstream;
                case OK_NEW_SCHEMA:
                case OK:
                    VectorContainer convertedBatch;
                    // only change in the case that the schema truly changes.  Artificial schema changes are ignored.
                    if (upstream == IterOutcome.OK_NEW_SCHEMA && !incoming.getSchema().equals(schema)) {
                        if (schema != null) {
                            if (unionTypeEnabled) {
                                this.schema = SchemaUtil.mergeSchemas(schema, incoming.getSchema());
                            } else {
                                throw SchemaChangeException.schemaChanged("Schema changes not supported in External Sort. Please enable Union type", schema, incoming.getSchema());
                            }
                        } else {
                            schema = incoming.getSchema();
                        }
                        convertedBatch = SchemaUtil.coerceContainer(incoming, schema, oContext);
                        for (BatchGroup b : batchGroups) {
                            b.setSchema(schema);
                        }
                        for (BatchGroup b : spilledBatchGroups) {
                            b.setSchema(schema);
                        }
                        this.sorter = createNewSorter(context, convertedBatch);
                    } else {
                        convertedBatch = SchemaUtil.coerceContainer(incoming, schema, oContext);
                    }
                    if (first) {
                        first = false;
                    }
                    if (convertedBatch.getRecordCount() == 0) {
                        for (VectorWrapper<?> w : convertedBatch) {
                            w.clear();
                        }
                        break;
                    }
                    SelectionVector2 sv2;
                    if (incoming.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.TWO_BYTE) {
                        sv2 = incoming.getSelectionVector2().clone();
                    } else {
                        try {
                            sv2 = newSV2();
                        } catch (InterruptedException e) {
                            return IterOutcome.STOP;
                        } catch (OutOfMemoryException e) {
                            throw new OutOfMemoryException(e);
                        }
                    }
                    int count = sv2.getCount();
                    totalCount += count;
                    totalBatches++;
                    sorter.setup(context, sv2, convertedBatch);
                    sorter.sort(sv2);
                    RecordBatchData rbd = new RecordBatchData(convertedBatch, oAllocator);
                    boolean success = false;
                    try {
                        rbd.setSv2(sv2);
                        batchGroups.add(new BatchGroup(rbd.getContainer(), rbd.getSv2(), oContext));
                        if (peakNumBatches < batchGroups.size()) {
                            peakNumBatches = batchGroups.size();
                            stats.setLongStat(Metric.PEAK_BATCHES_IN_MEMORY, peakNumBatches);
                        }
                        batchesSinceLastSpill++;
                        // Spill if: (1) we haven't spilled yet and lack the memory for an in-memory sort should
                        // this turn out to be the last incoming batch; (2) we haven't spilled yet and the batch
                        // count would exceed what an SV4 can address; (3) allocated memory exceeds 95% of the
                        // operator's limit; or (4) batch groups exceed the spill threshold and batches accumulated
                        // since the last spill exceed the defined limit.
                        if ((spillCount == 0 && !hasMemoryForInMemorySort(totalCount)) ||
                                (spillCount == 0 && totalBatches > Character.MAX_VALUE) ||
                                (oAllocator.getAllocatedMemory() > .95 * oAllocator.getLimit()) ||
                                (batchGroups.size() > SPILL_THRESHOLD && batchesSinceLastSpill >= SPILL_BATCH_GROUP_SIZE)) {
                            if (firstSpillBatchCount == 0) {
                                firstSpillBatchCount = batchGroups.size();
                            }
                            if (spilledBatchGroups.size() > firstSpillBatchCount / 2) {
                                logger.info("Merging spills");
                                final BatchGroup merged = mergeAndSpill(spilledBatchGroups);
                                if (merged != null) {
                                    spilledBatchGroups.addFirst(merged);
                                }
                            }
                            final BatchGroup merged = mergeAndSpill(batchGroups);
                            if (merged != null) {
                                // make sure we don't add null to spilledBatchGroups
                                spilledBatchGroups.add(merged);
                                batchesSinceLastSpill = 0;
                            }
                        }
                        success = true;
                    } finally {
                        if (!success) {
                            rbd.clear();
                        }
                    }
                    break;
                case OUT_OF_MEMORY:
                    logger.debug("received OUT_OF_MEMORY, trying to spill");
                    if (batchesSinceLastSpill > 2) {
                        final BatchGroup merged = mergeAndSpill(batchGroups);
                        if (merged != null) {
                            spilledBatchGroups.add(merged);
                            batchesSinceLastSpill = 0;
                        }
                    } else {
                        logger.debug("not enough batches to spill, sending OUT_OF_MEMORY downstream");
                        return IterOutcome.OUT_OF_MEMORY;
                    }
                    break;
                default:
                    throw new UnsupportedOperationException();
            }
        }
        if (totalCount == 0) {
            return IterOutcome.NONE;
        }
        if (spillCount == 0) {
            if (builder != null) {
                builder.clear();
                builder.close();
            }
            builder = new SortRecordBatchBuilder(oAllocator);
            for (BatchGroup group : batchGroups) {
                RecordBatchData rbd = new RecordBatchData(group.getContainer(), oAllocator);
                rbd.setSv2(group.getSv2());
                builder.add(rbd);
            }
            builder.build(context, container);
            sv4 = builder.getSv4();
            mSorter = createNewMSorter();
            mSorter.setup(context, oAllocator, getSelectionVector4(), this.container);
            // For testing memory-leak purpose, inject exception after mSorter finishes setup
            injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SETUP);
            mSorter.sort(this.container);
            // the sort may have exited prematurely because shouldContinue() returned false
            if (!context.shouldContinue()) {
                return IterOutcome.STOP;
            }
            // For testing memory-leak purpose, inject exception after mSorter finishes sorting
            injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SORT);
            sv4 = mSorter.getSV4();
            container.buildSchema(SelectionVectorMode.FOUR_BYTE);
        } else {
            // some batches were spilled
            final BatchGroup merged = mergeAndSpill(batchGroups);
            if (merged != null) {
                spilledBatchGroups.add(merged);
            }
            batchGroups.addAll(spilledBatchGroups);
            // no need to clean up spilledBatchGroups; all its batches are in batchGroups now
            spilledBatchGroups = null;
            logger.warn("Starting to merge. {} batch groups. Current allocated memory: {}", batchGroups.size(), oAllocator.getAllocatedMemory());
            VectorContainer hyperBatch = constructHyperBatch(batchGroups);
            createCopier(hyperBatch, batchGroups, container, false);
            int estimatedRecordSize = 0;
            for (VectorWrapper<?> w : batchGroups.get(0)) {
                try {
                    estimatedRecordSize += TypeHelper.getSize(w.getField().getType());
                } catch (UnsupportedOperationException e) {
                    estimatedRecordSize += 50;
                }
            }
            targetRecordCount = Math.min(MAX_BATCH_SIZE, Math.max(1, COPIER_BATCH_MEM_LIMIT / estimatedRecordSize));
            int count = copier.next(targetRecordCount);
            container.buildSchema(SelectionVectorMode.NONE);
            container.setRecordCount(count);
        }
        return IterOutcome.OK_NEW_SCHEMA;
    } catch (SchemaChangeException ex) {
        kill(false);
        context.fail(UserException.unsupportedError(ex).message("Sort doesn't currently support sorts with changing schemas").build(logger));
        return IterOutcome.STOP;
    } catch (ClassTransformationException | IOException ex) {
        kill(false);
        context.fail(ex);
        return IterOutcome.STOP;
    } catch (UnsupportedOperationException e) {
        throw new RuntimeException(e);
    }
}
Also used : ClassTransformationException(org.apache.drill.exec.exception.ClassTransformationException) RecordBatchData(org.apache.drill.exec.physical.impl.sort.RecordBatchData) VectorWrapper(org.apache.drill.exec.record.VectorWrapper) Stopwatch(com.google.common.base.Stopwatch) SortRecordBatchBuilder(org.apache.drill.exec.physical.impl.sort.SortRecordBatchBuilder) IOException(java.io.IOException) VectorContainer(org.apache.drill.exec.record.VectorContainer) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) SelectionVector2(org.apache.drill.exec.record.selection.SelectionVector2) OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException)
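
In the spilled branch above, the merge copier's output batch size is derived from a rough per-record byte estimate: sum TypeHelper.getSize() over the schema's field types, fall back to a constant for types whose size cannot be computed, and divide the copier's memory budget by the result. A hedged sketch of that sizing step; COPIER_MEM_LIMIT and MAX_BATCH_SIZE stand in for the operator's constants, and 'batch' for any representative batch group:

// Minimal sketch: estimate record width and derive a target record count for the copier.
// COPIER_MEM_LIMIT, MAX_BATCH_SIZE, and 'batch' are placeholders, not the operator's actual fields.
int estimatedRecordSize = 0;
for (final VectorWrapper<?> w : batch) {
    try {
        estimatedRecordSize += TypeHelper.getSize(w.getField().getType());  // fixed-width estimate
    } catch (UnsupportedOperationException e) {
        estimatedRecordSize += 50;                                          // fallback for variable-width types
    }
}
final int targetRecordCount = Math.min(MAX_BATCH_SIZE, Math.max(1, COPIER_MEM_LIMIT / estimatedRecordSize));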

Example 25 with VectorContainer

Use of org.apache.drill.exec.record.VectorContainer in project drill by apache.

The class PruneScanRule, method doOnMatch:

protected void doOnMatch(RelOptRuleCall call, Filter filterRel, Project projectRel, TableScan scanRel) {
    final String pruningClassName = getClass().getName();
    logger.info("Beginning partition pruning, pruning class: {}", pruningClassName);
    Stopwatch totalPruningTime = Stopwatch.createStarted();
    final PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
    PartitionDescriptor descriptor = getPartitionDescriptor(settings, scanRel);
    final BufferAllocator allocator = optimizerContext.getAllocator();
    final Object selection = getDrillTable(scanRel).getSelection();
    MetadataContext metaContext = null;
    if (selection instanceof FormatSelection) {
        metaContext = ((FormatSelection) selection).getSelection().getMetaContext();
    }
    RexNode condition = null;
    if (projectRel == null) {
        condition = filterRel.getCondition();
    } else {
        // get the filter as if it were below the projection.
        condition = RelOptUtil.pushFilterPastProject(filterRel.getCondition(), projectRel);
    }
    RewriteAsBinaryOperators visitor = new RewriteAsBinaryOperators(true, filterRel.getCluster().getRexBuilder());
    condition = condition.accept(visitor);
    Map<Integer, String> fieldNameMap = Maps.newHashMap();
    List<String> fieldNames = scanRel.getRowType().getFieldNames();
    BitSet columnBitset = new BitSet();
    BitSet partitionColumnBitSet = new BitSet();
    Map<Integer, Integer> partitionMap = Maps.newHashMap();
    int relColIndex = 0;
    for (String field : fieldNames) {
        final Integer partitionIndex = descriptor.getIdIfValid(field);
        if (partitionIndex != null) {
            fieldNameMap.put(partitionIndex, field);
            partitionColumnBitSet.set(partitionIndex);
            columnBitset.set(relColIndex);
            // mapping between the relColIndex and partitionIndex
            partitionMap.put(relColIndex, partitionIndex);
        }
        relColIndex++;
    }
    if (partitionColumnBitSet.isEmpty()) {
        logger.info("No partition columns are projected from the scan..continue. " + "Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
        setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
        return;
    }
    // stop watch to track how long we spend in different phases of pruning
    Stopwatch miscTimer = Stopwatch.createUnstarted();
    // track how long we spend building the filter tree
    miscTimer.start();
    FindPartitionConditions c = new FindPartitionConditions(columnBitset, filterRel.getCluster().getRexBuilder());
    c.analyze(condition);
    RexNode pruneCondition = c.getFinalCondition();
    BitSet referencedDirsBitSet = c.getReferencedDirs();
    logger.info("Total elapsed time to build and analyze filter tree: {} ms", miscTimer.elapsed(TimeUnit.MILLISECONDS));
    miscTimer.reset();
    if (pruneCondition == null) {
        logger.info("No conditions were found eligible for partition pruning." + "Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
        setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
        return;
    }
    // set up the partitions
    List<PartitionLocation> newPartitions = Lists.newArrayList();
    // total number of partitions
    long numTotal = 0;
    int batchIndex = 0;
    PartitionLocation firstLocation = null;
    LogicalExpression materializedExpr = null;
    String[] spInfo = null;
    int maxIndex = -1;
    BitSet matchBitSet = new BitSet();
    // Outer loop: iterate over a list of batches of PartitionLocations
    for (List<PartitionLocation> partitions : descriptor) {
        numTotal += partitions.size();
        logger.debug("Evaluating partition pruning for batch {}", batchIndex);
        if (batchIndex == 0) {
            // save the first location in case everything is pruned
            firstLocation = partitions.get(0);
        }
        final NullableBitVector output = new NullableBitVector(MaterializedField.create("", Types.optional(MinorType.BIT)), allocator);
        final VectorContainer container = new VectorContainer();
        try {
            final ValueVector[] vectors = new ValueVector[descriptor.getMaxHierarchyLevel()];
            for (int partitionColumnIndex : BitSets.toIter(partitionColumnBitSet)) {
                SchemaPath column = SchemaPath.getSimplePath(fieldNameMap.get(partitionColumnIndex));
                MajorType type = descriptor.getVectorType(column, settings);
                MaterializedField field = MaterializedField.create(column.getAsUnescapedPath(), type);
                ValueVector v = TypeHelper.getNewVector(field, allocator);
                v.allocateNew();
                vectors[partitionColumnIndex] = v;
                container.add(v);
            }
            // track how long we spend populating partition column vectors
            miscTimer.start();
            // populate partition vectors.
            descriptor.populatePartitionVectors(vectors, partitions, partitionColumnBitSet, fieldNameMap);
            logger.info("Elapsed time to populate partitioning column vectors: {} ms within batchIndex: {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex);
            miscTimer.reset();
            // materialize the expression; only need to do this once
            if (batchIndex == 0) {
                materializedExpr = materializePruneExpr(pruneCondition, settings, scanRel, container);
                if (materializedExpr == null) {
                    // continue without partition pruning; no need to log anything here since
                    // materializePruneExpr logs it already
                    logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
                    setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
                    return;
                }
            }
            output.allocateNew(partitions.size());
            // start the timer to evaluate how long we spend in the interpreter evaluation
            miscTimer.start();
            InterpreterEvaluator.evaluate(partitions.size(), optimizerContext, container, output, materializedExpr);
            logger.info("Elapsed time in interpreter evaluation: {} ms within batchIndex: {} with # of partitions : {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex, partitions.size());
            miscTimer.reset();
            int recordCount = 0;
            int qualifiedCount = 0;
            // apply the single-partition check only for composite partitions
            if (descriptor.supportsMetadataCachePruning() && partitions.get(0).isCompositePartition()) {
                // Inner loop: within each batch iterate over the PartitionLocations
                for (PartitionLocation part : partitions) {
                    assert part.isCompositePartition();
                    if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
                        newPartitions.add(part);
                        // Rather than using the PartitionLocation, get the array of partition values for the directories that are
                        // referenced by the filter since we are not interested in directory references in other parts of the query.
                        Pair<String[], Integer> p = composePartition(referencedDirsBitSet, partitionMap, vectors, recordCount);
                        String[] parts = p.getLeft();
                        int tmpIndex = p.getRight();
                        maxIndex = Math.max(maxIndex, tmpIndex);
                        if (spInfo == null) {
                            // initialization
                            spInfo = parts;
                            for (int j = 0; j <= tmpIndex; j++) {
                                if (parts[j] != null) {
                                    matchBitSet.set(j);
                                }
                            }
                        } else {
                            // compare the new partition with existing partition
                            for (int j = 0; j <= tmpIndex; j++) {
                                if (parts[j] == null || spInfo[j] == null) {
                                    // nulls don't match
                                    matchBitSet.clear(j);
                                } else {
                                    if (!parts[j].equals(spInfo[j])) {
                                        matchBitSet.clear(j);
                                    }
                                }
                            }
                        }
                        qualifiedCount++;
                    }
                    recordCount++;
                }
            } else {
                // Inner loop: within each batch iterate over the PartitionLocations
                for (PartitionLocation part : partitions) {
                    if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
                        newPartitions.add(part);
                        qualifiedCount++;
                    }
                    recordCount++;
                }
            }
            logger.debug("Within batch {}: total records: {}, qualified records: {}", batchIndex, recordCount, qualifiedCount);
            batchIndex++;
        } catch (Exception e) {
            logger.warn("Exception while trying to prune partition.", e);
            logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
            setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
            // continue without partition pruning
            return;
        } finally {
            container.clear();
            if (output != null) {
                output.clear();
            }
        }
    }
    try {
        if (newPartitions.size() == numTotal) {
            logger.info("No partitions were eligible for pruning");
            return;
        }
        // handle the case all partitions are filtered out.
        boolean canDropFilter = true;
        boolean wasAllPartitionsPruned = false;
        String cacheFileRoot = null;
        if (newPartitions.isEmpty()) {
            assert firstLocation != null;
            // Add the first non-composite partition location, since execution requires schema.
            // In such case, we should not drop filter.
            newPartitions.add(firstLocation.getPartitionLocationRecursive().get(0));
            canDropFilter = false;
            // NOTE: with DRILL-4530, the PruneScanRule may be called with only a list of
            // directories first and the non-composite partition location will still return
            // directories, not files.  So, additional processing is done depending on this flag
            wasAllPartitionsPruned = true;
            logger.info("All {} partitions were pruned; added back a single partition to allow creating a schema", numTotal);
            // set the cacheFileRoot appropriately
            if (firstLocation.isCompositePartition()) {
                cacheFileRoot = descriptor.getBaseTableLocation() + firstLocation.getCompositePartitionPath();
            }
        }
        logger.info("Pruned {} partitions down to {}", numTotal, newPartitions.size());
        List<RexNode> conjuncts = RelOptUtil.conjunctions(condition);
        List<RexNode> pruneConjuncts = RelOptUtil.conjunctions(pruneCondition);
        conjuncts.removeAll(pruneConjuncts);
        RexNode newCondition = RexUtil.composeConjunction(filterRel.getCluster().getRexBuilder(), conjuncts, false);
        RewriteCombineBinaryOperators reverseVisitor = new RewriteCombineBinaryOperators(true, filterRel.getCluster().getRexBuilder());
        condition = condition.accept(reverseVisitor);
        pruneCondition = pruneCondition.accept(reverseVisitor);
        if (descriptor.supportsMetadataCachePruning() && !wasAllPartitionsPruned) {
            // if metadata cache file could potentially be used, then assign a proper cacheFileRoot
            int index = -1;
            if (!matchBitSet.isEmpty()) {
                String path = "";
                index = matchBitSet.length() - 1;
                for (int j = 0; j < matchBitSet.length(); j++) {
                    if (!matchBitSet.get(j)) {
                        // stop at the first index with no match and use the immediate
                        // previous index
                        index = j - 1;
                        break;
                    }
                }
                for (int j = 0; j <= index; j++) {
                    path += "/" + spInfo[j];
                }
                cacheFileRoot = descriptor.getBaseTableLocation() + path;
            }
            if (index != maxIndex) {
                // if multiple partitions are being selected, we should not drop the filter
                // since we are reading the cache file at a parent/ancestor level
                canDropFilter = false;
            }
        }
        RelNode inputRel = descriptor.supportsMetadataCachePruning() ? descriptor.createTableScan(newPartitions, cacheFileRoot, wasAllPartitionsPruned, metaContext) : descriptor.createTableScan(newPartitions, wasAllPartitionsPruned);
        if (projectRel != null) {
            inputRel = projectRel.copy(projectRel.getTraitSet(), Collections.singletonList(inputRel));
        }
        if (newCondition.isAlwaysTrue() && canDropFilter) {
            call.transformTo(inputRel);
        } else {
            final RelNode newFilter = filterRel.copy(filterRel.getTraitSet(), Collections.singletonList(inputRel));
            call.transformTo(newFilter);
        }
        setPruneStatus(metaContext, PruneStatus.PRUNED);
    } catch (Exception e) {
        logger.warn("Exception while using the pruned partitions.", e);
    } finally {
        logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
    }
}
Also used : PlannerSettings(org.apache.drill.exec.planner.physical.PlannerSettings) Stopwatch(com.google.common.base.Stopwatch) FormatSelection(org.apache.drill.exec.store.dfs.FormatSelection) LogicalExpression(org.apache.drill.common.expression.LogicalExpression) NullableBitVector(org.apache.drill.exec.vector.NullableBitVector) SchemaPath(org.apache.drill.common.expression.SchemaPath) PartitionDescriptor(org.apache.drill.exec.planner.PartitionDescriptor) FileSystemPartitionDescriptor(org.apache.drill.exec.planner.FileSystemPartitionDescriptor) PartitionLocation(org.apache.drill.exec.planner.PartitionLocation) MajorType(org.apache.drill.common.types.TypeProtos.MajorType) BitSet(java.util.BitSet) MaterializedField(org.apache.drill.exec.record.MaterializedField) BufferAllocator(org.apache.drill.exec.memory.BufferAllocator) VectorContainer(org.apache.drill.exec.record.VectorContainer) ValueVector(org.apache.drill.exec.vector.ValueVector) RelNode(org.apache.calcite.rel.RelNode) MetadataContext(org.apache.drill.exec.store.dfs.MetadataContext) RexNode(org.apache.calcite.rex.RexNode)
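
The rule's use of VectorContainer is narrow: it builds a temporary, allocator-backed container of partition-column vectors so the prune expression can be evaluated by the interpreter, and it clears that container in a finally block regardless of the outcome. A hedged single-column sketch of that lifecycle; 'allocator', 'columnName', and 'type' are assumed inputs, and only calls appearing in doOnMatch() are used:

// Minimal sketch: temporary container holding one partition column for interpreted evaluation.
final VectorContainer container = new VectorContainer();
final MaterializedField field = MaterializedField.create(columnName, type);
final ValueVector v = TypeHelper.getNewVector(field, allocator);
v.allocateNew();
container.add(v);
try {
    // populate 'v' with partition values, materialize the prune expression against 'container',
    // then call InterpreterEvaluator.evaluate(...) as doOnMatch() does above
} finally {
    container.clear();                                 // always release the vectors
}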

Aggregations

VectorContainer (org.apache.drill.exec.record.VectorContainer): 27 uses
ValueVector (org.apache.drill.exec.vector.ValueVector): 11 uses
MaterializedField (org.apache.drill.exec.record.MaterializedField): 8 uses
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 6 uses
SelectionVector4 (org.apache.drill.exec.record.selection.SelectionVector4): 6 uses
Stopwatch (com.google.common.base.Stopwatch): 5 uses
SortRecordBatchBuilder (org.apache.drill.exec.physical.impl.sort.SortRecordBatchBuilder): 5 uses
IOException (java.io.IOException): 4 uses
SchemaPath (org.apache.drill.common.expression.SchemaPath): 4 uses
BatchSchema (org.apache.drill.exec.record.BatchSchema): 4 uses
CachedVectorContainer (org.apache.drill.exec.cache.CachedVectorContainer): 3 uses
VectorAccessibleSerializable (org.apache.drill.exec.cache.VectorAccessibleSerializable): 3 uses
VectorWrapper (org.apache.drill.exec.record.VectorWrapper): 3 uses
WritableBatch (org.apache.drill.exec.record.WritableBatch): 3 uses
DrillBuf (io.netty.buffer.DrillBuf): 2 uses
LogicalExpression (org.apache.drill.common.expression.LogicalExpression): 2 uses
MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 2 uses
ClassTransformationException (org.apache.drill.exec.exception.ClassTransformationException): 2 uses
OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException): 2 uses
RecordBatchData (org.apache.drill.exec.physical.impl.sort.RecordBatchData): 2 uses