Example 6 with SelectionVector2

use of org.apache.drill.exec.record.selection.SelectionVector2 in project drill by apache.

the class ExternalSortBatch method newSV2.

/**
   * Allocate and initialize the selection vector used as the sort index.
   * Assumes memory is available because the memory manager has already
   * reserved space for the vector.
   *
   * @return a new, populated selection vector 2
   */
private SelectionVector2 newSV2() {
    SelectionVector2 sv2 = new SelectionVector2(allocator);
    if (!sv2.allocateNewSafe(incoming.getRecordCount())) {
        throw UserException.resourceError(new OutOfMemoryException("Unable to allocate sv2 buffer")).build(logger);
    }
    for (int i = 0; i < incoming.getRecordCount(); i++) {
        sv2.setIndex(i, (char) i);
    }
    sv2.setRecordCount(incoming.getRecordCount());
    return sv2;
}
Also used : SelectionVector2(org.apache.drill.exec.record.selection.SelectionVector2) OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException)
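
A quick aside on why this identity mapping is useful: the sorter that runs after newSV2() permutes the two-byte indices rather than moving any row data. The following is a minimal plain-Java sketch of that indirection, with no Drill dependencies; the names and the insertion sort are illustrative only.

public class Sv2Sketch {
    public static void main(String[] args) {
        int[] rowKeys = { 42, 7, 19, 3, 25 };     // stand-in for a value vector
        char[] sv2 = new char[rowKeys.length];    // 2 bytes per entry, like SelectionVector2
        for (char i = 0; i < sv2.length; i++) {
            sv2[i] = i;                           // identity mapping, as in newSV2()
        }
        // Sort the indices by key; the underlying "vector" never moves.
        for (int i = 1; i < sv2.length; i++) {
            char cur = sv2[i];
            int j = i - 1;
            while (j >= 0 && rowKeys[sv2[j]] > rowKeys[cur]) {
                sv2[j + 1] = sv2[j];
                j--;
            }
            sv2[j + 1] = cur;
        }
        // Reading through the SV2 yields rows in sorted order: 3 7 19 25 42.
        for (char idx : sv2) {
            System.out.println(rowKeys[idx]);
        }
    }
}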

Example 7 with SelectionVector2

use of org.apache.drill.exec.record.selection.SelectionVector2 in project drill by apache.

the class SortRecordBatchBuilder method add.

public void add(RecordBatchData rbd) {
    long batchBytes = getSize(rbd.getContainer());
    if (batchBytes == 0 && batches.size() > 0) {
        return;
    }
    if (runningBatches >= Character.MAX_VALUE) {
        final String errMsg = String.format("Tried to add more than %d number of batches.", (int) Character.MAX_VALUE);
        logger.error(errMsg);
        throw new DrillRuntimeException(errMsg);
    }
    if (!reservation.add(rbd.getRecordCount() * 4)) {
        final String errMsg = String.format("Failed to pre-allocate memory for SV. " + "Existing recordCount*4 = %d, " + "incoming batch recordCount*4 = %d", recordCount * 4, rbd.getRecordCount() * 4);
        logger.error(errMsg);
        throw new DrillRuntimeException(errMsg);
    }
    if (rbd.getRecordCount() == 0 && batches.size() > 0) {
        rbd.getContainer().zeroVectors();
        SelectionVector2 sv2 = rbd.getSv2();
        if (sv2 != null) {
            sv2.clear();
        }
        return;
    }
    runningBatches++;
    batches.put(rbd.getContainer().getSchema(), rbd);
    recordCount += rbd.getRecordCount();
}
Also used : SelectionVector2(org.apache.drill.exec.record.selection.SelectionVector2) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException)
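
Two limits in add() trace back to the SelectionVector4 that the builder eventually produces: each SV4 entry is a 4-byte int whose upper 16 bits address the batch, which explains both the recordCount * 4 reservation and the Character.MAX_VALUE cap on runningBatches. A small sketch of that arithmetic (class and method names here are illustrative, not Drill APIs):

public class Sv4BudgetSketch {
    static final int BYTES_PER_SV4_ENTRY = 4;            // one int per record
    static final int MAX_BATCHES = Character.MAX_VALUE;  // 16-bit batch index => 65535 batches

    static long sv4Bytes(long recordCount) {
        return recordCount * BYTES_PER_SV4_ENTRY;        // what reservation.add() must cover
    }

    public static void main(String[] args) {
        System.out.println(sv4Bytes(1_000_000));         // 4000000 bytes for one million records
        System.out.println(MAX_BATCHES);                 // 65535
    }
}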

Example 8 with SelectionVector2

use of org.apache.drill.exec.record.selection.SelectionVector2 in project drill by apache.

the class DrillTestWrapper method addToCombinedVectorResults.

/**
   * Add to result vectors and compare each batch schema against the expected
   * schema while iterating batches.
   * @param batches the batches to iterate over
   * @param expectedSchema the schema the batches should contain; a
   *                       SchemaChangeException is thrown if a batch with a
   *                       different schema is encountered
   * @return a map from column name to the combined list of values across all batches
   * @throws SchemaChangeException
   * @throws UnsupportedEncodingException
   */
public static Map<String, List<Object>> addToCombinedVectorResults(Iterable<VectorAccessible> batches, BatchSchema expectedSchema) throws SchemaChangeException, UnsupportedEncodingException {
    // TODO - this does not handle schema changes
    Map<String, List<Object>> combinedVectors = new TreeMap<>();
    long totalRecords = 0;
    BatchSchema schema = null;
    for (VectorAccessible loader : batches) {
        if (expectedSchema != null) {
            if (!expectedSchema.equals(loader.getSchema())) {
                throw new SchemaChangeException(String.format("Batch schema does not match expected schema\n" + "Actual schema: %s.  Expected schema : %s", loader.getSchema(), expectedSchema));
            }
        }
        // TODO: batch loading no longer throws SchemaChangeException, so the
        // throws clause above could be checked and cleaned up.
        if (schema == null) {
            schema = loader.getSchema();
            for (MaterializedField mf : schema) {
                combinedVectors.put(SchemaPath.getSimplePath(mf.getPath()).toExpr(), new ArrayList<Object>());
            }
        } else {
            // TODO - actually handle schema changes, this is just to get access to the SelectionVectorMode
            // of the current batch, the check for a null schema is used to only mutate the schema once
            // need to add new vectors and null fill for previous batches? distinction between null and non-existence important?
            schema = loader.getSchema();
        }
        logger.debug("reading batch with " + loader.getRecordCount() + " rows, total read so far " + totalRecords);
        totalRecords += loader.getRecordCount();
        for (VectorWrapper<?> w : loader) {
            String field = SchemaPath.getSimplePath(w.getField().getPath()).toExpr();
            ValueVector[] vectors;
            if (w.isHyper()) {
                vectors = w.getValueVectors();
            } else {
                vectors = new ValueVector[] { w.getValueVector() };
            }
            SelectionVector2 sv2 = null;
            SelectionVector4 sv4 = null;
            switch(schema.getSelectionVectorMode()) {
                case TWO_BYTE:
                    sv2 = loader.getSelectionVector2();
                    break;
                case FOUR_BYTE:
                    sv4 = loader.getSelectionVector4();
                    break;
            }
            if (sv4 != null) {
                for (int j = 0; j < sv4.getCount(); j++) {
                    int complexIndex = sv4.get(j);
                    int batchIndex = complexIndex >> 16;
                    int recordIndexInBatch = complexIndex & 65535;
                    Object obj = vectors[batchIndex].getAccessor().getObject(recordIndexInBatch);
                    if (obj != null) {
                        if (obj instanceof Text) {
                            obj = obj.toString();
                        }
                    }
                    combinedVectors.get(field).add(obj);
                }
            } else {
                for (ValueVector vv : vectors) {
                    for (int j = 0; j < loader.getRecordCount(); j++) {
                        int index;
                        if (sv2 != null) {
                            index = sv2.getIndex(j);
                        } else {
                            index = j;
                        }
                        Object obj = vv.getAccessor().getObject(index);
                        if (obj != null) {
                            if (obj instanceof Text) {
                                obj = obj.toString();
                            }
                        }
                        combinedVectors.get(field).add(obj);
                    }
                }
            }
        }
    }
    return combinedVectors;
}
Also used : VectorAccessible(org.apache.drill.exec.record.VectorAccessible) MaterializedField(org.apache.drill.exec.record.MaterializedField) Text(org.apache.drill.exec.util.Text) TreeMap(java.util.TreeMap) ValueVector(org.apache.drill.exec.vector.ValueVector) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) BatchSchema(org.apache.drill.exec.record.BatchSchema) SelectionVector2(org.apache.drill.exec.record.selection.SelectionVector2) ArrayList(java.util.ArrayList) List(java.util.List) SelectionVector4(org.apache.drill.exec.record.selection.SelectionVector4)
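
The hyper-batch branch above splits each SV4 entry with complexIndex >> 16 and complexIndex & 65535. A self-contained sketch of that packing scheme, assuming entries are non-negative so the signed shift behaves like an unsigned one (helper names are illustrative, not Drill's):

public class Sv4DecodeSketch {
    static int encode(int batchIndex, int recordIndex) {
        return (batchIndex << 16) | (recordIndex & 0xFFFF);  // upper 16 bits: batch, lower 16: record
    }

    static int batchIndex(int sv4Entry) {
        return sv4Entry >>> 16;
    }

    static int recordIndex(int sv4Entry) {
        return sv4Entry & 0xFFFF;    // same as & 65535 in the code above
    }

    public static void main(String[] args) {
        int entry = encode(3, 1234);
        System.out.println(batchIndex(entry));   // 3
        System.out.println(recordIndex(entry));  // 1234
    }
}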

Example 9 with SelectionVector2

use of org.apache.drill.exec.record.selection.SelectionVector2 in project drill by apache.

the class FilterRecordBatch method setupNewSchema.

@Override
protected boolean setupNewSchema() throws SchemaChangeException {
    if (sv2 != null) {
        sv2.clear();
    }
    switch(incoming.getSchema().getSelectionVectorMode()) {
        case NONE:
            if (sv2 == null) {
                sv2 = new SelectionVector2(oContext.getAllocator());
            }
            this.filter = generateSV2Filterer();
            break;
        case TWO_BYTE:
            sv2 = new SelectionVector2(oContext.getAllocator());
            this.filter = generateSV2Filterer();
            break;
        case FOUR_BYTE:
        /*
         * Filter does not support SV4 handling: there are a couple of minor issues
         * in the SV4 handling logic, and a filter should always be pushed past the
         * sort anyway, so SV4 support is disabled in FilterPrel.
         *
        // set up the multi-batch selection vector
        this.svAllocator = oContext.getAllocator().getNewPreAllocator();
        if (!svAllocator.preAllocate(incoming.getRecordCount()*4))
          throw new SchemaChangeException("Attempted to filter an SV4 which exceeds allowed memory (" +
                                          incoming.getRecordCount() * 4 + " bytes)");
        sv4 = new SelectionVector4(svAllocator.getAllocation(), incoming.getRecordCount(), Character.MAX_VALUE);
        this.filter = generateSV4Filterer();
        break;
        */
        default:
            throw new UnsupportedOperationException();
    }
    if (container.isSchemaChanged()) {
        container.buildSchema(SelectionVectorMode.TWO_BYTE);
        return true;
    }
    return false;
}
Also used : SelectionVector2(org.apache.drill.exec.record.selection.SelectionVector2)
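
generateSV2Filterer() compiles the filter expression into runtime-generated code that writes only the indices of passing rows into the SV2. A plain-Java sketch of the behavior that generated code ends up with (the predicate and all names here are illustrative):

public class FilterSv2Sketch {
    public static void main(String[] args) {
        int[] values = { 5, -1, 8, 0, 12, -7 };
        char[] sv2 = new char[values.length];    // worst case: every row passes
        int selected = 0;
        for (int i = 0; i < values.length; i++) {
            if (values[i] > 0) {                 // the filter predicate
                sv2[selected++] = (char) i;      // record the index, not the value
            }
        }
        // Downstream operators read 'selected' records through the SV2.
        for (int i = 0; i < selected; i++) {
            System.out.println(values[sv2[i]]);  // 5, 8, 12
        }
    }
}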

Example 10 with SelectionVector2

use of org.apache.drill.exec.record.selection.SelectionVector2 in project drill by apache.

the class ExternalSortBatch method innerNext.

@SuppressWarnings("resource")
@Override
public IterOutcome innerNext() {
    if (schema != null) {
        if (spillCount == 0) {
            return (getSelectionVector4().next()) ? IterOutcome.OK : IterOutcome.NONE;
        } else {
            Stopwatch w = Stopwatch.createStarted();
            int count = copier.next(targetRecordCount);
            if (count > 0) {
                long t = w.elapsed(TimeUnit.MICROSECONDS);
                logger.debug("Took {} us to merge {} records", t, count);
                container.setRecordCount(count);
                return IterOutcome.OK;
            } else {
                logger.debug("copier returned 0 records");
                return IterOutcome.NONE;
            }
        }
    }
    int totalCount = 0;
    // total number of batches received so far
    int totalBatches = 0;
    try {
        container.clear();
        outer: while (true) {
            IterOutcome upstream;
            if (first) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
            } else {
                upstream = next(incoming);
            }
            if (upstream == IterOutcome.OK && sorter == null) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
            }
            switch(upstream) {
                case NONE:
                    if (first) {
                        return upstream;
                    }
                    break outer;
                case NOT_YET:
                    throw new UnsupportedOperationException();
                case STOP:
                    return upstream;
                case OK_NEW_SCHEMA:
                case OK:
                    VectorContainer convertedBatch;
                    // Only react when the schema truly changes; artificial schema changes are ignored.
                    if (upstream == IterOutcome.OK_NEW_SCHEMA && !incoming.getSchema().equals(schema)) {
                        if (schema != null) {
                            if (unionTypeEnabled) {
                                this.schema = SchemaUtil.mergeSchemas(schema, incoming.getSchema());
                            } else {
                                throw SchemaChangeException.schemaChanged("Schema changes not supported in External Sort. Please enable Union type", schema, incoming.getSchema());
                            }
                        } else {
                            schema = incoming.getSchema();
                        }
                        convertedBatch = SchemaUtil.coerceContainer(incoming, schema, oContext);
                        for (BatchGroup b : batchGroups) {
                            b.setSchema(schema);
                        }
                        for (BatchGroup b : spilledBatchGroups) {
                            b.setSchema(schema);
                        }
                        this.sorter = createNewSorter(context, convertedBatch);
                    } else {
                        convertedBatch = SchemaUtil.coerceContainer(incoming, schema, oContext);
                    }
                    if (first) {
                        first = false;
                    }
                    if (convertedBatch.getRecordCount() == 0) {
                        for (VectorWrapper<?> w : convertedBatch) {
                            w.clear();
                        }
                        break;
                    }
                    SelectionVector2 sv2;
                    if (incoming.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.TWO_BYTE) {
                        sv2 = incoming.getSelectionVector2().clone();
                    } else {
                        try {
                            sv2 = newSV2();
                        } catch (InterruptedException e) {
                            return IterOutcome.STOP;
                        } catch (OutOfMemoryException e) {
                            throw new OutOfMemoryException(e);
                        }
                    }
                    int count = sv2.getCount();
                    totalCount += count;
                    totalBatches++;
                    sorter.setup(context, sv2, convertedBatch);
                    sorter.sort(sv2);
                    RecordBatchData rbd = new RecordBatchData(convertedBatch, oAllocator);
                    boolean success = false;
                    try {
                        rbd.setSv2(sv2);
                        batchGroups.add(new BatchGroup(rbd.getContainer(), rbd.getSv2(), oContext));
                        if (peakNumBatches < batchGroups.size()) {
                            peakNumBatches = batchGroups.size();
                            stats.setLongStat(Metric.PEAK_BATCHES_IN_MEMORY, peakNumBatches);
                        }
                        batchesSinceLastSpill++;
                        // Spill if: (a) we haven't spilled so far and don't have enough memory for the
                        // MSorter should this turn out to be the last incoming batch; (b) we haven't
                        // spilled so far and would exceed the maximum number of batches an SV4 can
                        // address; (c) current memory used exceeds 95% of this operator's memory limit;
                        // or (d) the number of batches since the last spill exceeds the defined limit.
                        if ((spillCount == 0 && !hasMemoryForInMemorySort(totalCount)) ||
                            (spillCount == 0 && totalBatches > Character.MAX_VALUE) ||
                            (oAllocator.getAllocatedMemory() > .95 * oAllocator.getLimit()) ||
                            (batchGroups.size() > SPILL_THRESHOLD && batchesSinceLastSpill >= SPILL_BATCH_GROUP_SIZE)) {
                            if (firstSpillBatchCount == 0) {
                                firstSpillBatchCount = batchGroups.size();
                            }
                            if (spilledBatchGroups.size() > firstSpillBatchCount / 2) {
                                logger.info("Merging spills");
                                final BatchGroup merged = mergeAndSpill(spilledBatchGroups);
                                if (merged != null) {
                                    spilledBatchGroups.addFirst(merged);
                                }
                            }
                            final BatchGroup merged = mergeAndSpill(batchGroups);
                            if (merged != null) {
                                // make sure we don't add null to spilledBatchGroups
                                spilledBatchGroups.add(merged);
                                batchesSinceLastSpill = 0;
                            }
                        }
                        success = true;
                    } finally {
                        if (!success) {
                            rbd.clear();
                        }
                    }
                    break;
                case OUT_OF_MEMORY:
                    logger.debug("received OUT_OF_MEMORY, trying to spill");
                    if (batchesSinceLastSpill > 2) {
                        final BatchGroup merged = mergeAndSpill(batchGroups);
                        if (merged != null) {
                            spilledBatchGroups.add(merged);
                            batchesSinceLastSpill = 0;
                        }
                    } else {
                        logger.debug("not enough batches to spill, sending OUT_OF_MEMORY downstream");
                        return IterOutcome.OUT_OF_MEMORY;
                    }
                    break;
                default:
                    throw new UnsupportedOperationException();
            }
        }
        if (totalCount == 0) {
            return IterOutcome.NONE;
        }
        if (spillCount == 0) {
            if (builder != null) {
                builder.clear();
                builder.close();
            }
            builder = new SortRecordBatchBuilder(oAllocator);
            for (BatchGroup group : batchGroups) {
                RecordBatchData rbd = new RecordBatchData(group.getContainer(), oAllocator);
                rbd.setSv2(group.getSv2());
                builder.add(rbd);
            }
            builder.build(context, container);
            sv4 = builder.getSv4();
            mSorter = createNewMSorter();
            mSorter.setup(context, oAllocator, getSelectionVector4(), this.container);
            // For testing memory-leak purpose, inject exception after mSorter finishes setup
            injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SETUP);
            mSorter.sort(this.container);
            // The sort may have exited prematurely because shouldContinue() returned false.
            if (!context.shouldContinue()) {
                return IterOutcome.STOP;
            }
            // For testing memory-leak purpose, inject exception after mSorter finishes sorting
            injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SORT);
            sv4 = mSorter.getSV4();
            container.buildSchema(SelectionVectorMode.FOUR_BYTE);
        } else {
            // some batches were spilled
            final BatchGroup merged = mergeAndSpill(batchGroups);
            if (merged != null) {
                spilledBatchGroups.add(merged);
            }
            batchGroups.addAll(spilledBatchGroups);
            // No need to clean up spilledBatchGroups; all of its batches are in batchGroups now.
            spilledBatchGroups = null;
            logger.warn("Starting to merge. {} batch groups. Current allocated memory: {}", batchGroups.size(), oAllocator.getAllocatedMemory());
            VectorContainer hyperBatch = constructHyperBatch(batchGroups);
            createCopier(hyperBatch, batchGroups, container, false);
            int estimatedRecordSize = 0;
            for (VectorWrapper<?> w : batchGroups.get(0)) {
                try {
                    estimatedRecordSize += TypeHelper.getSize(w.getField().getType());
                } catch (UnsupportedOperationException e) {
                    estimatedRecordSize += 50;
                }
            }
            targetRecordCount = Math.min(MAX_BATCH_SIZE, Math.max(1, COPIER_BATCH_MEM_LIMIT / estimatedRecordSize));
            int count = copier.next(targetRecordCount);
            container.buildSchema(SelectionVectorMode.NONE);
            container.setRecordCount(count);
        }
        return IterOutcome.OK_NEW_SCHEMA;
    } catch (SchemaChangeException ex) {
        kill(false);
        context.fail(UserException.unsupportedError(ex).message("Sort doesn't currently support sorts with changing schemas").build(logger));
        return IterOutcome.STOP;
    } catch (ClassTransformationException | IOException ex) {
        kill(false);
        context.fail(ex);
        return IterOutcome.STOP;
    } catch (UnsupportedOperationException e) {
        throw new RuntimeException(e);
    }
}
Also used : ClassTransformationException(org.apache.drill.exec.exception.ClassTransformationException) RecordBatchData(org.apache.drill.exec.physical.impl.sort.RecordBatchData) VectorWrapper(org.apache.drill.exec.record.VectorWrapper) Stopwatch(com.google.common.base.Stopwatch) SortRecordBatchBuilder(org.apache.drill.exec.physical.impl.sort.SortRecordBatchBuilder) IOException(java.io.IOException) VectorContainer(org.apache.drill.exec.record.VectorContainer) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) SelectionVector2(org.apache.drill.exec.record.selection.SelectionVector2) OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException)
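
One detail worth isolating from innerNext() is how the merge phase sizes its output batches: it estimates the bytes per record (falling back to a guess of 50 for types TypeHelper cannot size, as above), then fits the batch to a memory budget. A sketch of that arithmetic; the constant values here are assumptions for the example, not necessarily Drill's:

public class CopierBatchSizeSketch {
    static final int MAX_BATCH_SIZE = 65536;             // assumed record-count cap
    static final int COPIER_BATCH_MEM_LIMIT = 1 << 20;   // assumed 1 MiB budget

    static int targetRecordCount(int estimatedRecordSize) {
        // Same shape as the expression in innerNext(): budget / record size,
        // floored at 1 record and capped at the maximum batch size.
        return Math.min(MAX_BATCH_SIZE,
                Math.max(1, COPIER_BATCH_MEM_LIMIT / estimatedRecordSize));
    }

    public static void main(String[] args) {
        System.out.println(targetRecordCount(100));       // 10485
        System.out.println(targetRecordCount(4));         // 65536 (capped)
        System.out.println(targetRecordCount(2_000_000)); // 1 (floored)
    }
}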

Aggregations

SelectionVector2 (org.apache.drill.exec.record.selection.SelectionVector2) 10
OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException) 5
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException) 4
Stopwatch (com.google.common.base.Stopwatch) 2
IOException (java.io.IOException) 2
RecordBatchData (org.apache.drill.exec.physical.impl.sort.RecordBatchData) 2
VectorContainer (org.apache.drill.exec.record.VectorContainer) 2
DrillBuf (io.netty.buffer.DrillBuf) 1
ArrayList (java.util.ArrayList) 1
List (java.util.List) 1
TreeMap (java.util.TreeMap) 1
DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException) 1
UserException (org.apache.drill.common.exceptions.UserException) 1
ClassTransformationException (org.apache.drill.exec.exception.ClassTransformationException) 1
SortRecordBatchBuilder (org.apache.drill.exec.physical.impl.sort.SortRecordBatchBuilder) 1
RecordBatchSizer (org.apache.drill.exec.physical.impl.spill.RecordBatchSizer) 1
SingleBatchSorter (org.apache.drill.exec.physical.impl.xsort.SingleBatchSorter) 1
InputBatch (org.apache.drill.exec.physical.impl.xsort.managed.BatchGroup.InputBatch) 1
BatchSchema (org.apache.drill.exec.record.BatchSchema) 1
ExpandableHyperContainer (org.apache.drill.exec.record.ExpandableHyperContainer) 1