
Example 46 with TypedFieldId

Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto.

From the class OrderedPartitionRecordBatch, the method setupNewSchema:

/**
 * Sets up a projection that transfers all of the columns in the batch and also populates the partition
 * column based on which partition each record falls into in the partition table.
 *
 * @param batch the incoming batch whose columns are transferred to the output container
 * @throws SchemaChangeException if an ordering expression cannot be materialized or the generated class cannot be loaded
 */
protected void setupNewSchema(VectorAccessible batch) throws SchemaChangeException {
    container.clear();
    final ErrorCollector collector = new ErrorCollectorImpl();
    final List<TransferPair> transfers = Lists.newArrayList();
    final ClassGenerator<OrderedPartitionProjector> cg = CodeGenerator.getRoot(OrderedPartitionProjector.TEMPLATE_DEFINITION, context.getOptions());
    for (VectorWrapper<?> vw : batch) {
        TransferPair tp = vw.getValueVector().getTransferPair(oContext.getAllocator());
        transfers.add(tp);
        container.add(tp.getTo());
    }
    cg.setMappingSet(mainMapping);
    int count = 0;
    for (Ordering od : popConfig.getOrderings()) {
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), batch, collector, context.getFunctionRegistry());
        if (collector.hasErrors()) {
            throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
        }
        cg.setMappingSet(incomingMapping);
        ClassGenerator.HoldingContainer left = cg.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
        cg.setMappingSet(partitionMapping);
        ClassGenerator.HoldingContainer right = cg.addExpr(new ValueVectorReadExpression(new TypedFieldId(expr.getMajorType(), count++)), ClassGenerator.BlkCreateMode.FALSE);
        cg.setMappingSet(mainMapping);
        // next we wrap the two comparison sides and add the expression block for the comparison.
        LogicalExpression fh = FunctionGenerationHelper.getOrderingComparator(od.nullsSortHigh(), left, right, context.getFunctionRegistry());
        ClassGenerator.HoldingContainer out = cg.addExpr(fh, ClassGenerator.BlkCreateMode.FALSE);
        JConditional jc = cg.getEvalBlock()._if(out.getValue().ne(JExpr.lit(0)));
        if (od.getDirection() == Direction.ASCENDING) {
            jc._then()._return(out.getValue());
        } else {
            jc._then()._return(out.getValue().minus());
        }
    }
    cg.getEvalBlock()._return(JExpr.lit(0));
    container.add(this.partitionKeyVector);
    container.buildSchema(batch.getSchema().getSelectionVectorMode());
    try {
        this.projector = context.getImplementationClass(cg);
        projector.setup(context, batch, this, transfers, partitionVectors, partitions, popConfig.getRef());
    } catch (ClassTransformationException | IOException e) {
        throw new SchemaChangeException("Failure while attempting to load generated class", e);
    }
}
Also used:
TransferPair (org.apache.drill.exec.record.TransferPair)
ClassTransformationException (org.apache.drill.exec.exception.ClassTransformationException)
ErrorCollector (org.apache.drill.common.expression.ErrorCollector)
IOException (java.io.IOException)
ErrorCollectorImpl (org.apache.drill.common.expression.ErrorCollectorImpl)
ValueVectorReadExpression (org.apache.drill.exec.expr.ValueVectorReadExpression)
LogicalExpression (org.apache.drill.common.expression.LogicalExpression)
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException)
ClassGenerator (org.apache.drill.exec.expr.ClassGenerator)
TypedFieldId (org.apache.drill.exec.record.TypedFieldId)
Ordering (org.apache.drill.common.logical.data.Order.Ordering)
HoldingContainer (org.apache.drill.exec.expr.ClassGenerator.HoldingContainer)
JConditional (com.sun.codemodel.JConditional)
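
In the loop above, the TypedFieldId is constructed directly: the i-th ordering expression is compared against column i of the partition table by pairing the expression's MajorType with that column's ordinal position. A minimal sketch of that pattern, using only the constructor and read-expression calls that appear in the example (the helper name and parameters are placeholders):

import org.apache.drill.common.expression.LogicalExpression;
import org.apache.drill.exec.expr.ValueVectorReadExpression;
import org.apache.drill.exec.record.TypedFieldId;

// Build a read expression for the partition-table column that matches the
// given ordering expression; 'index' is the column's position in the table.
ValueVectorReadExpression partitionColumnRead(LogicalExpression expr, int index) {
    // TypedFieldId pairs the value's MajorType with the vector's ordinal
    // position in its container (the trailing ints form the field-id path).
    TypedFieldId fieldId = new TypedFieldId(expr.getMajorType(), index);
    // The result is then handed to the code generator, e.g.
    // cg.addExpr(new ValueVectorReadExpression(fieldId), BlkCreateMode.FALSE).
    return new ValueVectorReadExpression(fieldId);
}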

Example 47 with TypedFieldId

Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto.

From the class ProjectRecordBatch, the method setupNewSchemaFromInput:

private void setupNewSchemaFromInput(RecordBatch incomingBatch) throws SchemaChangeException {
    if (allocationVectors != null) {
        for (final ValueVector v : allocationVectors) {
            v.clear();
        }
    }
    this.allocationVectors = Lists.newArrayList();
    if (complexWriters != null) {
        container.clear();
    } else {
        container.zeroVectors();
    }
    final List<NamedExpression> exprs = getExpressionList();
    final ErrorCollector collector = new ErrorCollectorImpl();
    final List<TransferPair> transfers = Lists.newArrayList();
    final ClassGenerator<Projector> cg = CodeGenerator.getRoot(Projector.TEMPLATE_DEFINITION, context.getOptions());
    cg.getCodeGenerator().plainJavaCapable(true);
    // Uncomment this line to debug the generated code.
    // cg.getCodeGenerator().saveCodeForDebugging(true);
    final IntHashSet transferFieldIds = new IntHashSet();
    final boolean isAnyWildcard = isAnyWildcard(exprs);
    final ClassifierResult result = new ClassifierResult();
    final boolean classify = isClassificationNeeded(exprs);
    for (NamedExpression namedExpression : exprs) {
        result.clear();
        if (classify && namedExpression.getExpr() instanceof SchemaPath) {
            classifyExpr(namedExpression, incomingBatch, result);
            if (result.isStar) {
                // The value indicates which wildcard we are processing now
                final Integer value = result.prefixMap.get(result.prefix);
                if (value != null && value == 1) {
                    int k = 0;
                    for (final VectorWrapper<?> wrapper : incomingBatch) {
                        final ValueVector vvIn = wrapper.getValueVector();
                        if (k > result.outputNames.size() - 1) {
                            assert false;
                        }
                        // get the renamed column names
                        final String name = result.outputNames.get(k++);
                        if (name.isEmpty()) {
                            continue;
                        }
                        if (isImplicitFileColumn(vvIn)) {
                            continue;
                        }
                        final FieldReference ref = new FieldReference(name);
                        final ValueVector vvOut = container.addOrGet(MaterializedField.create(ref.getAsNamePart().getName(), vvIn.getField().getType()), callBack);
                        final TransferPair tp = vvIn.makeTransferPair(vvOut);
                        transfers.add(tp);
                    }
                } else if (value != null && value > 1) {
                    // subsequent wildcards should copy the incoming value vectors
                    int k = 0;
                    for (final VectorWrapper<?> wrapper : incomingBatch) {
                        final ValueVector vvIn = wrapper.getValueVector();
                        final SchemaPath originalPath = SchemaPath.getSimplePath(vvIn.getField().getName());
                        if (k > result.outputNames.size() - 1) {
                            assert false;
                        }
                        // get the renamed column names
                        final String name = result.outputNames.get(k++);
                        if (name.isEmpty()) {
                            continue;
                        }
                        if (isImplicitFileColumn(vvIn)) {
                            continue;
                        }
                        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(originalPath, incomingBatch, collector, context.getFunctionRegistry());
                        if (collector.hasErrors()) {
                            throw new SchemaChangeException(String.format("Failure while trying to materialize incomingBatch schema.  Errors:\n %s.", collector.toErrorString()));
                        }
                        final MaterializedField outputField = MaterializedField.create(name, expr.getMajorType());
                        final ValueVector vv = container.addOrGet(outputField, callBack);
                        allocationVectors.add(vv);
                        final TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getName()));
                        final ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
                        final HoldingContainer hc = cg.addExpr(write, ClassGenerator.BlkCreateMode.TRUE_IF_BOUND);
                    }
                }
                continue;
            }
        } else {
            // For the columns which do not need to be classified,
            // it is still necessary to ensure the output column name is unique
            result.outputNames = Lists.newArrayList();
            final String outputName = getRef(namedExpression).getRootSegment().getPath();
            addToResultMaps(outputName, result, true);
        }
        String outputName = getRef(namedExpression).getRootSegment().getPath();
        if (result != null && result.outputNames != null && result.outputNames.size() > 0) {
            boolean isMatched = false;
            for (int j = 0; j < result.outputNames.size(); j++) {
                if (!result.outputNames.get(j).isEmpty()) {
                    outputName = result.outputNames.get(j);
                    isMatched = true;
                    break;
                }
            }
            if (!isMatched) {
                continue;
            }
        }
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(namedExpression.getExpr(), incomingBatch, collector, context.getFunctionRegistry(), true, unionTypeEnabled);
        final MaterializedField outputField = MaterializedField.create(outputName, expr.getMajorType());
        if (collector.hasErrors()) {
            throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema.  Errors:\n %s.", collector.toErrorString()));
        }
        // add value vector to transfer if direct reference and this is allowed, otherwise, add to evaluation stack.
        if (expr instanceof ValueVectorReadExpression && incomingBatch.getSchema().getSelectionVectorMode() == SelectionVectorMode.NONE && !((ValueVectorReadExpression) expr).hasReadPath() && !isAnyWildcard && !transferFieldIds.contains(((ValueVectorReadExpression) expr).getFieldId().getFieldIds()[0])) {
            final ValueVectorReadExpression vectorRead = (ValueVectorReadExpression) expr;
            final TypedFieldId id = vectorRead.getFieldId();
            final ValueVector vvIn = incomingBatch.getValueAccessorById(id.getIntermediateClass(), id.getFieldIds()).getValueVector();
            Preconditions.checkNotNull(incomingBatch);
            final FieldReference ref = getRef(namedExpression);
            final ValueVector vvOut = container.addOrGet(MaterializedField.create(ref.getLastSegment().getNameSegment().getPath(), vectorRead.getMajorType()), callBack);
            final TransferPair tp = vvIn.makeTransferPair(vvOut);
            transfers.add(tp);
            transferFieldIds.add(vectorRead.getFieldId().getFieldIds()[0]);
        } else if (expr instanceof DrillFuncHolderExpr && ((DrillFuncHolderExpr) expr).getHolder().isComplexWriterFuncHolder()) {
            // Lazy initialization of the list of complex writers, if not done yet.
            if (complexWriters == null) {
                complexWriters = Lists.newArrayList();
            } else {
                complexWriters.clear();
            }
            // The reference name will be passed to ComplexWriter, used as the name of the output vector from the writer.
            ((DrillFuncHolderExpr) expr).getFieldReference(namedExpression.getRef());
            cg.addExpr(expr, ClassGenerator.BlkCreateMode.TRUE_IF_BOUND);
            if (complexFieldReferencesList == null) {
                complexFieldReferencesList = Lists.newArrayList();
            }
            // save the field reference for later for getting schema when input is empty
            complexFieldReferencesList.add(namedExpression.getRef());
        } else {
            // need to do evaluation.
            final ValueVector vector = container.addOrGet(outputField, callBack);
            allocationVectors.add(vector);
            final TypedFieldId fid = container.getValueVectorId(SchemaPath.getSimplePath(outputField.getName()));
            final boolean useSetSafe = !(vector instanceof FixedWidthVector);
            final ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, useSetSafe);
            final HoldingContainer hc = cg.addExpr(write, ClassGenerator.BlkCreateMode.TRUE_IF_BOUND);
            // We cannot do multiple transfers from the same vector. However we still need to instantiate the output vector.
            if (expr instanceof ValueVectorReadExpression) {
                final ValueVectorReadExpression vectorRead = (ValueVectorReadExpression) expr;
                if (!vectorRead.hasReadPath()) {
                    final TypedFieldId id = vectorRead.getFieldId();
                    final ValueVector vvIn = incomingBatch.getValueAccessorById(id.getIntermediateClass(), id.getFieldIds()).getValueVector();
                    vvIn.makeTransferPair(vector);
                }
            }
            logger.debug("Added eval for project expression.");
        }
    }
    try {
        CodeGenerator<Projector> codeGen = cg.getCodeGenerator();
        codeGen.plainJavaCapable(true);
        // Uncomment this line to debug the generated code.
        // codeGen.saveCodeForDebugging(true);
        this.projector = context.getImplementationClass(codeGen);
        projector.setup(context, incomingBatch, this, transfers);
    } catch (ClassTransformationException | IOException e) {
        throw new SchemaChangeException("Failure while attempting to load generated class", e);
    }
}
Also used:
TransferPair (org.apache.drill.exec.record.TransferPair)
IntHashSet (com.carrotsearch.hppc.IntHashSet)
ErrorCollector (org.apache.drill.common.expression.ErrorCollector)
DrillFuncHolderExpr (org.apache.drill.exec.expr.DrillFuncHolderExpr)
ErrorCollectorImpl (org.apache.drill.common.expression.ErrorCollectorImpl)
LogicalExpression (org.apache.drill.common.expression.LogicalExpression)
HoldingContainer (org.apache.drill.exec.expr.ClassGenerator.HoldingContainer)
SchemaPath (org.apache.drill.common.expression.SchemaPath)
TypedFieldId (org.apache.drill.exec.record.TypedFieldId)
ValueVectorWriteExpression (org.apache.drill.exec.expr.ValueVectorWriteExpression)
FieldReference (org.apache.drill.common.expression.FieldReference)
ClassTransformationException (org.apache.drill.exec.exception.ClassTransformationException)
FixedWidthVector (org.apache.drill.exec.vector.FixedWidthVector)
VectorWrapper (org.apache.drill.exec.record.VectorWrapper)
MaterializedField (org.apache.drill.exec.record.MaterializedField)
IOException (java.io.IOException)
ValueVector (org.apache.drill.exec.vector.ValueVector)
ValueVectorReadExpression (org.apache.drill.exec.expr.ValueVectorReadExpression)
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException)
NamedExpression (org.apache.drill.common.logical.data.NamedExpression)
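
Two uses of TypedFieldId carry the project logic above: container.getValueVectorId(...) supplies the id that a ValueVectorWriteExpression writes through, and the id inside a ValueVectorReadExpression locates the incoming vector for a transfer. The sketch below isolates only the transfer-dedup step; it assumes the other preconditions from the example (no selection vector, no read path, no wildcard) have already been checked, and the helper name is hypothetical:

import com.carrotsearch.hppc.IntHashSet;
import org.apache.drill.exec.expr.ValueVectorReadExpression;
import org.apache.drill.exec.record.TypedFieldId;

// The first entry of getFieldIds() identifies the top-level incoming column.
// Tracking it in a set is what prevents two transfers from the same vector;
// a repeated column falls back to the evaluation path instead.
boolean registerTransfer(ValueVectorReadExpression vectorRead, IntHashSet transferFieldIds) {
    TypedFieldId id = vectorRead.getFieldId();
    int topLevelField = id.getFieldIds()[0];
    if (transferFieldIds.contains(topLevelField)) {
        return false; // already transferred once; evaluate instead
    }
    transferFieldIds.add(topLevelField);
    return true;
}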

Example 48 with TypedFieldId

Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto.

From the class FlattenRecordBatch, the method setupNewSchema:

@Override
protected boolean setupNewSchema() throws SchemaChangeException {
    this.allocationVectors = Lists.newArrayList();
    container.clear();
    final List<NamedExpression> exprs = getExpressionList();
    final ErrorCollector collector = new ErrorCollectorImpl();
    final List<TransferPair> transfers = Lists.newArrayList();
    final ClassGenerator<Flattener> cg = CodeGenerator.getRoot(Flattener.TEMPLATE_DEFINITION, context.getOptions());
    cg.getCodeGenerator().plainJavaCapable(true);
    final IntHashSet transferFieldIds = new IntHashSet();
    final NamedExpression flattenExpr = new NamedExpression(popConfig.getColumn(), new FieldReference(popConfig.getColumn()));
    final ValueVectorReadExpression vectorRead = (ValueVectorReadExpression) ExpressionTreeMaterializer.materialize(flattenExpr.getExpr(), incoming, collector, context.getFunctionRegistry(), true);
    final FieldReference fieldReference = flattenExpr.getRef();
    final TransferPair transferPair = getFlattenFieldTransferPair(fieldReference);
    if (transferPair != null) {
        final ValueVector flattenVector = transferPair.getTo();
        // checks that the list has only the default ValueVector and replaces the resulting ValueVector with an INT-typed ValueVector
        if (exprs.size() == 0 && flattenVector.getField().getType().equals(Types.LATE_BIND_TYPE)) {
            final MaterializedField outputField = MaterializedField.create(fieldReference.getAsNamePart().getName(), Types.OPTIONAL_INT);
            final ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
            container.add(vector);
        } else {
            transfers.add(transferPair);
            container.add(flattenVector);
            transferFieldIds.add(vectorRead.getFieldId().getFieldIds()[0]);
        }
    }
    logger.debug("Added transfer for project expression.");
    ClassifierResult result = new ClassifierResult();
    for (NamedExpression namedExpression : exprs) {
        result.clear();
        String outputName = getRef(namedExpression).getRootSegment().getPath();
        if (result != null && result.outputNames != null && result.outputNames.size() > 0) {
            for (int j = 0; j < result.outputNames.size(); j++) {
                if (!result.outputNames.get(j).equals(EMPTY_STRING)) {
                    outputName = result.outputNames.get(j);
                    break;
                }
            }
        }
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(namedExpression.getExpr(), incoming, collector, context.getFunctionRegistry(), true);
        if (collector.hasErrors()) {
            throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema.  Errors:\n %s.", collector.toErrorString()));
        }
        if (expr instanceof DrillFuncHolderExpr && ((DrillFuncHolderExpr) expr).getHolder().isComplexWriterFuncHolder()) {
            // Lazy initialization of the list of complex writers, if not done yet.
            if (complexWriters == null) {
                complexWriters = Lists.newArrayList();
            }
            // The reference name will be passed to ComplexWriter, used as the name of the output vector from the writer.
            ((DrillFuncHolderExpr) expr).getFieldReference(namedExpression.getRef());
            cg.addExpr(expr);
        } else {
            // need to do evaluation.
            final MaterializedField outputField;
            if (expr instanceof ValueVectorReadExpression) {
                final TypedFieldId id = ValueVectorReadExpression.class.cast(expr).getFieldId();
                @SuppressWarnings("resource") final ValueVector incomingVector = incoming.getValueAccessorById(id.getIntermediateClass(), id.getFieldIds()).getValueVector();
                // The incoming vector can be null when the first batch is empty.
                if (incomingVector != null) {
                    outputField = incomingVector.getField().clone();
                } else {
                    outputField = MaterializedField.create(outputName, expr.getMajorType());
                }
            } else {
                outputField = MaterializedField.create(outputName, expr.getMajorType());
            }
            @SuppressWarnings("resource") final ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
            allocationVectors.add(vector);
            TypedFieldId fid = container.add(vector);
            ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
            cg.addExpr(write);
            logger.debug("Added eval for project expression.");
        }
    }
    cg.rotateBlock();
    cg.getEvalBlock()._return(JExpr.TRUE);
    container.buildSchema(SelectionVectorMode.NONE);
    try {
        this.flattener = context.getImplementationClass(cg.getCodeGenerator());
        flattener.setup(context, incoming, this, transfers);
    } catch (ClassTransformationException | IOException e) {
        throw new SchemaChangeException("Failure while attempting to load generated class", e);
    }
    return true;
}
Also used:
TransferPair (org.apache.drill.exec.record.TransferPair)
IntHashSet (com.carrotsearch.hppc.IntHashSet)
ErrorCollector (org.apache.drill.common.expression.ErrorCollector)
DrillFuncHolderExpr (org.apache.drill.exec.expr.DrillFuncHolderExpr)
ErrorCollectorImpl (org.apache.drill.common.expression.ErrorCollectorImpl)
LogicalExpression (org.apache.drill.common.expression.LogicalExpression)
TypedFieldId (org.apache.drill.exec.record.TypedFieldId)
ValueVectorWriteExpression (org.apache.drill.exec.expr.ValueVectorWriteExpression)
FieldReference (org.apache.drill.common.expression.FieldReference)
ClassTransformationException (org.apache.drill.exec.exception.ClassTransformationException)
MaterializedField (org.apache.drill.exec.record.MaterializedField)
IOException (java.io.IOException)
ValueVectorReadExpression (org.apache.drill.exec.expr.ValueVectorReadExpression)
RepeatedValueVector (org.apache.drill.exec.vector.complex.RepeatedValueVector)
ValueVector (org.apache.drill.exec.vector.ValueVector)
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException)
NamedExpression (org.apache.drill.common.logical.data.NamedExpression)
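
In the evaluation branch, the TypedFieldId is not built by hand; VectorContainer.add(vector) returns it, and that id tells the generated code where to write each evaluated value. A small sketch of that step under the same assumptions as the example (the helper name and allocator parameter are placeholders):

import org.apache.drill.common.expression.LogicalExpression;
import org.apache.drill.exec.expr.TypeHelper;
import org.apache.drill.exec.expr.ValueVectorWriteExpression;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.record.TypedFieldId;
import org.apache.drill.exec.record.VectorContainer;
import org.apache.drill.exec.vector.ValueVector;

// Create the output vector, let the container assign its TypedFieldId, and
// bind the materialized expression to that id for the generated eval code.
ValueVectorWriteExpression writeExpressionFor(VectorContainer container,
                                              MaterializedField outputField,
                                              LogicalExpression expr,
                                              BufferAllocator allocator) {
    ValueVector vector = TypeHelper.getNewVector(outputField, allocator);
    TypedFieldId fid = container.add(vector);
    // 'true' requests the safe (bounds-checked) write path, as in the example.
    return new ValueVectorWriteExpression(fid, expr, true);
}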

Example 49 with TypedFieldId

Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto.

From the class HashAggTemplate, the method setup:

@Override
public void setup(HashAggregate hashAggrConfig, HashTableConfig htConfig, FragmentContext context, OperatorContext oContext, RecordBatch incoming, HashAggBatch outgoing, LogicalExpression[] valueExprs, List<TypedFieldId> valueFieldIds, TypedFieldId[] groupByOutFieldIds, VectorContainer outContainer, int extraRowBytes) throws SchemaChangeException, IOException {
    if (valueExprs == null || valueFieldIds == null) {
        throw new IllegalArgumentException("Invalid aggr value exprs or workspace variables.");
    }
    if (valueFieldIds.size() < valueExprs.length) {
        throw new IllegalArgumentException("Wrong number of workspace variables.");
    }
    this.context = context;
    this.stats = oContext.getStats();
    this.allocator = oContext.getAllocator();
    this.oContext = oContext;
    this.incoming = incoming;
    this.outgoing = outgoing;
    this.outContainer = outContainer;
    this.operatorId = hashAggrConfig.getOperatorId();
    this.useMemoryPrediction = context.getOptions().getOption(ExecConstants.HASHAGG_USE_MEMORY_PREDICTION_VALIDATOR);
    is2ndPhase = hashAggrConfig.getAggPhase() == AggPrelBase.OperatorPhase.PHASE_2of2;
    isTwoPhase = hashAggrConfig.getAggPhase() != AggPrelBase.OperatorPhase.PHASE_1of1;
    is1stPhase = isTwoPhase && !is2ndPhase;
    // a single-phase aggregation cannot spill
    canSpill = isTwoPhase;
    // Typically for testing - force a spill after a partition has more than this many batches
    minBatchesPerPartition = context.getOptions().getOption(ExecConstants.HASHAGG_MIN_BATCHES_PER_PARTITION_VALIDATOR);
    // Set the memory limit
    long memoryLimit = allocator.getLimit();
    // Optional configured memory limit, typically used only for testing.
    long configLimit = context.getOptions().getOption(ExecConstants.HASHAGG_MAX_MEMORY_VALIDATOR);
    if (configLimit > 0) {
        logger.warn("Memory limit was changed to {}", configLimit);
        memoryLimit = Math.min(memoryLimit, configLimit);
        // enforce at the allocator
        allocator.setLimit(memoryLimit);
    }
    // TODO:  This functionality will be added later.
    if (hashAggrConfig.getGroupByExprs().size() == 0) {
        throw new IllegalArgumentException("Currently, hash aggregation is only applicable if there are group-by " + "expressions.");
    }
    this.htIdxHolder = new IndexPointer();
    this.outStartIdxHolder = new IndexPointer();
    this.outNumRecordsHolder = new IndexPointer();
    materializedValueFields = new MaterializedField[valueFieldIds.size()];
    if (valueFieldIds.size() > 0) {
        int i = 0;
        FieldReference ref = new FieldReference("dummy", ExpressionPosition.UNKNOWN, valueFieldIds.get(0).getIntermediateType());
        for (TypedFieldId id : valueFieldIds) {
            materializedValueFields[i++] = MaterializedField.create(ref.getAsNamePart().getName(), id.getIntermediateType());
        }
    }
    spillSet = new SpillSet(context, hashAggrConfig);
    baseHashTable = new ChainedHashTable(htConfig, context, allocator, incoming, null, /* no incoming probe */
    outgoing);
    // retain these for delayedSetup, and to allow recreating hash tables (after a spill)
    this.groupByOutFieldIds = groupByOutFieldIds;
    numGroupByOutFields = groupByOutFieldIds.length;
    // Start calculating the row widths (with the extra columns; the rest would be done in updateEstMaxBatchSize() )
    estRowWidth = extraRowBytes;
    estValuesRowWidth = extraRowBytes;
    doSetup(incoming);
}
Also used:
FieldReference (org.apache.drill.common.expression.FieldReference)
TypedFieldId (org.apache.drill.exec.record.TypedFieldId)
IndexPointer (org.apache.drill.exec.physical.impl.common.IndexPointer)
SpillSet (org.apache.drill.exec.physical.impl.spill.SpillSet)
ChainedHashTable (org.apache.drill.exec.physical.impl.common.ChainedHashTable)
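
Here the TypedFieldIds of the workspace value vectors are used only for their types: getIntermediateType() supplies the MajorType from which each materialized workspace field is created. A reduced sketch of that loop, reusing the calls from the example (the shared "dummy" reference name comes from the original code):

import java.util.List;
import org.apache.drill.common.expression.ExpressionPosition;
import org.apache.drill.common.expression.FieldReference;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.record.TypedFieldId;

// Declare one workspace field per aggregate value, typed from the
// intermediate type carried by its TypedFieldId.
MaterializedField[] workspaceFields(List<TypedFieldId> valueFieldIds) {
    MaterializedField[] fields = new MaterializedField[valueFieldIds.size()];
    FieldReference ref = new FieldReference("dummy", ExpressionPosition.UNKNOWN,
            valueFieldIds.get(0).getIntermediateType());
    int i = 0;
    for (TypedFieldId id : valueFieldIds) {
        fields[i++] = MaterializedField.create(ref.getAsNamePart().getName(),
                id.getIntermediateType());
    }
    return fields;
}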

Example 50 with TypedFieldId

Use of org.apache.drill.exec.record.TypedFieldId in project drill by axbaretto.

From the class ChainedHashTable, the method createAndSetupHashTable:

public HashTable createAndSetupHashTable(TypedFieldId[] outKeyFieldIds, int numPartitions) throws ClassTransformationException, IOException, SchemaChangeException {
    CodeGenerator<HashTable> top = CodeGenerator.get(HashTable.TEMPLATE_DEFINITION, context.getOptions());
    top.plainJavaCapable(true);
    // Uncomment this line to debug the generated code.
    // This code is called from generated code, so to step into it,
    // also persist the code generated in HashAggBatch.
    // top.saveCodeForDebugging(true);
    // use a subclass
    top.preferPlainJava(true);
    ClassGenerator<HashTable> cg = top.getRoot();
    ClassGenerator<HashTable> cgInner = cg.getInnerGenerator("BatchHolder");
    LogicalExpression[] keyExprsBuild = new LogicalExpression[htConfig.getKeyExprsBuild().size()];
    LogicalExpression[] keyExprsProbe = null;
    boolean isProbe = (htConfig.getKeyExprsProbe() != null);
    if (isProbe) {
        keyExprsProbe = new LogicalExpression[htConfig.getKeyExprsProbe().size()];
    }
    ErrorCollector collector = new ErrorCollectorImpl();
    // original ht container from which others may be cloned
    VectorContainer htContainerOrig = new VectorContainer();
    TypedFieldId[] htKeyFieldIds = new TypedFieldId[htConfig.getKeyExprsBuild().size()];
    int i = 0;
    for (NamedExpression ne : htConfig.getKeyExprsBuild()) {
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(ne.getExpr(), incomingBuild, collector, context.getFunctionRegistry());
        if (collector.hasErrors()) {
            throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
        }
        if (expr == null) {
            continue;
        }
        keyExprsBuild[i] = expr;
        i++;
    }
    if (isProbe) {
        i = 0;
        for (NamedExpression ne : htConfig.getKeyExprsProbe()) {
            final LogicalExpression expr = ExpressionTreeMaterializer.materialize(ne.getExpr(), incomingProbe, collector, context.getFunctionRegistry());
            if (collector.hasErrors()) {
                throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
            }
            if (expr == null) {
                continue;
            }
            keyExprsProbe[i] = expr;
            i++;
        }
        JoinUtils.addLeastRestrictiveCasts(keyExprsProbe, incomingProbe, keyExprsBuild, incomingBuild, context);
    }
    i = 0;
    /*
     * Once the implicit casts have been added, create the value vectors for the corresponding
     * type and add it to the hash table's container.
     * Note: Adding implicit casts may have a minor impact on the memory foot print. For example
     * if we have a join condition with bigint on the probe side and int on the build side then
     * after this change we will be allocating a bigint vector in the hashtable instead of an int
     * vector.
     */
    for (NamedExpression ne : htConfig.getKeyExprsBuild()) {
        LogicalExpression expr = keyExprsBuild[i];
        final MaterializedField outputField = MaterializedField.create(ne.getRef().getLastSegment().getNameSegment().getPath(), expr.getMajorType());
        @SuppressWarnings("resource") ValueVector vv = TypeHelper.getNewVector(outputField, allocator);
        htKeyFieldIds[i] = htContainerOrig.add(vv);
        i++;
    }
    // generate code for isKeyMatch(), setValue(), getHash() and outputRecordKeys()
    setupIsKeyMatchInternal(cgInner, KeyMatchIncomingBuildMapping, KeyMatchHtableMapping, keyExprsBuild, htConfig.getComparators(), htKeyFieldIds);
    setupIsKeyMatchInternal(cgInner, KeyMatchIncomingProbeMapping, KeyMatchHtableProbeMapping, keyExprsProbe, htConfig.getComparators(), htKeyFieldIds);
    setupSetValue(cgInner, keyExprsBuild, htKeyFieldIds);
    if (outgoing != null) {
        if (outKeyFieldIds.length > htConfig.getKeyExprsBuild().size()) {
            throw new IllegalArgumentException("Mismatched number of output key fields.");
        }
    }
    setupOutputRecordKeys(cgInner, htKeyFieldIds, outKeyFieldIds);
    setupGetHash(cg, /* use top level code generator for getHash */
    GetHashIncomingBuildMapping, incomingBuild, keyExprsBuild, false);
    setupGetHash(cg, /* use top level code generator for getHash */
    GetHashIncomingProbeMapping, incomingProbe, keyExprsProbe, true);
    HashTable ht = context.getImplementationClass(top);
    ht.setup(htConfig, allocator, incomingBuild, incomingProbe, outgoing, htContainerOrig);
    return ht;
}
Also used:
ErrorCollector (org.apache.drill.common.expression.ErrorCollector)
MaterializedField (org.apache.drill.exec.record.MaterializedField)
VectorContainer (org.apache.drill.exec.record.VectorContainer)
ErrorCollectorImpl (org.apache.drill.common.expression.ErrorCollectorImpl)
ValueVector (org.apache.drill.exec.vector.ValueVector)
LogicalExpression (org.apache.drill.common.expression.LogicalExpression)
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException)
NamedExpression (org.apache.drill.common.logical.data.NamedExpression)
TypedFieldId (org.apache.drill.exec.record.TypedFieldId)
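
The hash table obtains its own TypedFieldIds by adding one key vector per build-side key expression to its container; those ids (htKeyFieldIds above) are what the generated isKeyMatch(), setValue() and outputRecordKeys() methods use to address the key columns. A simplified sketch of that loop (it omits the null-expression skip in the original, and the helper name is a placeholder):

import java.util.List;
import org.apache.drill.common.expression.LogicalExpression;
import org.apache.drill.common.logical.data.NamedExpression;
import org.apache.drill.exec.expr.TypeHelper;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.record.TypedFieldId;
import org.apache.drill.exec.record.VectorContainer;
import org.apache.drill.exec.vector.ValueVector;

// One key vector per build-side key expression; the container hands back the
// TypedFieldId that the generated hash-table code later uses to find the key.
TypedFieldId[] buildKeyFieldIds(List<NamedExpression> keyExprs,
                                LogicalExpression[] keyExprsBuild,
                                VectorContainer htContainer,
                                BufferAllocator allocator) {
    TypedFieldId[] ids = new TypedFieldId[keyExprsBuild.length];
    int i = 0;
    for (NamedExpression ne : keyExprs) {
        MaterializedField outputField = MaterializedField.create(
                ne.getRef().getLastSegment().getNameSegment().getPath(),
                keyExprsBuild[i].getMajorType());
        ValueVector vv = TypeHelper.getNewVector(outputField, allocator);
        ids[i++] = htContainer.add(vv);
    }
    return ids;
}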

Aggregations

TypedFieldId (org.apache.drill.exec.record.TypedFieldId): 63 usages
LogicalExpression (org.apache.drill.common.expression.LogicalExpression): 30 usages
ErrorCollector (org.apache.drill.common.expression.ErrorCollector): 22 usages
ErrorCollectorImpl (org.apache.drill.common.expression.ErrorCollectorImpl): 22 usages
MaterializedField (org.apache.drill.exec.record.MaterializedField): 22 usages
ValueVector (org.apache.drill.exec.vector.ValueVector): 22 usages
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 21 usages
SchemaPath (org.apache.drill.common.expression.SchemaPath): 18 usages
ValueVectorWriteExpression (org.apache.drill.exec.expr.ValueVectorWriteExpression): 17 usages
ValueVectorReadExpression (org.apache.drill.exec.expr.ValueVectorReadExpression): 12 usages
TransferPair (org.apache.drill.exec.record.TransferPair): 12 usages
Test (org.junit.Test): 11 usages
JVar (com.sun.codemodel.JVar): 10 usages
NamedExpression (org.apache.drill.common.logical.data.NamedExpression): 9 usages
VectorWrapper (org.apache.drill.exec.record.VectorWrapper): 9 usages
FieldReference (org.apache.drill.common.expression.FieldReference): 7 usages
TypeProtos (org.apache.drill.common.types.TypeProtos): 7 usages
MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 7 usages
JExpression (com.sun.codemodel.JExpression): 6 usages
IOException (java.io.IOException): 6 usages