Search in sources :

Example 16 with SchemaChangeException

Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.

From the class FlattenRecordBatch, method setupNewSchema.

@Override
protected boolean setupNewSchema() throws SchemaChangeException {
    this.allocationVectors = Lists.newArrayList();
    container.clear();
    final List<NamedExpression> exprs = getExpressionList();
    final ErrorCollector collector = new ErrorCollectorImpl();
    final List<TransferPair> transfers = Lists.newArrayList();
    final ClassGenerator<Flattener> cg = CodeGenerator.getRoot(Flattener.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
    cg.getCodeGenerator().plainJavaCapable(true);
    // Uncomment the next line to save the generated code for debugging.
    //    cg.getCodeGenerator().saveCodeForDebugging(true);
    final IntHashSet transferFieldIds = new IntHashSet();
    final NamedExpression flattenExpr = new NamedExpression(popConfig.getColumn(), new FieldReference(popConfig.getColumn()));
    final ValueVectorReadExpression vectorRead = (ValueVectorReadExpression) ExpressionTreeMaterializer.materialize(flattenExpr.getExpr(), incoming, collector, context.getFunctionRegistry(), true);
    final FieldReference fieldReference = flattenExpr.getRef();
    final TransferPair transferPair = getFlattenFieldTransferPair(fieldReference);
    if (transferPair != null) {
        final ValueVector flattenVector = transferPair.getTo();
        // If the expression list is empty and the flatten vector is still late-bound, replace it with an OPTIONAL INT-typed ValueVector.
        if (exprs.size() == 0 && flattenVector.getField().getType().equals(Types.LATE_BIND_TYPE)) {
            final MaterializedField outputField = MaterializedField.create(fieldReference.getAsNamePart().getName(), Types.OPTIONAL_INT);
            final ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
            container.add(vector);
        } else {
            transfers.add(transferPair);
            container.add(flattenVector);
            transferFieldIds.add(vectorRead.getFieldId().getFieldIds()[0]);
        }
    }
    logger.debug("Added transfer for project expression.");
    ClassifierResult result = new ClassifierResult();
    for (int i = 0; i < exprs.size(); i++) {
        final NamedExpression namedExpression = exprs.get(i);
        result.clear();
        String outputName = getRef(namedExpression).getRootSegment().getPath();
        if (result != null && result.outputNames != null && result.outputNames.size() > 0) {
            for (int j = 0; j < result.outputNames.size(); j++) {
                if (!result.outputNames.get(j).equals(EMPTY_STRING)) {
                    outputName = result.outputNames.get(j);
                    break;
                }
            }
        }
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(namedExpression.getExpr(), incoming, collector, context.getFunctionRegistry(), true);
        if (collector.hasErrors()) {
            throw new SchemaChangeException(String.format("Failure while trying to materialize incoming schema.  Errors:\n %s.", collector.toErrorString()));
        }
        if (expr instanceof DrillFuncHolderExpr && ((DrillFuncHolderExpr) expr).getHolder().isComplexWriterFuncHolder()) {
            // Lazy initialization of the list of complex writers, if not done yet.
            if (complexWriters == null) {
                complexWriters = Lists.newArrayList();
            }
            // The reference name will be passed to ComplexWriter, used as the name of the output vector from the writer.
            ((DrillFuncHolderExpr) expr).getFieldReference(namedExpression.getRef());
            cg.addExpr(expr);
        } else {
            // Otherwise the expression must be evaluated into a newly allocated vector.
            final MaterializedField outputField;
            if (expr instanceof ValueVectorReadExpression) {
                final TypedFieldId id = ValueVectorReadExpression.class.cast(expr).getFieldId();
                @SuppressWarnings("resource") final ValueVector incomingVector = incoming.getValueAccessorById(id.getIntermediateClass(), id.getFieldIds()).getValueVector();
                // The incoming vector may be null when the first batch is empty.
                if (incomingVector != null) {
                    outputField = incomingVector.getField().clone();
                } else {
                    outputField = MaterializedField.create(outputName, expr.getMajorType());
                }
            } else {
                outputField = MaterializedField.create(outputName, expr.getMajorType());
            }
            @SuppressWarnings("resource") final ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
            allocationVectors.add(vector);
            TypedFieldId fid = container.add(vector);
            ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
            cg.addExpr(write);
            logger.debug("Added eval for project expression.");
        }
    }
    cg.rotateBlock();
    cg.getEvalBlock()._return(JExpr.TRUE);
    container.buildSchema(SelectionVectorMode.NONE);
    try {
        this.flattener = context.getImplementationClass(cg.getCodeGenerator());
        flattener.setup(context, incoming, this, transfers);
    } catch (ClassTransformationException | IOException e) {
        throw new SchemaChangeException("Failure while attempting to load generated class", e);
    }
    return true;
}
Also used : TransferPair(org.apache.drill.exec.record.TransferPair) IntHashSet(com.carrotsearch.hppc.IntHashSet) ErrorCollector(org.apache.drill.common.expression.ErrorCollector) DrillFuncHolderExpr(org.apache.drill.exec.expr.DrillFuncHolderExpr) ErrorCollectorImpl(org.apache.drill.common.expression.ErrorCollectorImpl) LogicalExpression(org.apache.drill.common.expression.LogicalExpression) TypedFieldId(org.apache.drill.exec.record.TypedFieldId) ValueVectorWriteExpression(org.apache.drill.exec.expr.ValueVectorWriteExpression) FieldReference(org.apache.drill.common.expression.FieldReference) ClassTransformationException(org.apache.drill.exec.exception.ClassTransformationException) MaterializedField(org.apache.drill.exec.record.MaterializedField) IOException(java.io.IOException) ValueVectorReadExpression(org.apache.drill.exec.expr.ValueVectorReadExpression) RepeatedValueVector(org.apache.drill.exec.vector.complex.RepeatedValueVector) ValueVector(org.apache.drill.exec.vector.ValueVector) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) NamedExpression(org.apache.drill.common.logical.data.NamedExpression)
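
The guard at the heart of this example recurs throughout these snippets: expressions are materialized against the incoming batch, errors accumulate in an ErrorCollector, and a non-empty collector aborts schema setup with a SchemaChangeException. Below is a minimal, self-contained sketch of that materialize-then-check pattern; ErrorCollector and SchemaChangeException here are simplified stand-ins, not the real Drill classes.

import java.util.ArrayList;
import java.util.List;

public class MaterializeCheckSketch {

    // Simplified stand-in for org.apache.drill.exec.exception.SchemaChangeException.
    static class SchemaChangeException extends Exception {
        SchemaChangeException(String message) { super(message); }
    }

    // Simplified stand-in for Drill's ErrorCollector / ErrorCollectorImpl.
    static class ErrorCollector {
        private final List<String> errors = new ArrayList<>();
        void addError(String error) { errors.add(error); }
        boolean hasErrors() { return !errors.isEmpty(); }
        String toErrorString() { return String.join("\n", errors); }
    }

    // Mirrors the guard in setupNewSchema(): abort schema setup if any
    // expression failed to materialize against the incoming batch.
    static void checkMaterialization(ErrorCollector collector) throws SchemaChangeException {
        if (collector.hasErrors()) {
            throw new SchemaChangeException(String.format(
                "Failure while trying to materialize incoming schema.  Errors:%n %s.",
                collector.toErrorString()));
        }
    }

    public static void main(String[] args) {
        ErrorCollector collector = new ErrorCollector();
        collector.addError("hypothetical error: no implicit cast from VARCHAR to INT");
        try {
            checkMaterialization(collector);
        } catch (SchemaChangeException e) {
            System.out.println("Schema setup aborted: " + e.getMessage());
        }
    }
}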

Example 17 with SchemaChangeException

Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.

From the class NestedLoopJoinBatch, method buildSchema.

/**
   * Builds the output container's schema. Goes over the left and the right
   * batch and adds the corresponding vectors to the output container.
   * @throws SchemaChangeException if batch schema was changed during execution
   */
@Override
protected void buildSchema() throws SchemaChangeException {
    try {
        leftUpstream = next(LEFT_INPUT, left);
        rightUpstream = next(RIGHT_INPUT, right);
        if (leftUpstream == IterOutcome.STOP || rightUpstream == IterOutcome.STOP) {
            state = BatchState.STOP;
            return;
        }
        if (leftUpstream == IterOutcome.OUT_OF_MEMORY || rightUpstream == IterOutcome.OUT_OF_MEMORY) {
            state = BatchState.OUT_OF_MEMORY;
            return;
        }
        if (leftUpstream != IterOutcome.NONE) {
            leftSchema = left.getSchema();
            for (final VectorWrapper<?> vw : left) {
                container.addOrGet(vw.getField());
            }
        }
        if (rightUpstream != IterOutcome.NONE) {
            // make right input schema optional if we have LEFT join
            for (final VectorWrapper<?> vectorWrapper : right) {
                TypeProtos.MajorType inputType = vectorWrapper.getField().getType();
                TypeProtos.MajorType outputType;
                if (popConfig.getJoinType() == JoinRelType.LEFT && inputType.getMode() == TypeProtos.DataMode.REQUIRED) {
                    outputType = Types.overrideMode(inputType, TypeProtos.DataMode.OPTIONAL);
                } else {
                    outputType = inputType;
                }
                MaterializedField newField = MaterializedField.create(vectorWrapper.getField().getPath(), outputType);
                ValueVector valueVector = container.addOrGet(newField);
                if (valueVector instanceof AbstractContainerVector) {
                    vectorWrapper.getValueVector().makeTransferPair(valueVector);
                    valueVector.clear();
                }
            }
            rightSchema = right.getSchema();
            addBatchToHyperContainer(right);
        }
        allocateVectors();
        nljWorker = setupWorker();
        // if left batch is empty, fetch next
        if (leftUpstream != IterOutcome.NONE && left.getRecordCount() == 0) {
            leftUpstream = next(LEFT_INPUT, left);
        }
        container.setRecordCount(0);
        container.buildSchema(BatchSchema.SelectionVectorMode.NONE);
    } catch (ClassTransformationException | IOException e) {
        throw new SchemaChangeException(e);
    }
}
Also used : ValueVector(org.apache.drill.exec.vector.ValueVector) AbstractContainerVector(org.apache.drill.exec.vector.complex.AbstractContainerVector) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) ClassTransformationException(org.apache.drill.exec.exception.ClassTransformationException) MaterializedField(org.apache.drill.exec.record.MaterializedField) IOException(java.io.IOException) TypeProtos(org.apache.drill.common.types.TypeProtos)
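
One detail worth isolating from buildSchema() is the nullability rule applied to the right side: under a LEFT join, a REQUIRED right-side column must become OPTIONAL, because unmatched left rows carry nulls for it. A self-contained sketch of just that rule follows; JoinType and DataMode are simplified stand-ins for the JoinRelType and TypeProtos.DataMode used above.

public class JoinOutputModeSketch {

    // Simplified stand-ins for the join type and data mode enums used above.
    enum JoinType { INNER, LEFT, RIGHT, FULL }
    enum DataMode { REQUIRED, OPTIONAL, REPEATED }

    // Mirrors the mode override in buildSchema(): right-side REQUIRED columns
    // become OPTIONAL under a LEFT join, since unmatched rows yield nulls.
    static DataMode outputMode(JoinType joinType, DataMode rightInputMode) {
        if (joinType == JoinType.LEFT && rightInputMode == DataMode.REQUIRED) {
            return DataMode.OPTIONAL;
        }
        return rightInputMode;
    }

    public static void main(String[] args) {
        System.out.println(outputMode(JoinType.LEFT, DataMode.REQUIRED));   // OPTIONAL
        System.out.println(outputMode(JoinType.INNER, DataMode.REQUIRED));  // REQUIRED
        System.out.println(outputMode(JoinType.LEFT, DataMode.OPTIONAL));   // OPTIONAL
    }
}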

Example 18 with SchemaChangeException

Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.

From the class OrderedPartitionRecordBatch, method innerNext.

@Override
public IterOutcome innerNext() {
    recordCount = 0;
    container.zeroVectors();
    // Done: upstream is exhausted and the batch queue is drained.
    if (upstreamNone && (batchQueue == null || batchQueue.size() == 0)) {
        return IterOutcome.NONE;
    }
    // if there are batches on the queue, process them first, rather than calling incoming.next()
    if (batchQueue != null && batchQueue.size() > 0) {
        VectorContainer vc = batchQueue.poll();
        recordCount = vc.getRecordCount();
        try {
            // Must set up a new schema each time, because ValueVectors are not reused between containers in queue
            setupNewSchema(vc);
        } catch (SchemaChangeException ex) {
            kill(false);
            logger.error("Failure during query", ex);
            context.fail(ex);
            return IterOutcome.STOP;
        }
        doWork(vc);
        vc.zeroVectors();
        return IterOutcome.OK_NEW_SCHEMA;
    }
    // Reaching this point, either this is the first iteration, or there are no batches left on the queue and there are
    // more incoming
    IterOutcome upstream = next(incoming);
    if (this.first && upstream == IterOutcome.OK) {
        throw new RuntimeException("Invalid state: First batch should have OK_NEW_SCHEMA");
    }
    // If this is the first iteration, we need to generate the partition vectors before we can proceed
    if (this.first && upstream == IterOutcome.OK_NEW_SCHEMA) {
        if (!getPartitionVectors()) {
            close();
            return IterOutcome.STOP;
        }
        batchQueue = new LinkedBlockingQueue<>(this.sampledIncomingBatches);
        first = false;
        // Now that we have the partition vectors, we immediately process the first batch on the queue
        VectorContainer vc = batchQueue.poll();
        try {
            setupNewSchema(vc);
        } catch (SchemaChangeException ex) {
            kill(false);
            logger.error("Failure during query", ex);
            context.fail(ex);
            return IterOutcome.STOP;
        }
        doWork(vc);
        vc.zeroVectors();
        recordCount = vc.getRecordCount();
        return IterOutcome.OK_NEW_SCHEMA;
    }
    // For the first unsampled batch we need to generate a new schema, even if the outcome is IterOutcome.OK. After that we can reuse the schema.
    if (!this.startedUnsampledBatches) {
        this.startedUnsampledBatches = true;
        if (upstream == IterOutcome.OK) {
            upstream = IterOutcome.OK_NEW_SCHEMA;
        }
    }
    switch(upstream) {
        case NONE:
        case NOT_YET:
        case STOP:
            close();
            recordCount = 0;
            return upstream;
        case OK_NEW_SCHEMA:
            try {
                setupNewSchema(incoming);
            } catch (SchemaChangeException ex) {
                kill(false);
                logger.error("Failure during query", ex);
                context.fail(ex);
                return IterOutcome.STOP;
            }
        // fall through.
        case OK:
            doWork(incoming);
            recordCount = incoming.getRecordCount();
            // change if upstream changed, otherwise normal.
            return upstream;
        default:
            throw new UnsupportedOperationException();
    }
}
Also used : SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) VectorContainer(org.apache.drill.exec.record.VectorContainer) CachedVectorContainer(org.apache.drill.exec.cache.CachedVectorContainer)
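
innerNext() repeats the same recovery block three times: on SchemaChangeException the operator kills its incoming batches, logs the failure, fails the fragment context, and returns STOP. The sketch below factors that pattern into one helper; every type here is a simplified stand-in for the Drill operator machinery, and the kill(false) call is omitted since it has no analogue in the sketch.

public class SchemaFailureSketch {

    // Simplified stand-in for org.apache.drill.exec.exception.SchemaChangeException.
    static class SchemaChangeException extends Exception {
        SchemaChangeException(String message) { super(message); }
    }

    enum IterOutcome { OK, OK_NEW_SCHEMA, NONE, STOP }

    // Stand-in for the operator's fragment context.
    interface Context {
        void fail(Throwable t);
    }

    // Mirrors the repeated catch blocks in innerNext(): a schema change
    // failure is fatal, so the operator fails the query and returns STOP.
    static IterOutcome handleSchemaFailure(SchemaChangeException ex, Context context) {
        System.err.println("Failure during query: " + ex.getMessage()); // logger.error stand-in
        context.fail(ex);
        return IterOutcome.STOP;
    }

    public static void main(String[] args) {
        Context context = t -> System.err.println("fragment failed: " + t);
        IterOutcome outcome = handleSchemaFailure(
            new SchemaChangeException("hypothetical: column type changed mid-stream"), context);
        System.out.println(outcome);  // STOP
    }
}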

Example 19 with SchemaChangeException

Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.

From the class OrderedPartitionRecordBatch, method getPartitionVectors.

/**
   * This method is called when the first batch comes in. Incoming batches are collected until a threshold is met. At
   * that point, the records in the batches are sorted and sampled, and the sampled records are stored in the
   * distributed cache. Once a sufficient fraction of the fragments have shared their samples, each fragment grabs all
   * the samples, sorts all the records, builds a partition table, and attempts to push the partition table to the
   * distributed cache. Whichever table gets pushed first becomes the table used by all fragments for partitioning.
   *
   * @return true if successful, false if failed.
   */
private boolean getPartitionVectors() {
    try {
        if (!saveSamples()) {
            return false;
        }
        CachedVectorContainer finalTable = null;
        long val = minorFragmentSampleCount.incrementAndGet();
        logger.debug("Incremented mfsc, got {}", val);
        final long fragmentsBeforeProceed = (long) Math.ceil(sendingMajorFragmentWidth * completionFactor);
        final String finalTableKey = mapKey + "final";
        if (val == fragmentsBeforeProceed) {
            // we crossed the barrier, build table and get data.
            buildTable();
            finalTable = tableMap.get(finalTableKey);
        } else {
            if (val < fragmentsBeforeProceed) {
                if (!waitUntilTimeOut(10)) {
                    return false;
                }
            }
            for (int i = 0; i < 100 && finalTable == null; i++) {
                finalTable = tableMap.get(finalTableKey);
                if (finalTable != null) {
                    break;
                }
                if (!waitUntilTimeOut(10)) {
                    return false;
                }
            }
            if (finalTable == null) {
                buildTable();
            }
            finalTable = tableMap.get(finalTableKey);
        }
        Preconditions.checkState(finalTable != null);
        // Copy the partition boundary vectors out of the final table; they are used by the rest of this operator.
        for (VectorWrapper<?> w : finalTable.get()) {
            partitionVectors.add(w.getValueVector());
        }
    } catch (final ClassTransformationException | IOException | SchemaChangeException ex) {
        kill(false);
        context.fail(ex);
        return false;
    // TODO InterruptedException
    }
    return true;
}
Also used : SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) ClassTransformationException(org.apache.drill.exec.exception.ClassTransformationException) IOException(java.io.IOException) CachedVectorContainer(org.apache.drill.exec.cache.CachedVectorContainer)
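
getPartitionVectors() coordinates fragments through a counter barrier: the fragment whose increment reaches ceil(sendingMajorFragmentWidth * completionFactor) builds the partition table, while the others poll the distributed map with bounded retries (up to 100 polls above). Below is a self-contained sketch of the bounded-polling half, with the distributed cache reduced to a hypothetical Supplier; the attempt count and wait interval are illustrative, not the real operator's values.

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class BoundedPollSketch {

    // Mirrors the retry loop in getPartitionVectors(): poll up to maxAttempts
    // times for a value published by another fragment, waiting between polls.
    // Returns null if the value never appears (the real code then builds the
    // table itself as a fallback).
    static <T> T pollUntilPresent(Supplier<T> source, int maxAttempts, long waitMillis)
            throws InterruptedException {
        for (int i = 0; i < maxAttempts; i++) {
            T value = source.get();
            if (value != null) {
                return value;
            }
            TimeUnit.MILLISECONDS.sleep(waitMillis);
        }
        return null;
    }

    public static void main(String[] args) throws InterruptedException {
        long publishAt = System.currentTimeMillis() + 50;
        // Hypothetical source that becomes non-null after ~50 ms, standing in
        // for tableMap.get(finalTableKey).
        Supplier<String> tableSource =
            () -> System.currentTimeMillis() >= publishAt ? "finalTable" : null;
        System.out.println(pollUntilPresent(tableSource, 100, 10));  // finalTable
    }
}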

Example 20 with SchemaChangeException

Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.

From the class OrderedPartitionRecordBatch, method setupNewSchema.

/**
   * Sets up projection that will transfer all of the columns in batch, and also populate the partition column based on
   * which partition a record falls into in the partition table
   *
   * @param batch the batch whose columns are transferred into the output container
   * @throws SchemaChangeException if expression materialization fails or the generated projector cannot be loaded
   */
protected void setupNewSchema(VectorAccessible batch) throws SchemaChangeException {
    container.clear();
    final ErrorCollector collector = new ErrorCollectorImpl();
    final List<TransferPair> transfers = Lists.newArrayList();
    final ClassGenerator<OrderedPartitionProjector> cg = CodeGenerator.getRoot(OrderedPartitionProjector.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
    for (VectorWrapper<?> vw : batch) {
        TransferPair tp = vw.getValueVector().getTransferPair(oContext.getAllocator());
        transfers.add(tp);
        container.add(tp.getTo());
    }
    cg.setMappingSet(mainMapping);
    int count = 0;
    for (Ordering od : popConfig.getOrderings()) {
        final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), batch, collector, context.getFunctionRegistry());
        if (collector.hasErrors()) {
            throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
        }
        cg.setMappingSet(incomingMapping);
        ClassGenerator.HoldingContainer left = cg.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
        cg.setMappingSet(partitionMapping);
        ClassGenerator.HoldingContainer right = cg.addExpr(new ValueVectorReadExpression(new TypedFieldId(expr.getMajorType(), count++)), ClassGenerator.BlkCreateMode.FALSE);
        cg.setMappingSet(mainMapping);
        // next we wrap the two comparison sides and add the expression block for the comparison.
        LogicalExpression fh = FunctionGenerationHelper.getOrderingComparator(od.nullsSortHigh(), left, right, context.getFunctionRegistry());
        ClassGenerator.HoldingContainer out = cg.addExpr(fh, ClassGenerator.BlkCreateMode.FALSE);
        JConditional jc = cg.getEvalBlock()._if(out.getValue().ne(JExpr.lit(0)));
        if (od.getDirection() == Direction.ASCENDING) {
            jc._then()._return(out.getValue());
        } else {
            jc._then()._return(out.getValue().minus());
        }
    }
    cg.getEvalBlock()._return(JExpr.lit(0));
    container.add(this.partitionKeyVector);
    container.buildSchema(batch.getSchema().getSelectionVectorMode());
    try {
        this.projector = context.getImplementationClass(cg);
        projector.setup(context, batch, this, transfers, partitionVectors, partitions, popConfig.getRef());
    } catch (ClassTransformationException | IOException e) {
        throw new SchemaChangeException("Failure while attempting to load generated class", e);
    }
}
Also used : TransferPair(org.apache.drill.exec.record.TransferPair) ClassTransformationException(org.apache.drill.exec.exception.ClassTransformationException) ErrorCollector(org.apache.drill.common.expression.ErrorCollector) IOException(java.io.IOException) ErrorCollectorImpl(org.apache.drill.common.expression.ErrorCollectorImpl) ValueVectorReadExpression(org.apache.drill.exec.expr.ValueVectorReadExpression) LogicalExpression(org.apache.drill.common.expression.LogicalExpression) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) ClassGenerator(org.apache.drill.exec.expr.ClassGenerator) TypedFieldId(org.apache.drill.exec.record.TypedFieldId) Ordering(org.apache.drill.common.logical.data.Order.Ordering) HoldingContainer(org.apache.drill.exec.expr.ClassGenerator.HoldingContainer) JConditional(com.sun.codemodel.JConditional)
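
The generated comparator in this setupNewSchema() encodes sort direction by negating the comparison result for descending orderings and returning on the first non-zero key comparison. The same logic written directly, without code generation, is sketched below; Direction and the int[] row representation are simplified stand-ins chosen for illustration.

import java.util.Comparator;
import java.util.List;

public class OrderingComparatorSketch {

    // Simplified stand-in for the ordering direction enum used above.
    enum Direction { ASCENDING, DESCENDING }

    // Mirrors the generated eval block: compare keys in order, negate the
    // result for DESCENDING, and return on the first non-zero comparison.
    static <T> int compareRows(List<Comparator<T>> keyComparators,
                               List<Direction> directions, T left, T right) {
        for (int i = 0; i < keyComparators.size(); i++) {
            int out = keyComparators.get(i).compare(left, right);
            if (out != 0) {
                return directions.get(i) == Direction.ASCENDING ? out : -out;
            }
        }
        return 0;  // all ordering keys are equal
    }

    public static void main(String[] args) {
        List<Comparator<int[]>> keys = List.of(
            Comparator.comparingInt((int[] r) -> r[0]),
            Comparator.comparingInt((int[] r) -> r[1]));
        List<Direction> dirs = List.of(Direction.ASCENDING, Direction.DESCENDING);
        // First key ties; second key is descending, so {1, 5} sorts before {1, 3}.
        System.out.println(compareRows(keys, dirs, new int[]{1, 5}, new int[]{1, 3}));  // -1
    }
}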

Aggregations

SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 66 usages
IOException (java.io.IOException): 23 usages
MaterializedField (org.apache.drill.exec.record.MaterializedField): 20 usages
ErrorCollector (org.apache.drill.common.expression.ErrorCollector): 18 usages
ErrorCollectorImpl (org.apache.drill.common.expression.ErrorCollectorImpl): 18 usages
LogicalExpression (org.apache.drill.common.expression.LogicalExpression): 18 usages
ValueVector (org.apache.drill.exec.vector.ValueVector): 18 usages
ClassTransformationException (org.apache.drill.exec.exception.ClassTransformationException): 16 usages
TransferPair (org.apache.drill.exec.record.TransferPair): 9 usages
HoldingContainer (org.apache.drill.exec.expr.ClassGenerator.HoldingContainer): 8 usages
TypedFieldId (org.apache.drill.exec.record.TypedFieldId): 8 usages
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException): 7 usages
Ordering (org.apache.drill.common.logical.data.Order.Ordering): 7 usages
JConditional (com.sun.codemodel.JConditional): 6 usages
NamedExpression (org.apache.drill.common.logical.data.NamedExpression): 6 usages
ValueVectorWriteExpression (org.apache.drill.exec.expr.ValueVectorWriteExpression): 6 usages
RecordBatchLoader (org.apache.drill.exec.record.RecordBatchLoader): 6 usages
VectorContainer (org.apache.drill.exec.record.VectorContainer): 6 usages
SchemaPath (org.apache.drill.common.expression.SchemaPath): 5 usages
RecordBatchData (org.apache.drill.exec.physical.impl.sort.RecordBatchData): 5 usages