Example 21 with MajorType

use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.

the class PreparedStatementProvider method serializeColumn.

/**
   * Serializes the given {@link SerializedField} into a {@link ResultColumnMetadata}.
   * @param field the serialized field describing the result column
   * @return the column metadata built from the field's type information
   */
private static ResultColumnMetadata serializeColumn(SerializedField field) {
    final ResultColumnMetadata.Builder builder = ResultColumnMetadata.newBuilder();
    final MajorType majorType = field.getMajorType();
    final MinorType minorType = majorType.getMinorType();
    /**
     * Defaults to "DRILL" as Drill has only one catalog.
     */
    builder.setCatalogName(InfoSchemaConstants.IS_CATALOG_NAME);
    /**
     * Designated column's schema name. Empty string if not applicable. Initial implementation defaults to empty string
     * as we use LIMIT 0 queries to get the schema, and schema info is lost. If we derive the schema from the plan, we
     * may get the right value.
     */
    builder.setSchemaName("");
    /**
     * Designated column's table name. Not set if not applicable. Initial implementation defaults to empty string as
     * we use LIMIT 0 queries to get the schema, and table info is lost. If we derive the table from the plan, we may
     * get the right value.
     */
    builder.setTableName("");
    builder.setColumnName(field.getNamePart().getName());
    /**
     * Column label name for display or print purposes.
     * Ex. a column named "empName" might be labeled as "Employee Name".
     * Initial implementation defaults to same value as column name.
     */
    builder.setLabel(field.getNamePart().getName());
    /**
     * Data type in string format. Value is SQL standard type.
     */
    builder.setDataType(Types.getSqlTypeName(majorType));
    builder.setIsNullable(majorType.getMode() == DataMode.OPTIONAL);
    /**
     * For numeric data, this is the maximum precision.
     * For character data, this is the length in characters.
     * For datetime data types, this is the length in characters of the String representation
     *    (assuming the maximum allowed precision of the fractional seconds component).
     * For binary data, this is the length in bytes.
     * For all other types, 0 is returned, as the column size is not applicable.
     */
    builder.setPrecision(Types.getPrecision(field.getMajorType()));
    /**
     * Column's number of digits to the right of the decimal point. 0 is returned for types where the scale is not applicable.
     */
    builder.setScale(Types.getScale(majorType));
    /**
     * Indicates whether values in the designated column are signed numbers.
     */
    builder.setSigned(Types.isNumericType(majorType));
    /**
     * Maximum number of characters required to display data from the column.
     */
    builder.setDisplaySize(Types.getJdbcDisplaySize(majorType));
    /**
     * Whether the column is an aliased column. Initial implementation defaults to true as we derive the schema from a
     * LIMIT 0 query and not from the plan.
     */
    builder.setIsAliased(true);
    builder.setSearchability(ColumnSearchability.ALL);
    builder.setUpdatability(ColumnUpdatability.READ_ONLY);
    builder.setAutoIncrement(false);
    builder.setCaseSensitivity(false);
    builder.setSortable(Types.isSortable(minorType));
    /**
     * Returns the fully-qualified name of the Java class whose instances are manufactured if the method
     * ResultSet.getObject is called to retrieve a value from the column. Applicable only to JDBC clients.
     */
    builder.setClassName(DRILL_TYPE_TO_JDBC_CLASSNAME.get(minorType));
    builder.setIsCurrency(false);
    return builder.build();
}
Also used : ResultColumnMetadata(org.apache.drill.exec.proto.UserProtos.ResultColumnMetadata) MajorType(org.apache.drill.common.types.TypeProtos.MajorType) MinorType(org.apache.drill.common.types.TypeProtos.MinorType)
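
The helpers from org.apache.drill.common.types.Types do most of the work in this method. As a quick illustration (a minimal sketch, not part of the original source; the class and method names are hypothetical), the snippet below builds an optional VARCHAR MajorType and reads back the same properties that serializeColumn copies into the metadata:

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;

class TypesHelperSketch {
    static void inspect() {
        // Build a nullable VARCHAR type, as a LIMIT 0 schema probe might produce.
        MajorType varchar = Types.optional(MinorType.VARCHAR);
        // The same accessors serializeColumn uses to fill ResultColumnMetadata.
        System.out.println(Types.getSqlTypeName(varchar));          // SQL-standard type name
        System.out.println(varchar.getMode() == DataMode.OPTIONAL); // true: column is nullable
        System.out.println(Types.getPrecision(varchar));            // character length for VARCHAR
        System.out.println(Types.getScale(varchar));                // 0: scale not applicable
    }
}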

Example 22 with MajorType

use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.

the class KuduRecordReader method initCols.

private void initCols(Schema schema) throws SchemaChangeException {
    ImmutableList.Builder<ProjectedColumnInfo> pciBuilder = ImmutableList.builder();
    for (int i = 0; i < schema.getColumnCount(); i++) {
        ColumnSchema col = schema.getColumnByIndex(i);
        final String name = col.getName();
        final Type kuduType = col.getType();
        MinorType minorType = TYPES.get(kuduType);
        if (minorType == null) {
            logger.warn("Ignoring column that is unsupported.",
                UserException.unsupportedError()
                    .message("A column you queried has a data type that is not currently supported by the Kudu storage plugin. "
                        + "The column's name was %s and its Kudu data type was %s. ", name, kuduType.toString())
                    .addContext("column Name", name)
                    .addContext("plugin", "kudu")
                    .build(logger));
            continue;
        }
        MajorType majorType;
        if (col.isNullable()) {
            majorType = Types.optional(minorType);
        } else {
            majorType = Types.required(minorType);
        }
        MaterializedField field = MaterializedField.create(name, majorType);
        final Class<? extends ValueVector> clazz = (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(minorType, majorType.getMode());
        ValueVector vector = output.addField(field, clazz);
        vector.allocateNew();
        ProjectedColumnInfo pci = new ProjectedColumnInfo();
        pci.vv = vector;
        pci.kuduColumn = col;
        pci.index = i;
        pciBuilder.add(pci);
    }
    projectedCols = pciBuilder.build();
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) MajorType(org.apache.drill.common.types.TypeProtos.MajorType) ColumnSchema(org.apache.kudu.ColumnSchema) MaterializedField(org.apache.drill.exec.record.MaterializedField) ValueVector(org.apache.drill.exec.vector.ValueVector) Type(org.apache.kudu.Type) MinorType(org.apache.drill.common.types.TypeProtos.MinorType)
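
The nullable-column branch above is the canonical way to choose a Drill data mode. A minimal sketch of that choice factored into a helper (the class and helper names are hypothetical, not from the Kudu reader):

import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;

class DataModeSketch {
    // Hypothetical helper: nullable columns become OPTIONAL, others REQUIRED.
    static MajorType toMajorType(MinorType minorType, boolean nullable) {
        return nullable ? Types.optional(minorType) : Types.required(minorType);
    }
}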

Example 23 with MajorType

use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.

the class JdbcRecordReader method setup.

@Override
public void setup(OperatorContext operatorContext, OutputMutator output) throws ExecutionSetupException {
    try {
        this.operatorContext = operatorContext;
        connection = source.getConnection();
        statement = connection.createStatement();
        resultSet = statement.executeQuery(sql);
        final ResultSetMetaData meta = resultSet.getMetaData();
        final int columns = meta.getColumnCount();
        ImmutableList.Builder<ValueVector> vectorBuilder = ImmutableList.builder();
        ImmutableList.Builder<Copier<?>> copierBuilder = ImmutableList.builder();
        for (int i = 1; i <= columns; i++) {
            final String name = meta.getColumnLabel(i);
            final int jdbcType = meta.getColumnType(i);
            final int width = meta.getPrecision(i);
            final int scale = meta.getScale(i);
            MinorType minorType = JDBC_TYPE_MAPPINGS.get(jdbcType);
            if (minorType == null) {
                logger.warn("Ignoring column that is unsupported.",
                    UserException.unsupportedError()
                        .message("A column you queried has a data type that is not currently supported by the JDBC storage plugin. "
                            + "The column's name was %s and its JDBC data type was %s. ", name, nameFromType(jdbcType))
                        .addContext("sql", sql)
                        .addContext("column Name", name)
                        .addContext("plugin", storagePluginName)
                        .build(logger));
                continue;
            }
            final MajorType type = Types.optional(minorType);
            final MaterializedField field = MaterializedField.create(name, type);
            final Class<? extends ValueVector> clazz = (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(minorType, type.getMode());
            ValueVector vector = output.addField(field, clazz);
            vectorBuilder.add(vector);
            copierBuilder.add(getCopier(jdbcType, i, resultSet, vector));
        }
        vectors = vectorBuilder.build();
        copiers = copierBuilder.build();
    } catch (SQLException | SchemaChangeException e) {
        throw UserException.dataReadError(e)
            .message("The JDBC storage plugin failed while trying to set up the SQL query. ")
            .addContext("sql", sql)
            .addContext("plugin", storagePluginName)
            .build(logger);
    }
}
Also used : SQLException(java.sql.SQLException) ImmutableList(com.google.common.collect.ImmutableList) MajorType(org.apache.drill.common.types.TypeProtos.MajorType) MaterializedField(org.apache.drill.exec.record.MaterializedField) ResultSetMetaData(java.sql.ResultSetMetaData) ValueVector(org.apache.drill.exec.vector.ValueVector) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) MinorType(org.apache.drill.common.types.TypeProtos.MinorType)
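
This reader follows the same shape as the Kudu reader above: look the external type up in a static map, warn and skip when no MinorType mapping exists, and wrap everything in an OPTIONAL MajorType, since any JDBC column may return NULL. A sketch of what such a mapping table can look like (entries are illustrative only; Drill's real JDBC_TYPE_MAPPINGS covers many more types):

import com.google.common.collect.ImmutableMap;
import org.apache.drill.common.types.TypeProtos.MinorType;

class JdbcTypeSketch {
    // Illustrative subset of a java.sql.Types -> Drill MinorType table.
    static final ImmutableMap<Integer, MinorType> MAPPINGS = ImmutableMap.of(
        java.sql.Types.INTEGER, MinorType.INT,      // 32-bit integer
        java.sql.Types.DOUBLE, MinorType.FLOAT8,    // 64-bit floating point
        java.sql.Types.VARCHAR, MinorType.VARCHAR); // variable-length string
}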

Example 24 with MajorType

use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.

the class PruneScanRule method doOnMatch.

protected void doOnMatch(RelOptRuleCall call, Filter filterRel, Project projectRel, TableScan scanRel) {
    final String pruningClassName = getClass().getName();
    logger.info("Beginning partition pruning, pruning class: {}", pruningClassName);
    Stopwatch totalPruningTime = Stopwatch.createStarted();
    final PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
    PartitionDescriptor descriptor = getPartitionDescriptor(settings, scanRel);
    final BufferAllocator allocator = optimizerContext.getAllocator();
    final Object selection = getDrillTable(scanRel).getSelection();
    MetadataContext metaContext = null;
    if (selection instanceof FormatSelection) {
        metaContext = ((FormatSelection) selection).getSelection().getMetaContext();
    }
    RexNode condition = null;
    if (projectRel == null) {
        condition = filterRel.getCondition();
    } else {
        // get the filter as if it were below the projection.
        condition = RelOptUtil.pushFilterPastProject(filterRel.getCondition(), projectRel);
    }
    RewriteAsBinaryOperators visitor = new RewriteAsBinaryOperators(true, filterRel.getCluster().getRexBuilder());
    condition = condition.accept(visitor);
    Map<Integer, String> fieldNameMap = Maps.newHashMap();
    List<String> fieldNames = scanRel.getRowType().getFieldNames();
    BitSet columnBitset = new BitSet();
    BitSet partitionColumnBitSet = new BitSet();
    Map<Integer, Integer> partitionMap = Maps.newHashMap();
    int relColIndex = 0;
    for (String field : fieldNames) {
        final Integer partitionIndex = descriptor.getIdIfValid(field);
        if (partitionIndex != null) {
            fieldNameMap.put(partitionIndex, field);
            partitionColumnBitSet.set(partitionIndex);
            columnBitset.set(relColIndex);
            // mapping between the relColIndex and partitionIndex
            partitionMap.put(relColIndex, partitionIndex);
        }
        relColIndex++;
    }
    if (partitionColumnBitSet.isEmpty()) {
        logger.info("No partition columns are projected from the scan..continue. " + "Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
        setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
        return;
    }
    // stop watch to track how long we spend in different phases of pruning
    Stopwatch miscTimer = Stopwatch.createUnstarted();
    // track how long we spend building the filter tree
    miscTimer.start();
    FindPartitionConditions c = new FindPartitionConditions(columnBitset, filterRel.getCluster().getRexBuilder());
    c.analyze(condition);
    RexNode pruneCondition = c.getFinalCondition();
    BitSet referencedDirsBitSet = c.getReferencedDirs();
    logger.info("Total elapsed time to build and analyze filter tree: {} ms", miscTimer.elapsed(TimeUnit.MILLISECONDS));
    miscTimer.reset();
    if (pruneCondition == null) {
        logger.info("No conditions were found eligible for partition pruning." + "Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
        setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
        return;
    }
    // set up the partitions
    List<PartitionLocation> newPartitions = Lists.newArrayList();
    // total number of partitions
    long numTotal = 0;
    int batchIndex = 0;
    PartitionLocation firstLocation = null;
    LogicalExpression materializedExpr = null;
    String[] spInfo = null;
    int maxIndex = -1;
    BitSet matchBitSet = new BitSet();
    // Outer loop: iterate over a list of batches of PartitionLocations
    for (List<PartitionLocation> partitions : descriptor) {
        numTotal += partitions.size();
        logger.debug("Evaluating partition pruning for batch {}", batchIndex);
        if (batchIndex == 0) {
            // save the first location in case everything is pruned
            firstLocation = partitions.get(0);
        }
        final NullableBitVector output = new NullableBitVector(MaterializedField.create("", Types.optional(MinorType.BIT)), allocator);
        final VectorContainer container = new VectorContainer();
        try {
            final ValueVector[] vectors = new ValueVector[descriptor.getMaxHierarchyLevel()];
            for (int partitionColumnIndex : BitSets.toIter(partitionColumnBitSet)) {
                SchemaPath column = SchemaPath.getSimplePath(fieldNameMap.get(partitionColumnIndex));
                MajorType type = descriptor.getVectorType(column, settings);
                MaterializedField field = MaterializedField.create(column.getAsUnescapedPath(), type);
                ValueVector v = TypeHelper.getNewVector(field, allocator);
                v.allocateNew();
                vectors[partitionColumnIndex] = v;
                container.add(v);
            }
            // track how long we spend populating partition column vectors
            miscTimer.start();
            // populate partition vectors.
            descriptor.populatePartitionVectors(vectors, partitions, partitionColumnBitSet, fieldNameMap);
            logger.info("Elapsed time to populate partitioning column vectors: {} ms within batchIndex: {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex);
            miscTimer.reset();
            // materialize the expression; only need to do this once
            if (batchIndex == 0) {
                materializedExpr = materializePruneExpr(pruneCondition, settings, scanRel, container);
                if (materializedExpr == null) {
                    // continue without partition pruning; no need to log anything here since
                    // materializePruneExpr logs it already
                    logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
                    setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
                    return;
                }
            }
            output.allocateNew(partitions.size());
            // start the timer to evaluate how long we spend in the interpreter evaluation
            miscTimer.start();
            InterpreterEvaluator.evaluate(partitions.size(), optimizerContext, container, output, materializedExpr);
            logger.info("Elapsed time in interpreter evaluation: {} ms within batchIndex: {} with # of partitions : {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex, partitions.size());
            miscTimer.reset();
            int recordCount = 0;
            int qualifiedCount = 0;
            // apply the single-partition check only for composite partitions
            if (descriptor.supportsMetadataCachePruning() && partitions.get(0).isCompositePartition()) {
                // Inner loop: within each batch iterate over the PartitionLocations
                for (PartitionLocation part : partitions) {
                    assert part.isCompositePartition();
                    if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
                        newPartitions.add(part);
                        // Rather than using the PartitionLocation, get the array of partition values for the directories that are
                        // referenced by the filter since we are not interested in directory references in other parts of the query.
                        Pair<String[], Integer> p = composePartition(referencedDirsBitSet, partitionMap, vectors, recordCount);
                        String[] parts = p.getLeft();
                        int tmpIndex = p.getRight();
                        maxIndex = Math.max(maxIndex, tmpIndex);
                        if (spInfo == null) {
                            // initialization
                            spInfo = parts;
                            for (int j = 0; j <= tmpIndex; j++) {
                                if (parts[j] != null) {
                                    matchBitSet.set(j);
                                }
                            }
                        } else {
                            // compare the new partition with existing partition
                            for (int j = 0; j <= tmpIndex; j++) {
                                if (parts[j] == null || spInfo[j] == null) {
                                    // nulls don't match
                                    matchBitSet.clear(j);
                                } else {
                                    if (!parts[j].equals(spInfo[j])) {
                                        matchBitSet.clear(j);
                                    }
                                }
                            }
                        }
                        qualifiedCount++;
                    }
                    recordCount++;
                }
            } else {
                // Inner loop: within each batch iterate over the PartitionLocations
                for (PartitionLocation part : partitions) {
                    if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
                        newPartitions.add(part);
                        qualifiedCount++;
                    }
                    recordCount++;
                }
            }
            logger.debug("Within batch {}: total records: {}, qualified records: {}", batchIndex, recordCount, qualifiedCount);
            batchIndex++;
        } catch (Exception e) {
            logger.warn("Exception while trying to prune partition.", e);
            logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
            setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
            // continue without partition pruning
            return;
        } finally {
            container.clear();
            if (output != null) {
                output.clear();
            }
        }
    }
    try {
        if (newPartitions.size() == numTotal) {
            logger.info("No partitions were eligible for pruning");
            return;
        }
        // handle the case all partitions are filtered out.
        boolean canDropFilter = true;
        boolean wasAllPartitionsPruned = false;
        String cacheFileRoot = null;
        if (newPartitions.isEmpty()) {
            assert firstLocation != null;
            // Add the first non-composite partition location, since execution requires schema.
            // In such case, we should not drop filter.
            newPartitions.add(firstLocation.getPartitionLocationRecursive().get(0));
            canDropFilter = false;
            // NOTE: with DRILL-4530, the PruneScanRule may be called with only a list of
            // directories first and the non-composite partition location will still return
            // directories, not files.  So, additional processing is done depending on this flag
            wasAllPartitionsPruned = true;
            logger.info("All {} partitions were pruned; added back a single partition to allow creating a schema", numTotal);
            // set the cacheFileRoot appropriately
            if (firstLocation.isCompositePartition()) {
                cacheFileRoot = descriptor.getBaseTableLocation() + firstLocation.getCompositePartitionPath();
            }
        }
        logger.info("Pruned {} partitions down to {}", numTotal, newPartitions.size());
        List<RexNode> conjuncts = RelOptUtil.conjunctions(condition);
        List<RexNode> pruneConjuncts = RelOptUtil.conjunctions(pruneCondition);
        conjuncts.removeAll(pruneConjuncts);
        RexNode newCondition = RexUtil.composeConjunction(filterRel.getCluster().getRexBuilder(), conjuncts, false);
        RewriteCombineBinaryOperators reverseVisitor = new RewriteCombineBinaryOperators(true, filterRel.getCluster().getRexBuilder());
        condition = condition.accept(reverseVisitor);
        pruneCondition = pruneCondition.accept(reverseVisitor);
        if (descriptor.supportsMetadataCachePruning() && !wasAllPartitionsPruned) {
            // if metadata cache file could potentially be used, then assign a proper cacheFileRoot
            int index = -1;
            if (!matchBitSet.isEmpty()) {
                String path = "";
                index = matchBitSet.length() - 1;
                for (int j = 0; j < matchBitSet.length(); j++) {
                    if (!matchBitSet.get(j)) {
                        // stop at the first index with no match and use the immediate
                        // previous index
                        index = j - 1;
                        break;
                    }
                }
                for (int j = 0; j <= index; j++) {
                    path += "/" + spInfo[j];
                }
                cacheFileRoot = descriptor.getBaseTableLocation() + path;
            }
            if (index != maxIndex) {
                // if multiple partitions are being selected, we should not drop the filter
                // since we are reading the cache file at a parent/ancestor level
                canDropFilter = false;
            }
        }
        RelNode inputRel = descriptor.supportsMetadataCachePruning() ? descriptor.createTableScan(newPartitions, cacheFileRoot, wasAllPartitionsPruned, metaContext) : descriptor.createTableScan(newPartitions, wasAllPartitionsPruned);
        if (projectRel != null) {
            inputRel = projectRel.copy(projectRel.getTraitSet(), Collections.singletonList(inputRel));
        }
        if (newCondition.isAlwaysTrue() && canDropFilter) {
            call.transformTo(inputRel);
        } else {
            final RelNode newFilter = filterRel.copy(filterRel.getTraitSet(), Collections.singletonList(inputRel));
            call.transformTo(newFilter);
        }
        setPruneStatus(metaContext, PruneStatus.PRUNED);
    } catch (Exception e) {
        logger.warn("Exception while using the pruned partitions.", e);
    } finally {
        logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
    }
}
Also used : PlannerSettings(org.apache.drill.exec.planner.physical.PlannerSettings) Stopwatch(com.google.common.base.Stopwatch) FormatSelection(org.apache.drill.exec.store.dfs.FormatSelection) LogicalExpression(org.apache.drill.common.expression.LogicalExpression) NullableBitVector(org.apache.drill.exec.vector.NullableBitVector) SchemaPath(org.apache.drill.common.expression.SchemaPath) PartitionDescriptor(org.apache.drill.exec.planner.PartitionDescriptor) FileSystemPartitionDescriptor(org.apache.drill.exec.planner.FileSystemPartitionDescriptor) PartitionLocation(org.apache.drill.exec.planner.PartitionLocation) MajorType(org.apache.drill.common.types.TypeProtos.MajorType) BitSet(java.util.BitSet) MaterializedField(org.apache.drill.exec.record.MaterializedField) BufferAllocator(org.apache.drill.exec.memory.BufferAllocator) VectorContainer(org.apache.drill.exec.record.VectorContainer) ValueVector(org.apache.drill.exec.vector.ValueVector) RelNode(org.apache.calcite.rel.RelNode) MetadataContext(org.apache.drill.exec.store.dfs.MetadataContext) RexNode(org.apache.calcite.rex.RexNode)
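
One subtle piece of the method above is the matchBitSet bookkeeping: it tracks, per directory level, whether every qualified partition carries the same value, and the block near the end converts that into the deepest level of the unbroken leading run of matches (used to pick cacheFileRoot). A standalone sketch of that scan (the class and method names are hypothetical; the logic mirrors the source):

import java.util.BitSet;

class PrefixDepthSketch {
    // Return the last directory level of the unbroken leading run of matches,
    // or -1 if the first level already differs across qualified partitions.
    static int commonPrefixDepth(BitSet matchBitSet) {
        int index = matchBitSet.length() - 1;
        for (int j = 0; j < matchBitSet.length(); j++) {
            if (!matchBitSet.get(j)) {
                index = j - 1; // first mismatch: fall back to the previous level
                break;
            }
        }
        return index;
    }
}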

Example 25 with MajorType

use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.

the class FunctionImplementationRegistry method functionReplacement.

/**
   * Checks whether this function call needs a replacement function.
   *
   * @param functionCall function call
   * @return the new function name if replacement took place, otherwise the original function name
   */
private String functionReplacement(FunctionCall functionCall) {
    String funcName = functionCall.getName();
    if (functionCall.args.size() == 0) {
        return funcName;
    }
    boolean castToNullableNumeric = optionManager != null && optionManager.getOption(ExecConstants.CAST_TO_NULLABLE_NUMERIC_OPTION);
    if (!castToNullableNumeric) {
        return funcName;
    }
    MajorType majorType = functionCall.args.get(0).getMajorType();
    DataMode dataMode = majorType.getMode();
    MinorType minorType = majorType.getMinorType();
    if (CastFunctions.isReplacementNeeded(funcName, minorType)) {
        funcName = CastFunctions.getReplacingCastFunction(funcName, dataMode, minorType);
    }
    return funcName;
}
Also used : MajorType(org.apache.drill.common.types.TypeProtos.MajorType) DataMode(org.apache.drill.common.types.TypeProtos.DataMode) MinorType(org.apache.drill.common.types.TypeProtos.MinorType)
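
Across all five examples the recurring MajorType idiom is decomposition: a MajorType pairs a MinorType (the data type) with a DataMode (the nullability), and both are recovered with plain getters. A minimal sketch (hypothetical class and method names):

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;

class MajorTypeDecompositionSketch {
    static void demo() {
        MajorType type = Types.required(MinorType.BIGINT);
        MinorType minor = type.getMinorType(); // BIGINT: what the values are
        DataMode mode = type.getMode();        // REQUIRED: non-nullable
        System.out.println(minor + " / " + mode);
    }
}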

Aggregations

MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 34
MaterializedField (org.apache.drill.exec.record.MaterializedField): 13
MinorType (org.apache.drill.common.types.TypeProtos.MinorType): 8
ValueVector (org.apache.drill.exec.vector.ValueVector): 8
LogicalExpression (org.apache.drill.common.expression.LogicalExpression): 7
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 5
JVar (com.sun.codemodel.JVar): 4
HoldingContainer (org.apache.drill.exec.expr.ClassGenerator.HoldingContainer): 4
TypedFieldId (org.apache.drill.exec.record.TypedFieldId): 4
ArrayList (java.util.ArrayList): 3
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException): 3
ErrorCollector (org.apache.drill.common.expression.ErrorCollector): 3
FunctionCall (org.apache.drill.common.expression.FunctionCall): 3
SchemaPath (org.apache.drill.common.expression.SchemaPath): 3
ImmutableList (com.google.common.collect.ImmutableList): 2
JClass (com.sun.codemodel.JClass): 2
JExpression (com.sun.codemodel.JExpression): 2
DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException): 2
ErrorCollectorImpl (org.apache.drill.common.expression.ErrorCollectorImpl): 2
IfCondition (org.apache.drill.common.expression.IfExpression.IfCondition): 2