Example 26 with OptionManager

Use of org.apache.drill.exec.server.options.OptionManager in project drill by apache.

From the class ExternalSortBatchCreator, the method getBatch:

@Override
public AbstractRecordBatch<ExternalSort> getBatch(FragmentContext context, ExternalSort config, List<RecordBatch> children) throws ExecutionSetupException {
    Preconditions.checkArgument(children.size() == 1);
    // Prefer the managed version, but provide runtime and boot-time options
    // to disable it and revert to the "legacy" version. The legacy version
    // is retained primarily to allow cross-check testing against the managed
    // version, and as a fallback in the first release of the managed version.
    OptionManager optionManager = context.getOptions();
    boolean disableManaged = optionManager.getOption(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED_OPTION);
    if (!disableManaged) {
        DrillConfig drillConfig = context.getConfig();
        disableManaged = drillConfig.hasPath(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED) && drillConfig.getBoolean(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED);
    }
    if (disableManaged) {
        return new ExternalSortBatch(config, context, children.iterator().next());
    } else {
        return new org.apache.drill.exec.physical.impl.xsort.managed.ExternalSortBatch(config, context, children.iterator().next());
    }
}
Also used: DrillConfig (org.apache.drill.common.config.DrillConfig), OptionManager (org.apache.drill.exec.server.options.OptionManager)
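
The runtime switch read above can also be flipped programmatically. Below is a minimal, hedged sketch (meant to live inside a test method) using the OperatorFixture harness that appears in Example 30; the boolean setLocalOption overload and the getOptionName() call are assumptions about this Drill version's OptionManager API, not part of ExternalSortBatchCreator itself.

// Hedged sketch: force the legacy (non-managed) external sort through the same
// runtime option that getBatch() checks. Harness and overloads are assumptions.
try (OperatorFixture fixture = OperatorFixture.builder().build()) {
    OptionManager options = fixture.getOptionManager();
    // Set the session/system-level switch read by getBatch().
    options.setLocalOption(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED_OPTION.getOptionName(), true);
    // getBatch() would now take the legacy ExternalSortBatch branch.
    boolean disableManaged = options.getOption(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED_OPTION);
    assert disableManaged;
}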

Example 27 with OptionManager

Use of org.apache.drill.exec.server.options.OptionManager in project drill by apache.

From the class DefaultSqlHandler, the method convertToPrel:

protected Prel convertToPrel(RelNode drel) throws RelConversionException, SqlUnsupportedException {
    Preconditions.checkArgument(drel.getConvention() == DrillRel.DRILL_LOGICAL);
    final RelTraitSet traits = drel.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(DrillDistributionTrait.SINGLETON);
    Prel phyRelNode;
    try {
        final Stopwatch watch = Stopwatch.createStarted();
        final RelNode relNode = transform(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, drel, traits, false);
        phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
        // log externally as we need to finalize before traversing the tree.
        log(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, phyRelNode, logger, watch);
    } catch (RelOptPlanner.CannotPlanException ex) {
        logger.error(ex.getMessage());
        if (JoinUtils.checkCartesianJoin(drel, new ArrayList<Integer>(), new ArrayList<Integer>(), new ArrayList<Boolean>())) {
            throw new UnsupportedRelOperatorException("This query cannot be planned possibly due to either a cartesian join or an inequality join");
        } else {
            throw ex;
        }
    }
    OptionManager queryOptions = context.getOptions();
    if (context.getPlannerSettings().isMemoryEstimationEnabled() && !MemoryEstimationVisitor.enoughMemory(phyRelNode, queryOptions, context.getActiveEndpoints().size())) {
        log("Not enough memory for this plan", phyRelNode, logger, null);
        logger.debug("Re-planning without hash operations.");
        queryOptions.setOption(OptionValue.createBoolean(OptionValue.OptionType.QUERY, PlannerSettings.HASHJOIN.getOptionName(), false));
        queryOptions.setOption(OptionValue.createBoolean(OptionValue.OptionType.QUERY, PlannerSettings.HASHAGG.getOptionName(), false));
        try {
            final RelNode relNode = transform(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, drel, traits);
            phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
        } catch (RelOptPlanner.CannotPlanException ex) {
            logger.error(ex.getMessage());
            if (JoinUtils.checkCartesianJoin(drel, new ArrayList<Integer>(), new ArrayList<Integer>(), new ArrayList<Boolean>())) {
                throw new UnsupportedRelOperatorException("This query cannot be planned possibly due to either a cartesian join or an inequality join");
            } else {
                throw ex;
            }
        }
    }
    /* The order of the following transformations is important */
    /*
     * 0.) For a select * from join query, we need to insert a project on top of the scan and a top project
     * just under the screen operator. The project on top of the scan will rename from * to T1*, while the
     * top project will rename T1* back to * before it outputs the final result. Only the top project will
     * allow duplicate columns, since the user could explicitly ask for duplicate columns (select *, col, *).
     * The rest of the projects will remove duplicate columns when we generate the POP in JSON format.
     */
    phyRelNode = StarColumnConverter.insertRenameProject(phyRelNode);
    /*
     * 1.)
     * Join might cause naming conflicts from its left and right child.
     * In such case, we have to insert Project to rename the conflicting names.
     */
    phyRelNode = JoinPrelRenameVisitor.insertRenameProject(phyRelNode);
    /*
     * 1.1) Swap left / right for an INNER hash join if the left's row count is < (1 + margin) times the right's row count.
     * We want the smaller dataset on the right side, since the hash table is built on the right side.
     */
    if (context.getPlannerSettings().isHashJoinSwapEnabled()) {
        phyRelNode = SwapHashJoinVisitor.swapHashJoin(phyRelNode, new Double(context.getPlannerSettings().getHashJoinSwapMarginFactor()));
    }
    if (context.getPlannerSettings().isParquetRowGroupFilterPushdownPlanningEnabled()) {
        phyRelNode = (Prel) transform(PlannerType.HEP_BOTTOM_UP, PlannerPhase.PHYSICAL_PARTITION_PRUNING, phyRelNode);
    }
    /*
     * 1.2) Break up all expressions with complex outputs into their own project operations
     */
    phyRelNode = phyRelNode.accept(new SplitUpComplexExpressions(config.getConverter().getTypeFactory(), context.getDrillOperatorTable(), context.getPlannerSettings().functionImplementationRegistry), null);
    /*
     * 1.3) Projections that contain reference to flatten are rewritten as Flatten operators followed by Project
     */
    phyRelNode = phyRelNode.accept(new RewriteProjectToFlatten(config.getConverter().getTypeFactory(), context.getDrillOperatorTable()), null);
    /*
     * 2.)
     * Since our operators work via names rather than indices, we have to make sure to reorder any
     * output before we return data to the user, as we may have accidentally shuffled things.
     * This adds a trivial project to reorder columns prior to output.
     */
    phyRelNode = FinalColumnReorderer.addFinalColumnOrdering(phyRelNode);
    /*
     * 3.)
     * If two fragments both have an estimated parallelization of one, remove the exchange
     * separating them
     */
    phyRelNode = ExcessiveExchangeIdentifier.removeExcessiveEchanges(phyRelNode, targetSliceSize);
    /* 5.)
     * if the client does not support complex types (Map, Repeated),
     * insert a project which would convert them to JSON
     */
    if (!context.getSession().isSupportComplexTypes()) {
        logger.debug("Client does not support complex types, add ComplexToJson operator.");
        phyRelNode = ComplexToJsonPrelVisitor.addComplexToJsonPrel(phyRelNode);
    }
    /* 6.)
     * Insert LocalExchange (mux and/or demux) nodes
     */
    phyRelNode = InsertLocalExchangeVisitor.insertLocalExchanges(phyRelNode, queryOptions);
    /* 7.)
     * Next, we add any required selection vector removers given the supported encodings of each
     * operator. This will ultimately move to a new trait, but we're managing it here for now to avoid
     * introducing new issues in planning before the next release
     */
    phyRelNode = SelectionVectorPrelVisitor.addSelectionRemoversWhereNecessary(phyRelNode);
    /* 8.)
     * Finally, make sure that no rels are repeats.
     * This could happen in the case of querying the same table twice as Optiq may canonicalize these.
     */
    phyRelNode = RelUniqifier.uniqifyGraph(phyRelNode);
    return phyRelNode;
}
Also used: UnsupportedRelOperatorException (org.apache.drill.exec.work.foreman.UnsupportedRelOperatorException), RewriteProjectToFlatten (org.apache.drill.exec.planner.physical.visitor.RewriteProjectToFlatten), Stopwatch (com.google.common.base.Stopwatch), ArrayList (java.util.ArrayList), RelTraitSet (org.apache.calcite.plan.RelTraitSet), SplitUpComplexExpressions (org.apache.drill.exec.planner.physical.visitor.SplitUpComplexExpressions), RelOptPlanner (org.apache.calcite.plan.RelOptPlanner), OptionManager (org.apache.drill.exec.server.options.OptionManager), Prel (org.apache.drill.exec.planner.physical.Prel), RelNode (org.apache.calcite.rel.RelNode)
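
The re-planning branch above turns the hash operators off at QUERY scope before the second VOLCANO pass. Here is a minimal sketch of that option round-trip, using the same OptionManager/OptionValue calls as the method; the getOption(BooleanValidator) read-back is an assumption about this Drill version's API.

OptionManager queryOptions = context.getOptions();
// Disable hash join and hash aggregate for this query only, exactly as convertToPrel() does.
queryOptions.setOption(OptionValue.createBoolean(OptionValue.OptionType.QUERY, PlannerSettings.HASHJOIN.getOptionName(), false));
queryOptions.setOption(OptionValue.createBoolean(OptionValue.OptionType.QUERY, PlannerSettings.HASHAGG.getOptionName(), false));
// Read back the effective value; the next PHYSICAL planning pass sees hash operators disabled.
boolean hashJoinEnabled = queryOptions.getOption(PlannerSettings.HASHJOIN);  // expected: false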

Example 28 with OptionManager

Use of org.apache.drill.exec.server.options.OptionManager in project drill by axbaretto.

From the class ExternalSortBatchCreator, the method getBatch:

@Override
public AbstractRecordBatch<ExternalSort> getBatch(ExecutorFragmentContext context, ExternalSort config, List<RecordBatch> children) throws ExecutionSetupException {
    Preconditions.checkArgument(children.size() == 1);
    // Prefer the managed version, but provide runtime and boot-time options
    // to disable it and revert to the "legacy" version. The legacy version
    // is retained primarily to allow cross-check testing against the managed
    // version, and as a fallback in the first release of the managed version.
    OptionManager optionManager = context.getOptions();
    boolean disableManaged = optionManager.getOption(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED_OPTION);
    if (!disableManaged) {
        DrillConfig drillConfig = context.getConfig();
        disableManaged = drillConfig.hasPath(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED) && drillConfig.getBoolean(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED);
    }
    if (disableManaged) {
        return new ExternalSortBatch(config, context, children.iterator().next());
    } else {
        return new org.apache.drill.exec.physical.impl.xsort.managed.ExternalSortBatch(config, context, children.iterator().next());
    }
}
Also used: DrillConfig (org.apache.drill.common.config.DrillConfig), OptionManager (org.apache.drill.exec.server.options.OptionManager)
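
When the runtime option is left unset, getBatch() falls back to the boot-time configuration. A standalone, hedged sketch of that fallback read follows; loading the config via DrillConfig.create() is an assumption made for illustration, while the hasPath/getBoolean pair is the same one used above.

// Hedged sketch of the boot-time fallback consulted by getBatch() above.
DrillConfig drillConfig = DrillConfig.create();  // assumed: default boot configuration
boolean bootDisableManaged = drillConfig.hasPath(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED)
        && drillConfig.getBoolean(ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED);
// bootDisableManaged mirrors what getBatch() computes when the runtime option leaves managed sort enabled.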

Example 29 with OptionManager

Use of org.apache.drill.exec.server.options.OptionManager in project drill by axbaretto.

From the class MemoryAllocationUtilities, the method setupBufferedOpsMemoryAllocations:

/**
 * Helper method to setup Memory Allocations
 * <p>
 * Plan the memory for buffered operators (the only ones that can spill in this release)
 * based on assumptions. These assumptions are the amount of memory per node to give
 * to each query and the number of sort operators per node.
 * <p>
 * The reason the total
 * memory is an assumption is that we have no knowledge of the number of queries
 * that can run, so we need the user to tell us that information by configuring the
 * amount of memory to be assumed available to each query.
 * <p>
 * The number of sorts per node could be calculated, but we instead simply take
 * the worst case: the maximum per-query, per-node parallelization and assume that
 * all sorts appear in all fragments &mdash; a gross oversimplification, but one
 * that Drill has long made.
 * <p>
 * Since this method can be used in multiple places, it lives in this class
 * rather than in Foreman.
 * @param plan the physical plan whose buffered operators receive memory allocations
 * @param queryContext the query context supplying config and option values
 */
public static void setupBufferedOpsMemoryAllocations(final PhysicalPlan plan, final QueryContext queryContext) {
    if (plan.getProperties().hasResourcePlan) {
        return;
    }
    // look for external sorts
    final List<PhysicalOperator> bufferedOpList = new LinkedList<>();
    for (final PhysicalOperator op : plan.getSortedOperators()) {
        if (op.isBufferedOperator()) {
            bufferedOpList.add(op);
        }
    }
    // if there are any sorts, compute the maximum allocation, and set it on them
    plan.getProperties().hasResourcePlan = true;
    if (bufferedOpList.isEmpty()) {
        return;
    }
    // Setup options, etc.
    final OptionManager optionManager = queryContext.getOptions();
    final long directMemory = DrillConfig.getMaxDirectMemory();
    // Compute per-node, per-query memory.
    final long maxAllocPerNode = computeQueryMemory(queryContext.getConfig(), optionManager, directMemory);
    logger.debug("Memory per query per node: {}", maxAllocPerNode);
    // Now divide up the memory by slices and operators.
    final long opMinMem = computeOperatorMemory(optionManager, maxAllocPerNode, bufferedOpList.size());
    for (final PhysicalOperator op : bufferedOpList) {
        final long alloc = Math.max(opMinMem, op.getInitialAllocation());
        op.setMaxAllocation(alloc);
    }
}
Also used: PhysicalOperator (org.apache.drill.exec.physical.base.PhysicalOperator), LinkedList (java.util.LinkedList), OptionManager (org.apache.drill.exec.server.options.OptionManager)
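
The worst-case split described in the Javadoc can be pictured with a hedged, simplified calculation that reuses maxAllocPerNode and bufferedOpList from the method above; the width value and the formula are illustrative only, not the actual computeOperatorMemory() source.

// Illustrative only: divide the per-query, per-node budget evenly across every buffered
// operator in every fragment, assuming a worst-case parallelization width per node.
long assumedMaxWidthPerNode = 8;  // hypothetical worst-case minor-fragment count per node
long perOperatorFloor = maxAllocPerNode / (assumedMaxWidthPerNode * bufferedOpList.size());
// Each operator then receives at least this floor, or its own initial allocation if larger,
// matching the Math.max(opMinMem, op.getInitialAllocation()) loop above.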

Example 30 with OptionManager

Use of org.apache.drill.exec.server.options.OptionManager in project drill by axbaretto.

From the class TestQueryMemoryAlloc, the method testDefaultOptions:

@Test
public void testDefaultOptions() throws Exception {
    OperatorFixture.Builder builder = OperatorFixture.builder();
    builder.systemOption(ExecConstants.PERCENT_MEMORY_PER_QUERY_KEY, 0.05);
    builder.systemOption(ExecConstants.MAX_QUERY_MEMORY_PER_NODE_KEY, 2 * ONE_GB);
    try (OperatorFixture fixture = builder.build()) {
        final OptionManager optionManager = fixture.getOptionManager();
        optionManager.setLocalOption(ExecConstants.PERCENT_MEMORY_PER_QUERY_KEY, 0.05);
        optionManager.setLocalOption(ExecConstants.MAX_QUERY_MEMORY_PER_NODE_KEY, 2 * ONE_GB);
        // Out-of-box memory, use query memory per node as floor.
        long mem = MemoryAllocationUtilities.computeQueryMemory(fixture.config(), optionManager, 8 * ONE_GB);
        assertEquals(2 * ONE_GB, mem);
        // Up to 40 GB, query memory dominates.
        mem = MemoryAllocationUtilities.computeQueryMemory(fixture.config(), optionManager, 40 * ONE_GB);
        assertEquals(2 * ONE_GB, mem);
        // After 40 GB, the percent dominates
        mem = MemoryAllocationUtilities.computeQueryMemory(fixture.config(), optionManager, 100 * ONE_GB);
        assertEquals(5 * ONE_GB, mem);
    }
}
Also used: OperatorFixture (org.apache.drill.test.OperatorFixture), OptionManager (org.apache.drill.exec.server.options.OptionManager), Test (org.junit.Test), DrillTest (org.apache.drill.test.DrillTest)
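
The three assertions follow from the two options set in the fixture: a 5 percent per-query share of direct memory and a 2 GB per-query value that acts as a floor. The arithmetic, spelled out:

//   8 GB   * 0.05 = 0.4 GB  -> below the 2 GB floor    -> computeQueryMemory returns 2 GB
//   40 GB  * 0.05 = 2 GB    -> exactly at the floor    -> computeQueryMemory returns 2 GB
//   100 GB * 0.05 = 5 GB    -> percent share dominates -> computeQueryMemory returns 5 GB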

Aggregations

OptionManager (org.apache.drill.exec.server.options.OptionManager): 39
Test (org.junit.Test): 10
DrillTest (org.apache.drill.test.DrillTest): 8
OperatorFixture (org.apache.drill.test.OperatorFixture): 8
DrillbitContext (org.apache.drill.exec.server.DrillbitContext): 7
OptionValue (org.apache.drill.exec.server.options.OptionValue): 6
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException): 5
SystemOptionManager (org.apache.drill.exec.server.options.SystemOptionManager): 5
LinkedList (java.util.LinkedList): 4
Callable (java.util.concurrent.Callable): 4
ExecutionException (java.util.concurrent.ExecutionException): 4
DrillConfig (org.apache.drill.common.config.DrillConfig): 4
MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 4
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 4
MaterializedField (org.apache.drill.exec.record.MaterializedField): 4
ArrayList (java.util.ArrayList): 3
RelOptPlanner (org.apache.calcite.plan.RelOptPlanner): 3
RelTraitSet (org.apache.calcite.plan.RelTraitSet): 3
RelNode (org.apache.calcite.rel.RelNode): 3
SqlSetOption (org.apache.calcite.sql.SqlSetOption): 3