Example 11 with ReceivePlanNode

use of org.voltdb.plannodes.ReceivePlanNode in project voltdb by VoltDB.

the class PlanAssembler method getNextSelectPlan.

private CompiledPlan getNextSelectPlan() {
    assert (m_subAssembler != null);
    // A matview reaggregation template plan may have been initialized
    // with a post-predicate expression moved from the statement's
    // join tree prior to any subquery planning.
    // Since normally subquery planning is driven from the join tree,
    // any subqueries that are moved out of the join tree would need
    // to be planned separately.
    // This planning would need to be done prior to calling
    // m_subAssembler.nextPlan()
    // because it can have query partitioning implications.
    // Under the current query limitations, the partitioning implications
    // are very simple -- subqueries are not allowed in multipartition
    // queries against partitioned data, so detection of a subquery in
    // the same query as a matview reaggregation can just return an error,
    // without any need for subquery planning here.
    HashAggregatePlanNode reAggNode = null;
    HashAggregatePlanNode mvReAggTemplate = m_parsedSelect.m_mvFixInfo.getReAggregationPlanNode();
    if (mvReAggTemplate != null) {
        reAggNode = new HashAggregatePlanNode(mvReAggTemplate);
        AbstractExpression postPredicate = reAggNode.getPostPredicate();
        if (postPredicate != null && postPredicate.hasSubquerySubexpression()) {
            // For now, this is just a special case violation of the limitation on
            // use of subquery expressions in MP queries on partitioned data.
            // That special case was going undetected when we didn't flag it here.
            m_recentErrorMsg = IN_EXISTS_SCALAR_ERROR_MESSAGE;
            return null;
        }
    // // Something more along these lines would have to be enabled
    // // to allow expression subqueries to be used in multi-partition
    // // matview queries.
    // if (!getBestCostPlanForExpressionSubQueries(subqueryExprs)) {
    //     // There was at least one sub-query and we should have a compiled plan for it
    //    return null;
    // }
    }
    AbstractPlanNode subSelectRoot = m_subAssembler.nextPlan();
    if (subSelectRoot == null) {
        m_recentErrorMsg = m_subAssembler.m_recentErrorMsg;
        return null;
    }
    AbstractPlanNode root = subSelectRoot;
    boolean mvFixNeedsProjection = false;
    /*
     * If the access plan for the table in the join order was for a
     * distributed table scan there must be a send/receive pair at the top
     * EXCEPT for the special outer join case in which a replicated table
     * was on the OUTER side of an outer join across from the (joined) scan
     * of the partitioned table(s) (all of them) in the query. In that case,
     * the one required send/receive pair is already in the plan below the
     * inner side of a NestLoop join.
     */
    if (m_partitioning.requiresTwoFragments()) {
        boolean mvFixInfoCoordinatorNeeded = true;
        boolean mvFixInfoEdgeCaseOuterJoin = false;
        ArrayList<AbstractPlanNode> receivers = root.findAllNodesOfClass(AbstractReceivePlanNode.class);
        if (receivers.size() == 1) {
            // Edge cases: left outer join with replicated table.
            if (m_parsedSelect.m_mvFixInfo.needed()) {
                mvFixInfoCoordinatorNeeded = false;
                AbstractPlanNode receiveNode = receivers.get(0);
                if (receiveNode.getParent(0) instanceof NestLoopPlanNode) {
                    if (subSelectRoot.hasInlinedIndexScanOfTable(m_parsedSelect.m_mvFixInfo.getMVTableName())) {
                        return getNextSelectPlan();
                    }
                    List<AbstractPlanNode> nljs = receiveNode.findAllNodesOfType(PlanNodeType.NESTLOOP);
                    List<AbstractPlanNode> nlijs = receiveNode.findAllNodesOfType(PlanNodeType.NESTLOOPINDEX);
                    // This is like a single table case.
                    if (nljs.size() + nlijs.size() == 0) {
                        mvFixInfoEdgeCaseOuterJoin = true;
                    }
                    root = handleMVBasedMultiPartQuery(reAggNode, root, mvFixInfoEdgeCaseOuterJoin);
                }
            }
        } else {
            if (receivers.size() > 0) {
                throw new PlanningErrorException("This special case join between an outer replicated table and an inner partitioned table is too complex and is not supported.");
            }
            root = SubPlanAssembler.addSendReceivePair(root);
            // Root is a receive node here.
            assert (root instanceof ReceivePlanNode);
            if (m_parsedSelect.mayNeedAvgPushdown()) {
                m_parsedSelect.switchOptimalSuiteForAvgPushdown();
            }
            if (m_parsedSelect.m_tableList.size() > 1 && m_parsedSelect.m_mvFixInfo.needed() && subSelectRoot.hasInlinedIndexScanOfTable(m_parsedSelect.m_mvFixInfo.getMVTableName())) {
                // An inlined index scan of the MV table under a nested-loop
                // index join cannot be supported here, so retry with the next plan.
                return getNextSelectPlan();
            }
        }
        root = handleAggregationOperators(root);
        // Process the re-aggregate plan node and insert it into the plan.
        if (m_parsedSelect.m_mvFixInfo.needed() && mvFixInfoCoordinatorNeeded) {
            AbstractPlanNode tmpRoot = root;
            root = handleMVBasedMultiPartQuery(reAggNode, root, mvFixInfoEdgeCaseOuterJoin);
            if (root != tmpRoot) {
                mvFixNeedsProjection = true;
            }
        }
    } else {
        /*
         * There is no receive node, so the root is a single-partition plan.
         */
        // With no receive plan node and no distributed plan generated,
        // the MV reaggregation fix is not needed.
        m_parsedSelect.m_mvFixInfo.setNeeded(false);
        root = handleAggregationOperators(root);
    }
    // If the statement has window function expressions, handle the windowed
    // operators here (this adds a PartitionByPlanNode).
    if (m_parsedSelect.hasWindowFunctionExpression()) {
        root = handleWindowedOperators(root);
    }
    if (m_parsedSelect.hasOrderByColumns()) {
        root = handleOrderBy(m_parsedSelect, root);
        if (m_parsedSelect.isComplexOrderBy() && root instanceof OrderByPlanNode) {
            AbstractPlanNode child = root.getChild(0);
            AbstractPlanNode grandChild = child.getChild(0);
            // swap the ORDER BY and complex aggregate Projection node
            if (child instanceof ProjectionPlanNode) {
                root.unlinkChild(child);
                child.unlinkChild(grandChild);
                child.addAndLinkChild(root);
                root.addAndLinkChild(grandChild);
                // update the new root
                root = child;
            } else if (m_parsedSelect.hasDistinctWithGroupBy() && child.getPlanNodeType() == PlanNodeType.HASHAGGREGATE && grandChild.getPlanNodeType() == PlanNodeType.PROJECTION) {
                AbstractPlanNode grandGrandChild = grandChild.getChild(0);
                child.clearParents();
                root.clearChildren();
                grandGrandChild.clearParents();
                grandChild.clearChildren();
                grandChild.addAndLinkChild(root);
                root.addAndLinkChild(grandGrandChild);
                root = child;
            }
        }
    }
    // Add a projection node on top of the plan if one is needed.
    if (mvFixNeedsProjection || needProjectionNode(root)) {
        root = addProjection(root);
    }
    if (m_parsedSelect.hasLimitOrOffset()) {
        root = handleSelectLimitOperator(root);
    }
    CompiledPlan plan = new CompiledPlan();
    plan.rootPlanGraph = root;
    plan.setReadOnly(true);
    boolean orderIsDeterministic = m_parsedSelect.isOrderDeterministic();
    boolean hasLimitOrOffset = m_parsedSelect.hasLimitOrOffset();
    String contentDeterminismMessage = m_parsedSelect.getContentDeterminismMessage();
    plan.statementGuaranteesDeterminism(hasLimitOrOffset, orderIsDeterministic, contentDeterminismMessage);
    // Apply the micro-optimization:
    // LIMIT push down, Table count / Counting Index, Optimized Min/Max
    MicroOptimizationRunner.applyAll(plan, m_parsedSelect);
    return plan;
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) OrderByPlanNode(org.voltdb.plannodes.OrderByPlanNode) AbstractReceivePlanNode(org.voltdb.plannodes.AbstractReceivePlanNode) MergeReceivePlanNode(org.voltdb.plannodes.MergeReceivePlanNode) ReceivePlanNode(org.voltdb.plannodes.ReceivePlanNode) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) NestLoopPlanNode(org.voltdb.plannodes.NestLoopPlanNode) AbstractExpression(org.voltdb.expressions.AbstractExpression) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)
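
The send/receive placement that getNextSelectPlan reasons about above is visible directly in compiled fragments. The following is a hypothetical sketch, not a test from the VoltDB suite, written in the style of the TestPlansGroupBy examples below and assuming the same fixture (a partitioned table T1 with a column A1): a multi-partition scan produces two fragments, with exactly one receive node in the coordinator fragment and a send node above the scan in the partition fragment.

public void testSendReceivePairSketch() {
    // Hypothetical sketch; assumes the TestPlansGroupBy schema where T1 is partitioned.
    List<AbstractPlanNode> pns = compileToFragments("SELECT A1 from T1");
    // Two fragments: coordinator (index 0) and partition (index 1).
    assertEquals(2, pns.size());
    // The coordinator fragment holds the top half of the send/receive pair:
    // exactly one receive node, as added by SubPlanAssembler.addSendReceivePair().
    List<AbstractPlanNode> receives = pns.get(0).findAllNodesOfClass(AbstractReceivePlanNode.class);
    assertEquals(1, receives.size());
    // The partition fragment is rooted at a send node above the table scan.
    AbstractPlanNode p = pns.get(1);
    assertTrue(p instanceof SendPlanNode);
    assertTrue(p.getChild(0) instanceof AbstractScanPlanNode);
}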

Example 12 with ReceivePlanNode

use of org.voltdb.plannodes.ReceivePlanNode in project voltdb by VoltDB.

the class PlanAssembler method handleAggregationOperators.

private AbstractPlanNode handleAggregationOperators(AbstractPlanNode root) {
    /*
     * "Select A from T group by A" is grouped but has no aggregate operator
     * expressions. Catch that case by checking the grouped flag
     */
    if (m_parsedSelect.hasAggregateOrGroupby()) {
        AggregatePlanNode aggNode = null;
        // The top aggregation node, i.e., the one on the coordinator.
        AggregatePlanNode topAggNode = null;
        IndexGroupByInfo gbInfo = new IndexGroupByInfo();
        if (root instanceof AbstractReceivePlanNode) {
            // Skip this for a DISTINCT aggregate that does not group by the partition column.
            if (!m_parsedSelect.hasAggregateDistinct() || m_parsedSelect.hasPartitionColumnInGroupby()) {
                AbstractPlanNode candidate = root.getChild(0).getChild(0);
                gbInfo.m_multiPartition = true;
                switchToIndexScanForGroupBy(candidate, gbInfo);
            }
        } else if (switchToIndexScanForGroupBy(root, gbInfo)) {
            root = gbInfo.m_indexAccess;
        }
        boolean needHashAgg = gbInfo.needHashAggregator(root, m_parsedSelect);
        // Construct the aggregate nodes
        if (needHashAgg) {
            if (m_parsedSelect.m_mvFixInfo.needed()) {
                // TODO: may optimize this edge case in future
                aggNode = new HashAggregatePlanNode();
            } else {
                if (gbInfo.isChangedToSerialAggregate()) {
                    assert (root instanceof ReceivePlanNode);
                    aggNode = new AggregatePlanNode();
                } else if (gbInfo.isChangedToPartialAggregate()) {
                    aggNode = new PartialAggregatePlanNode(gbInfo.m_coveredGroupByColumns);
                } else {
                    aggNode = new HashAggregatePlanNode();
                }
                topAggNode = new HashAggregatePlanNode();
            }
        } else {
            aggNode = new AggregatePlanNode();
            if (!m_parsedSelect.m_mvFixInfo.needed()) {
                topAggNode = new AggregatePlanNode();
            }
        }
        NodeSchema agg_schema = new NodeSchema();
        NodeSchema top_agg_schema = new NodeSchema();
        for (int outputColumnIndex = 0; outputColumnIndex < m_parsedSelect.m_aggResultColumns.size(); outputColumnIndex += 1) {
            ParsedColInfo col = m_parsedSelect.m_aggResultColumns.get(outputColumnIndex);
            AbstractExpression rootExpr = col.expression;
            AbstractExpression agg_input_expr = null;
            SchemaColumn schema_col = null;
            SchemaColumn top_schema_col = null;
            if (rootExpr instanceof AggregateExpression) {
                ExpressionType agg_expression_type = rootExpr.getExpressionType();
                agg_input_expr = rootExpr.getLeft();
                // A bit of a hack: ProjectionNodes after the
                // aggregate node need the output columns here to
                // contain TupleValueExpressions (effectively on a temp table).
                // So we construct one based on the output of the
                // aggregate expression, the column alias provided by HSQL,
                // and the offset into the output table schema for the
                // aggregate node that we're computing.
                // Oh, oh, it's magic, you know..
                TupleValueExpression tve = new TupleValueExpression(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, "", col.alias, rootExpr, outputColumnIndex);
                tve.setDifferentiator(col.differentiator);
                boolean is_distinct = ((AggregateExpression) rootExpr).isDistinct();
                aggNode.addAggregate(agg_expression_type, is_distinct, outputColumnIndex, agg_input_expr);
                schema_col = new SchemaColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, "", col.alias, tve, outputColumnIndex);
                top_schema_col = new SchemaColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, "", col.alias, tve, outputColumnIndex);
                /*
                 * Special-case count(*), count(), sum(), min() and max() to
                 * push them down to each partition. The push-down is done if
                 * the select columns contain only the listed aggregate
                 * operators and other group-by columns. If the select columns
                 * include any other aggregates, the push-down is not done. - nshi
                 */
                if (topAggNode != null) {
                    ExpressionType top_expression_type = agg_expression_type;
                    /*
                     * For count(*), count() and sum(), the pushed-down
                     * aggregate node doesn't change. An extra sum()
                     * aggregate node is added to the coordinator to sum up
                     * the numbers from all the partitions. The input and
                     * output schemas of the sum() aggregate node are the
                     * same as the output schema of the pushed-down
                     * aggregate node.
                     *
                     * If DISTINCT is specified, don't do the push-down for
                     * count() and sum() when not grouping by the partition
                     * column. An exception is when the aggregation argument
                     * is the partition column (ENG-4980).
                     */
                    if (agg_expression_type == ExpressionType.AGGREGATE_COUNT_STAR || agg_expression_type == ExpressionType.AGGREGATE_COUNT || agg_expression_type == ExpressionType.AGGREGATE_SUM) {
                        if (is_distinct && !(m_parsedSelect.hasPartitionColumnInGroupby() || canPushDownDistinctAggregation((AggregateExpression) rootExpr))) {
                            topAggNode = null;
                        } else {
                            // For a DISTINCT aggregate grouped by the
                            // partition column, the top aggregate node will
                            // be dropped later, so assigning
                            // top_expression_type here has no effect.
                            top_expression_type = ExpressionType.AGGREGATE_SUM;
                        }
                    } else /*
                            * For min() and max(), the pushed-down aggregate node
                            * doesn't change. An extra aggregate node of the same
                            * type is added to the coordinator. The input and
                            * output schemas of the top aggregate node are the
                            * same as the output schema of the pushed-down
                            * aggregate node.
                            *
                            * APPROX_COUNT_DISTINCT can be similarly pushed down, but
                            * must be split into two different functions, which is
                            * done later, from pushDownAggregate().
                            */
                    if (agg_expression_type != ExpressionType.AGGREGATE_MIN && agg_expression_type != ExpressionType.AGGREGATE_MAX && agg_expression_type != ExpressionType.AGGREGATE_APPROX_COUNT_DISTINCT) {
                        /*
                         * Unsupported aggregate for push-down (AVG for example).
                         */
                        topAggNode = null;
                    }
                    if (topAggNode != null) {
                        /*
                         * Input column of the top aggregate node is the
                         * output column of the push-down aggregate node.
                         */
                        boolean topDistinctFalse = false;
                        topAggNode.addAggregate(top_expression_type, topDistinctFalse, outputColumnIndex, tve);
                    }
                }
            // end if we have a top agg node
            } else {
                // Complex expressions containing aggregates have already been
                // broken down, so this column has no aggregate subexpressions.
                assert (!rootExpr.hasAnySubexpressionOfClass(AggregateExpression.class));
                /*
                 * These columns are the pass through columns that are not being
                 * aggregated on. These are the ones from the SELECT list. They
                 * MUST already exist in the child node's output. Find them and
                 * add them to the aggregate's output.
                 */
                schema_col = new SchemaColumn(col.tableName, col.tableAlias, col.columnName, col.alias, col.expression, outputColumnIndex);
                AbstractExpression topExpr = null;
                if (col.groupBy) {
                    topExpr = m_parsedSelect.m_groupByExpressions.get(col.alias);
                } else {
                    topExpr = col.expression;
                }
                top_schema_col = new SchemaColumn(col.tableName, col.tableAlias, col.columnName, col.alias, topExpr, outputColumnIndex);
            }
            agg_schema.addColumn(schema_col);
            top_agg_schema.addColumn(top_schema_col);
        }
        for (ParsedColInfo col : m_parsedSelect.groupByColumns()) {
            aggNode.addGroupByExpression(col.expression);
            if (topAggNode != null) {
                topAggNode.addGroupByExpression(m_parsedSelect.m_groupByExpressions.get(col.alias));
            }
        }
        aggNode.setOutputSchema(agg_schema);
        if (topAggNode != null) {
            if (m_parsedSelect.hasComplexGroupby()) {
                topAggNode.setOutputSchema(top_agg_schema);
            } else {
                topAggNode.setOutputSchema(agg_schema);
            }
        }
        // Never push down aggregation for MV fix case.
        root = pushDownAggregate(root, aggNode, topAggNode, m_parsedSelect);
    }
    return handleDistinctWithGroupby(root);
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) TupleValueExpression(org.voltdb.expressions.TupleValueExpression) AbstractReceivePlanNode(org.voltdb.plannodes.AbstractReceivePlanNode) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) AggregatePlanNode(org.voltdb.plannodes.AggregatePlanNode) PartialAggregatePlanNode(org.voltdb.plannodes.PartialAggregatePlanNode) MergeReceivePlanNode(org.voltdb.plannodes.MergeReceivePlanNode) ReceivePlanNode(org.voltdb.plannodes.ReceivePlanNode) SchemaColumn(org.voltdb.plannodes.SchemaColumn) AggregateExpression(org.voltdb.expressions.AggregateExpression) Constraint(org.voltdb.catalog.Constraint) AbstractExpression(org.voltdb.expressions.AbstractExpression) ExpressionType(org.voltdb.types.ExpressionType) NodeSchema(org.voltdb.plannodes.NodeSchema)
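
The push-down rules described in the comments above can likewise be observed from compiled fragments. A hypothetical sketch, not from the VoltDB suite, assuming the same TestPlansGroupBy fixture with partitioned table T1: SUM is one of the aggregates the code pushes down, so the coordinator fragment re-aggregates the partial sums directly above a ReceivePlanNode, while the partition fragment carries the pushed-down aggregate (possibly inlined into the scan, as in the AVG example below).

public void testSumPushdownSketch() {
    // Hypothetical sketch; assumes the TestPlansGroupBy schema where T1 is partitioned.
    List<AbstractPlanNode> pns = compileToFragments("SELECT SUM(A1) from T1");
    // Coordinator fragment: an aggregate node summing the per-partition
    // partial sums sits directly above the receive node.
    AbstractPlanNode p = pns.get(0).getChild(0);
    assertTrue(p instanceof AggregatePlanNode);
    assertTrue(p.getChild(0) instanceof ReceivePlanNode);
    // Partition fragment: the pushed-down aggregate, either standalone or
    // inlined into the scan node.
    p = pns.get(1).getChild(0);
    assertTrue(p instanceof AggregatePlanNode
            || p.getInlinePlanNode(PlanNodeType.AGGREGATE) != null);
}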

Example 13 with ReceivePlanNode

use of org.voltdb.plannodes.ReceivePlanNode in project voltdb by VoltDB.

the class TestPlansGroupBy method testGroupByA1.

public void testGroupByA1() {
    AbstractPlanNode p;
    AggregatePlanNode aggNode;
    List<AbstractPlanNode> pns;
    pns = compileToFragments("SELECT A1 from T1 group by A1");
    p = pns.get(0).getChild(0);
    assertTrue(p instanceof AggregatePlanNode);
    assertTrue(p.getChild(0) instanceof ReceivePlanNode);
    p = pns.get(1).getChild(0);
    assertTrue(p instanceof AbstractScanPlanNode);
    // No index, inline hash aggregate
    assertNotNull(p.getInlinePlanNode(PlanNodeType.HASHAGGREGATE));
    // Having
    pns = compileToFragments("SELECT A1, count(*) from T1 group by A1 Having count(*) > 3");
    p = pns.get(0).getChild(0);
    assertTrue(p instanceof AggregatePlanNode);
    aggNode = (AggregatePlanNode) p;
    assertNotNull(aggNode.getPostPredicate());
    assertTrue(p.getChild(0) instanceof ReceivePlanNode);
    p = pns.get(1).getChild(0);
    assertTrue(p instanceof AbstractScanPlanNode);
    // No index, inline hash aggregate
    assertNotNull(p.getInlinePlanNode(PlanNodeType.HASHAGGREGATE));
    aggNode = (AggregatePlanNode) p.getInlinePlanNode(PlanNodeType.HASHAGGREGATE);
    assertNull(aggNode.getPostPredicate());
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) AbstractScanPlanNode(org.voltdb.plannodes.AbstractScanPlanNode) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) AggregatePlanNode(org.voltdb.plannodes.AggregatePlanNode) ReceivePlanNode(org.voltdb.plannodes.ReceivePlanNode) AbstractReceivePlanNode(org.voltdb.plannodes.AbstractReceivePlanNode)

Example 14 with ReceivePlanNode

use of org.voltdb.plannodes.ReceivePlanNode in project voltdb by VoltDB.

the class TestPlansGroupBy method testInlineSerialAgg_noGroupBy_special.

// AVG is optimized into SUM / COUNT, which generates an extra projection node.
// In the future, the projection could be inlined into the aggregation.
public void testInlineSerialAgg_noGroupBy_special() {
    AbstractPlanNode p;
    List<AbstractPlanNode> pns;
    pns = compileToFragments("SELECT AVG(A1) from T1");
    //* enable to debug */ printExplainPlan(pns);
    p = pns.get(0).getChild(0);
    assertTrue(p instanceof ProjectionPlanNode);
    assertTrue(p.getChild(0) instanceof AggregatePlanNode);
    assertTrue(p.getChild(0).getChild(0) instanceof ReceivePlanNode);
    p = pns.get(1).getChild(0);
    assertTrue(p instanceof SeqScanPlanNode);
    assertNotNull(p.getInlinePlanNode(PlanNodeType.PROJECTION));
    assertNotNull(p.getInlinePlanNode(PlanNodeType.AGGREGATE));
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) SeqScanPlanNode(org.voltdb.plannodes.SeqScanPlanNode) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) AggregatePlanNode(org.voltdb.plannodes.AggregatePlanNode) ReceivePlanNode(org.voltdb.plannodes.ReceivePlanNode) AbstractReceivePlanNode(org.voltdb.plannodes.AbstractReceivePlanNode) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)

Example 15 with ReceivePlanNode

use of org.voltdb.plannodes.ReceivePlanNode in project voltdb by VoltDB.

the class TestPlansGroupBy method checkMVReaggregateFeature.

// topNode, reAggNode
private void checkMVReaggregateFeature(List<AbstractPlanNode> pns, boolean needFix, int numGroupByOfTopAggNode, int numAggsOfTopAggNode, int numGroupByOfReaggNode, int numAggsOfReaggNode, boolean aggPushdown, boolean aggInline) {
    assertEquals(2, pns.size());
    AbstractPlanNode p = pns.get(0);
    assertTrue(p instanceof SendPlanNode);
    p = p.getChild(0);
    if (p instanceof ProjectionPlanNode) {
        p = p.getChild(0);
    }
    if (p instanceof LimitPlanNode) {
        // No limit pushed down.
        p = p.getChild(0);
    }
    if (p instanceof OrderByPlanNode) {
        p = p.getChild(0);
    }
    HashAggregatePlanNode reAggNode = null;
    List<AbstractPlanNode> nodes = p.findAllNodesOfClass(AbstractReceivePlanNode.class);
    assertEquals(1, nodes.size());
    AbstractPlanNode receiveNode = nodes.get(0);
    // Indicates that there is no top aggregation node.
    if (numGroupByOfTopAggNode == -1) {
        if (needFix) {
            p = receiveNode.getParent(0);
            assertTrue(p instanceof HashAggregatePlanNode);
            reAggNode = (HashAggregatePlanNode) p;
            assertEquals(numGroupByOfReaggNode, reAggNode.getGroupByExpressionsSize());
            assertEquals(numAggsOfReaggNode, reAggNode.getAggregateTypesSize());
            p = p.getChild(0);
        }
        assertTrue(p instanceof ReceivePlanNode);
        p = pns.get(1);
        assertTrue(p instanceof SendPlanNode);
        p = p.getChild(0);
        assertTrue(p instanceof AbstractScanPlanNode);
        return;
    }
    if (p instanceof ProjectionPlanNode) {
        p = p.getChild(0);
    }
    //
    // Hash top aggregate node
    //
    AggregatePlanNode topAggNode = null;
    if (p instanceof AbstractJoinPlanNode) {
        // Inline aggregation with join
        topAggNode = AggregatePlanNode.getInlineAggregationNode(p);
    } else {
        assertTrue(p instanceof AggregatePlanNode);
        topAggNode = (AggregatePlanNode) p;
        p = p.getChild(0);
    }
    assertEquals(numGroupByOfTopAggNode, topAggNode.getGroupByExpressionsSize());
    assertEquals(numAggsOfTopAggNode, topAggNode.getAggregateTypesSize());
    if (needFix) {
        p = receiveNode.getParent(0);
        assertTrue(p instanceof HashAggregatePlanNode);
        reAggNode = (HashAggregatePlanNode) p;
        assertEquals(numGroupByOfReaggNode, reAggNode.getGroupByExpressionsSize());
        assertEquals(numAggsOfReaggNode, reAggNode.getAggregateTypesSize());
        p = p.getChild(0);
    }
    assertTrue(p instanceof ReceivePlanNode);
    // Test the second part
    p = pns.get(1);
    assertTrue(p instanceof SendPlanNode);
    p = p.getChild(0);
    if (aggPushdown) {
        assertTrue(!needFix);
        if (aggInline) {
            assertNotNull(AggregatePlanNode.getInlineAggregationNode(p));
        } else {
            assertTrue(p instanceof AggregatePlanNode);
            p = p.getChild(0);
        }
    }
    if (needFix) {
        assertTrue(p instanceof AbstractScanPlanNode);
    } else {
        assertTrue(p instanceof AbstractScanPlanNode || p instanceof AbstractJoinPlanNode);
    }
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) AbstractScanPlanNode(org.voltdb.plannodes.AbstractScanPlanNode) OrderByPlanNode(org.voltdb.plannodes.OrderByPlanNode) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) AggregatePlanNode(org.voltdb.plannodes.AggregatePlanNode) SendPlanNode(org.voltdb.plannodes.SendPlanNode) ReceivePlanNode(org.voltdb.plannodes.ReceivePlanNode) AbstractReceivePlanNode(org.voltdb.plannodes.AbstractReceivePlanNode) AbstractJoinPlanNode(org.voltdb.plannodes.AbstractJoinPlanNode) LimitPlanNode(org.voltdb.plannodes.LimitPlanNode) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)
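
For completeness, a hypothetical invocation of the helper above; the view name and the expected counts are placeholders, chosen to describe a view with two group-by columns and one aggregate column over a partitioned table, so the plan needs the coordinator-side re-aggregation fix and has no top aggregation node.

public void testMVReaggregateSketch() {
    // Hypothetical usage; "V_SOME_MV" is a placeholder view name, not part of
    // the VoltDB test schema.
    List<AbstractPlanNode> pns = compileToFragments("SELECT * FROM V_SOME_MV");
    // needFix = true, no top aggregation node (-1); the remaining counts
    // describe the re-aggregation node (2 group-by columns, 1 aggregate),
    // with no aggregate push-down or inlining.
    checkMVReaggregateFeature(pns, true, -1, 0, 2, 1, false, false);
}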

Aggregations

ReceivePlanNode (org.voltdb.plannodes.ReceivePlanNode) 28
AbstractPlanNode (org.voltdb.plannodes.AbstractPlanNode) 27
HashAggregatePlanNode (org.voltdb.plannodes.HashAggregatePlanNode) 17
MergeReceivePlanNode (org.voltdb.plannodes.MergeReceivePlanNode) 15
AbstractReceivePlanNode (org.voltdb.plannodes.AbstractReceivePlanNode) 14
ProjectionPlanNode (org.voltdb.plannodes.ProjectionPlanNode) 13
SendPlanNode (org.voltdb.plannodes.SendPlanNode) 12
AggregatePlanNode (org.voltdb.plannodes.AggregatePlanNode) 10
AbstractScanPlanNode (org.voltdb.plannodes.AbstractScanPlanNode) 8
OrderByPlanNode (org.voltdb.plannodes.OrderByPlanNode) 6
IndexScanPlanNode (org.voltdb.plannodes.IndexScanPlanNode) 5
NestLoopPlanNode (org.voltdb.plannodes.NestLoopPlanNode) 5
NestLoopIndexPlanNode (org.voltdb.plannodes.NestLoopIndexPlanNode) 4
SeqScanPlanNode (org.voltdb.plannodes.SeqScanPlanNode) 4
AbstractExpression (org.voltdb.expressions.AbstractExpression) 2
AbstractJoinPlanNode (org.voltdb.plannodes.AbstractJoinPlanNode) 2
PartialAggregatePlanNode (org.voltdb.plannodes.PartialAggregatePlanNode) 2
PlanNodeType (org.voltdb.types.PlanNodeType) 2
Constraint (org.voltdb.catalog.Constraint) 1
AggregateExpression (org.voltdb.expressions.AggregateExpression) 1