
Example 26 with ProjectionPlanNode

use of org.voltdb.plannodes.ProjectionPlanNode in project voltdb by VoltDB.

the class TestPlansSubQueries method testPartitionedSameLevel.

public void testPartitionedSameLevel() {
    // force it to be single partitioned.
    AbstractPlanNode pn;
    List<AbstractPlanNode> planNodes;
    String sql, sqlNoSimplification, equivalentSql;
    //
    // Single partition detection : single table
    //
    sql = "select A FROM (SELECT A FROM P1 WHERE A = 3) T1 ";
    sqlNoSimplification = "select A FROM (SELECT A FROM P1 WHERE A = 3 LIMIT 1) T1 ";
    equivalentSql = "SELECT A FROM P1 T1 WHERE A = 3";
    planNodes = compileToFragments(sqlNoSimplification);
    assertEquals(1, planNodes.size());
    pn = planNodes.get(0);
    assertTrue(pn instanceof SendPlanNode);
    pn = pn.getChild(0);
    checkSeqScan(pn, "T1", "A");
    pn = pn.getChild(0);
    checkPrimaryKeyIndexScan(pn, "P1", "A");
    assertNotNull(((IndexScanPlanNode) pn).getInlinePlanNode(PlanNodeType.PROJECTION));
    checkSubquerySimplification(sql, equivalentSql);
    sql = "select A, C FROM (SELECT A, C FROM P1 WHERE A = 3) T1 ";
    sqlNoSimplification = "select A, C FROM (SELECT A, C FROM P1 WHERE A = 3 LIMIT 1) T1 ";
    equivalentSql = "SELECT A, C FROM P1 T1 WHERE A = 3";
    planNodes = compileToFragments(sqlNoSimplification);
    assertEquals(1, planNodes.size());
    pn = planNodes.get(0);
    assertTrue(pn instanceof SendPlanNode);
    pn = pn.getChild(0);
    checkSeqScan(pn, "T1", "A", "C");
    pn = pn.getChild(0);
    checkPrimaryKeyIndexScan(pn, "P1", "A", "C");
    assertNotNull(((IndexScanPlanNode) pn).getInlinePlanNode(PlanNodeType.PROJECTION));
    checkSubquerySimplification(sql, equivalentSql);
    // Single partition query without selecting partition column from sub-query
    planNodes = compileToFragments("select C FROM (SELECT A, C FROM P1 WHERE A = 3 LIMIT 1) T1 ");
    assertEquals(1, planNodes.size());
    planNodes = compileToFragments("select C FROM (SELECT C FROM P1 WHERE A = 3 LIMIT 1) T1 ");
    assertEquals(1, planNodes.size());
    //
    // AdHoc multiple partitioned sub-select queries.
    //
    sql = "select A1, C FROM (SELECT A A1, C FROM P1) T1  ";
    sqlNoSimplification = "select A1, C FROM (SELECT DISTINCT A A1, C FROM P1) T1 ";
    equivalentSql = "SELECT A A1, C FROM P1 T1";
    planNodes = compileToFragments(sqlNoSimplification);
    assertEquals(2, planNodes.size());
    pn = planNodes.get(0).getChild(0);
    assertTrue(pn instanceof ProjectionPlanNode);
    pn = pn.getChild(0);
    assertTrue(pn instanceof ReceivePlanNode);
    pn = planNodes.get(1).getChild(0);
    checkSeqScan(pn, "T1", "A1", "C");
    checkSubquerySimplification(sql, equivalentSql);
    sql = "select A1 FROM (SELECT A A1, C FROM P1 WHERE A > 3) T1 ";
    sqlNoSimplification = "select A1 FROM (SELECT A A1, C FROM P1 WHERE A > 3 LIMIT 10) T1 ";
    equivalentSql = "SELECT A A1 FROM P1 T1 WHERE A > 3";
    planNodes = compileToFragments(sqlNoSimplification);
    assertEquals(2, planNodes.size());
    pn = planNodes.get(0).getChild(0);
    checkSeqScan(pn, "T1", "A1");
    checkSubquerySimplification(sql, equivalentSql);
    //
    // Group by
    //
    planNodes = compileToFragments("select C, SD FROM " + "(SELECT C, SUM(D) as SD FROM P1 GROUP BY C) T1 ");
    assertEquals(2, planNodes.size());
    pn = planNodes.get(0).getChild(0);
    checkSeqScan(pn, "T1", "C", "SD");
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.PROJECTION));
    pn = pn.getChild(0);
    assertTrue(pn instanceof HashAggregatePlanNode);
    pn = pn.getChild(0);
    assertTrue(pn instanceof ReceivePlanNode);
    pn = planNodes.get(1);
    assertTrue(pn instanceof SendPlanNode);
    pn = pn.getChild(0);
    checkPrimaryKeyIndexScan(pn, "P1", "C", "SD");
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.PROJECTION));
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.HASHAGGREGATE));
    // rename group by column
    planNodes = compileToFragments("select X, SD FROM " + "(SELECT C AS X, SUM(D) as SD FROM P1 GROUP BY C) T1 ");
    assertEquals(2, planNodes.size());
    pn = planNodes.get(0).getChild(0);
    checkSeqScan(pn, "T1", "X", "SD");
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.PROJECTION));
    pn = pn.getChild(0);
    assertTrue(pn instanceof HashAggregatePlanNode);
    pn = pn.getChild(0);
    assertTrue(pn instanceof ReceivePlanNode);
    pn = planNodes.get(1);
    assertTrue(pn instanceof SendPlanNode);
    pn = pn.getChild(0);
    checkPrimaryKeyIndexScan(pn, "P1", "C", "SD");
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.PROJECTION));
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.HASHAGGREGATE));
    AbstractPlanNode nlpn;
    //
    // Partitioned Joined tests
    //
    failToCompile("select * FROM " + "(SELECT C, SUM(D) as SD FROM P1 GROUP BY C) T1, P2 where T1.C = P2.A ", joinErrorMsg);
    planNodes = compileToFragments("select T1.C, T1.SD FROM " + "(SELECT C, SUM(D) as SD FROM P1 GROUP BY C) T1, R1 Where T1.C = R1.C ");
    assertEquals(2, planNodes.size());
    pn = planNodes.get(0).getChild(0);
    assertTrue(pn instanceof ProjectionPlanNode);
    nlpn = pn.getChild(0);
    assertTrue(nlpn instanceof NestLoopPlanNode);
    pn = nlpn.getChild(1);
    checkSeqScan(pn, "R1");
    pn = nlpn.getChild(0);
    checkSeqScan(pn, "T1", "C", "SD");
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.PROJECTION));
    pn = pn.getChild(0);
    assertTrue(pn instanceof HashAggregatePlanNode);
    pn = pn.getChild(0);
    assertTrue(pn instanceof ReceivePlanNode);
    pn = planNodes.get(1);
    assertTrue(pn instanceof SendPlanNode);
    pn = pn.getChild(0);
    checkPrimaryKeyIndexScan(pn, "P1", "C", "SD");
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.PROJECTION));
    assertNotNull(pn.getInlinePlanNode(PlanNodeType.HASHAGGREGATE));
    // Group by Partitioned column
    planNodes = compileToFragments("select C, SD FROM " + "(SELECT A, C, SUM(D) as SD FROM P1 WHERE A > 3 GROUP BY A, C) T1 ");
    assertEquals(2, planNodes.size());
    planNodes = compileToFragments("select C, SD FROM " + "(SELECT A, C, SUM(D) as SD FROM P1 WHERE A = 3 GROUP BY A, C) T1 ");
    assertEquals(1, planNodes.size());
    planNodes = compileToFragments("select T1.C, T1.SD FROM " + "(SELECT A, C, SUM(D) as SD FROM P1 WHERE A = 3 GROUP BY A, C) T1, R1 WHERE T1.C = R1.C ");
    assertEquals(1, planNodes.size());
    //
    // Limit
    //
    planNodes = compileToFragments("select C FROM (SELECT C FROM P1 WHERE A > 3 ORDER BY C LIMIT 5) T1 ");
    assertEquals(2, planNodes.size());
    planNodes = compileToFragments("select T1.C FROM (SELECT C FROM P1 WHERE A > 3 ORDER BY C LIMIT 5) T1, " + "R1 WHERE T1.C > R1.C ");
    assertEquals(2, planNodes.size());
    planNodes = compileToFragments("select C FROM (SELECT A, C FROM P1 WHERE A = 3 ORDER BY C LIMIT 5) T1 ");
    assertEquals(1, planNodes.size());
    // Without selecting partition column from sub-query
    planNodes = compileToFragments(("select C FROM (SELECT C FROM P1 WHERE A = 3 ORDER BY C LIMIT 5) T1 "));
    assertEquals(1, planNodes.size());
    planNodes = compileToFragments("select T1.C FROM (SELECT A, C FROM P1 WHERE A = 3 ORDER BY C LIMIT 5) T1, " + "R1 WHERE T1.C > R1.C ");
    assertEquals(1, planNodes.size());
    // Without selecting partition column from sub-query
    planNodes = compileToFragments("select T1.C FROM (SELECT C FROM P1 WHERE A = 3 ORDER BY C LIMIT 5) T1, " + "R1 WHERE T1.C > R1.C ");
    assertEquals(1, planNodes.size());
    //
    // Group by & LIMIT 5
    //
    planNodes = compileToFragments("select C, SD FROM " + "(SELECT C, SUM(D) as SD FROM P1 GROUP BY C ORDER BY C LIMIT 5) T1 ");
    assertEquals(2, planNodes.size());
    // Without selecting partition column from sub-query
    planNodes = compileToFragments("select C, SD FROM " + "(SELECT C, SUM(D) as SD FROM P1 WHERE A = 3 GROUP BY C ORDER BY C LIMIT 5) T1 ");
    assertEquals(1, planNodes.size());
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) SendPlanNode(org.voltdb.plannodes.SendPlanNode) MergeReceivePlanNode(org.voltdb.plannodes.MergeReceivePlanNode) ReceivePlanNode(org.voltdb.plannodes.ReceivePlanNode) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) NestLoopPlanNode(org.voltdb.plannodes.NestLoopPlanNode) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)
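
The test above repeats one pattern many times: compile a statement into plan fragments and check the fragment count, where one fragment means the planner recognized the query as single-partition and two means a coordinator fragment plus a distributed fragment. A minimal sketch of that pattern as a hypothetical helper (not part of TestPlansSubQueries; it assumes it lives in the same test class, so compileToFragments and the JUnit asserts are in scope):

// Hypothetical helper: compile a statement and assert how many plan
// fragments the planner produced.
private void assertFragmentCount(String sql, int expectedFragments) {
    List<AbstractPlanNode> fragments = compileToFragments(sql);
    assertEquals(expectedFragments, fragments.size());
}

With it, the single-partition checks above read as assertFragmentCount("select C FROM (SELECT C FROM P1 WHERE A = 3) T1 ", 1), and the multi-partitioned cases pass 2 instead.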

Example 27 with ProjectionPlanNode

use of org.voltdb.plannodes.ProjectionPlanNode in project voltdb by VoltDB.

the class TestPushDownAggregates method checkPushedDown.

private void checkPushedDown(List<AbstractPlanNode> pn, boolean isAggInlined, ExpressionType[] aggTypes, ExpressionType[] pushDownTypes, boolean hasProjectionNode) {
    // Aggregate push down check has to run on two fragments
    assertTrue(pn.size() == 2);
    AbstractPlanNode p = pn.get(0).getChild(0);
    if (hasProjectionNode) {
        // Complex aggregation or optimized AVG
        assertTrue(p instanceof ProjectionPlanNode);
        p = p.getChild(0);
    }
    assertTrue(p instanceof AggregatePlanNode);
    String fragmentString = p.toJSONString();
    ExpressionType[] topTypes = (pushDownTypes != null) ? pushDownTypes : aggTypes;
    for (ExpressionType type : topTypes) {
        assertTrue(fragmentString.contains("\"AGGREGATE_TYPE\":\"" + type.toString() + "\""));
    }
    // Check the pushed down aggregation
    p = pn.get(1).getChild(0);
    if (pushDownTypes == null) {
        assertTrue(p instanceof AbstractScanPlanNode);
        return;
    }
    if (isAggInlined) {
        // See ENG-6131
        if (p instanceof TableCountPlanNode) {
            return;
        }
        assertTrue(p instanceof AbstractScanPlanNode);
        assertTrue(p.getInlinePlanNode(PlanNodeType.AGGREGATE) != null || p.getInlinePlanNode(PlanNodeType.HASHAGGREGATE) != null);
        if (p.getInlinePlanNode(PlanNodeType.AGGREGATE) != null) {
            p = p.getInlinePlanNode(PlanNodeType.AGGREGATE);
        } else {
            p = p.getInlinePlanNode(PlanNodeType.HASHAGGREGATE);
        }
    } else {
        assertTrue(p instanceof AggregatePlanNode);
    }
    fragmentString = p.toJSONString();
    for (ExpressionType type : aggTypes) {
        assertTrue(fragmentString.contains("\"AGGREGATE_TYPE\":\"" + type.toString() + "\""));
    }
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) AbstractScanPlanNode(org.voltdb.plannodes.AbstractScanPlanNode) AggregatePlanNode(org.voltdb.plannodes.AggregatePlanNode) ExpressionType(org.voltdb.types.ExpressionType) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode) TableCountPlanNode(org.voltdb.plannodes.TableCountPlanNode)
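
A hedged usage sketch of the helper above; the query, table, and column names are illustrative assumptions, not taken from the original test. It expresses the common case where a SUM over a partitioned table is pushed down to the partitions and re-aggregated on the coordinator:

// Illustrative call (table/column names are assumptions): SUM is pushed down
// as an inlined SUM on each partition's scan and re-aggregated as SUM on the
// coordinator fragment, with no extra projection node expected.
List<AbstractPlanNode> pn = compileToFragments("SELECT SUM(A1) FROM T1");
checkPushedDown(pn, true,
        new ExpressionType[] { ExpressionType.AGGREGATE_SUM },
        new ExpressionType[] { ExpressionType.AGGREGATE_SUM },
        false);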

Example 28 with ProjectionPlanNode

use of org.voltdb.plannodes.ProjectionPlanNode in project voltdb by VoltDB.

the class TestUnion method testSelfUnion.

public void testSelfUnion() {
    AbstractPlanNode pn = compile("select B from T2 UNION select B from T2");
    assertTrue(pn.getChild(0) instanceof UnionPlanNode);
    pn = pn.getChild(0);
    assertTrue(pn.getChildCount() == 2);
    assertTrue(pn.getChild(0) instanceof SeqScanPlanNode);
    assertTrue(pn.getChild(1) instanceof SeqScanPlanNode);
    // The same table/alias is repeated twice in the union, but in different selects
    pn = compile("select A1.B from T2 A1, T2 A2 WHERE A1.B = A2.B UNION select B from T2 A1");
    assertTrue(pn.getChild(0) instanceof UnionPlanNode);
    pn = pn.getChild(0);
    assertTrue(pn.getChildCount() == 2);
    assertTrue(pn.getChild(0) instanceof ProjectionPlanNode);
    assertTrue(pn.getChild(0).getChild(0) instanceof NestLoopPlanNode);
    assertTrue(pn.getChild(1) instanceof SeqScanPlanNode);
    // BOTH sides are single-partitioned for the same partition
    pn = compile("select F from T1 WHERE T1.A = 2 UNION select F from T1 WHERE T1.A = 2");
    assertTrue(pn.getChild(0) instanceof UnionPlanNode);
    // If BOTH sides are single-partitioned, but for different partitions,
    // it would theoretically be possible to satisfy
    // the query with a 2-fragment plan IFF the coordinator fragment could be forced to
    // execute on one of the designated single partitions.
    // At this point, coordinator designation is only supported for single-fragment plans.
    failToCompile("select DESC from T1 WHERE A = 1 UNION select DESC from T1 WHERE A = 2");
    // If both sides are multi-partitioned, there is no facility for pushing down the
    // union processing below the send/receive, so each child of the union requires
    // its own send/receive so the plan ends up as an unsupported 3-fragment plan.
    failToCompile("select DESC from T1 UNION select DESC from T1");
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) SeqScanPlanNode(org.voltdb.plannodes.SeqScanPlanNode) UnionPlanNode(org.voltdb.plannodes.UnionPlanNode) NestLoopPlanNode(org.voltdb.plannodes.NestLoopPlanNode) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)
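
The opening assertions of the test reduce to one reusable walk. A minimal sketch as a hypothetical helper (not part of TestUnion), using only the node types already imported above:

// Hypothetical helper: assert that a compiled plan is a UNION whose children
// are all sequential scans.
private void checkUnionOfSeqScans(AbstractPlanNode root, int expectedChildren) {
    AbstractPlanNode union = root.getChild(0);
    assertTrue(union instanceof UnionPlanNode);
    assertEquals(expectedChildren, union.getChildCount());
    for (int i = 0; i < expectedChildren; i++) {
        assertTrue(union.getChild(i) instanceof SeqScanPlanNode);
    }
}

With it, the first check becomes checkUnionOfSeqScans(compile("select B from T2 UNION select B from T2"), 2).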

Example 29 with ProjectionPlanNode

use of org.voltdb.plannodes.ProjectionPlanNode in project voltdb by VoltDB.

the class MaterializedViewFixInfo method processMVBasedQueryFix.

/**
     * Check whether the results from a materialized view need to be
     * re-aggregated on the coordinator by the view's GROUP BY columns
     * prior to any of the processing specified by the query.
     * This is normally the case when a mat view's source table is partitioned
     * and the view's GROUP BY does not include the partition key.
     * There is a special edge case where the query already contains the exact
     * reaggregations that the added-cost fix would introduce, so the fix can
     * be skipped as an optimization.
     * Set the m_needed flag to true only if the reaggregation fix is needed.
     * @return The value of m_needed
     */
public boolean processMVBasedQueryFix(StmtTableScan mvTableScan, Set<SchemaColumn> scanColumns, JoinNode joinTree, List<ParsedColInfo> displayColumns, List<ParsedColInfo> groupByColumns) {
    //@TODO
    if (!(mvTableScan instanceof StmtTargetTableScan)) {
        return false;
    }
    Table table = ((StmtTargetTableScan) mvTableScan).getTargetTable();
    assert (table != null);
    String mvTableName = table.getTypeName();
    Table srcTable = table.getMaterializer();
    if (srcTable == null) {
        return false;
    }
    if (table.getIsreplicated()) {
        return false;
    }
    // Check whether the partition column is in the group by column list or not
    if (table.getPartitioncolumn() != null) {
        return false;
    }
    m_mvTableScan = mvTableScan;
    Set<String> mvDDLGroupbyColumnNames = new HashSet<>();
    List<Column> mvColumnArray = CatalogUtil.getSortedCatalogItems(table.getColumns(), "index");
    String mvTableAlias = getMVTableAlias();
    // Get the number of group-by columns.
    int numOfGroupByColumns;
    MaterializedViewInfo mvInfo = srcTable.getViews().get(mvTableName);
    if (mvInfo != null) {
        // single table view
        String complexGroupbyJson = mvInfo.getGroupbyexpressionsjson();
        if (complexGroupbyJson.length() > 0) {
            List<AbstractExpression> mvComplexGroupbyCols = null;
            try {
                mvComplexGroupbyCols = AbstractExpression.fromJSONArrayString(complexGroupbyJson, null);
            } catch (JSONException e) {
                e.printStackTrace();
            }
            numOfGroupByColumns = mvComplexGroupbyCols.size();
        } else {
            numOfGroupByColumns = mvInfo.getGroupbycols().size();
        }
    } else {
        // joined table view
        MaterializedViewHandlerInfo mvHandlerInfo = table.getMvhandlerinfo().get("mvHandlerInfo");
        numOfGroupByColumns = mvHandlerInfo.getGroupbycolumncount();
    }
    if (scanColumns.isEmpty() && numOfGroupByColumns == 0) {
        // This is an edge case that can happen if the view
        // has no group by keys, and we are just
        // doing a count(*) on the output of the view.
        //
        // Having no GB keys or scan columns would cause us to
        // produce plan nodes that have a 0-column output schema.
        // We can't handle this in several places, so add the
        // count(*) column from the view to the scan columns.
        // this is the "count(*)" column.
        Column mvCol = mvColumnArray.get(0);
        TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, mvCol, 0);
        tve.setOrigStmtId(mvTableScan.getStatementId());
        String colName = mvCol.getName();
        SchemaColumn scol = new SchemaColumn(mvTableName, mvTableAlias, colName, colName, tve);
        scanColumns.add(scol);
    }
    // Start to do real materialized view processing to fix the duplicates problem.
    // (1) construct new projection columns for scan plan node.
    Set<SchemaColumn> mvDDLGroupbyColumns = new HashSet<>();
    NodeSchema inlineProjSchema = new NodeSchema();
    for (SchemaColumn scol : scanColumns) {
        inlineProjSchema.addColumn(scol);
    }
    for (int i = 0; i < numOfGroupByColumns; i++) {
        Column mvCol = mvColumnArray.get(i);
        String colName = mvCol.getName();
        TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, mvCol, i);
        tve.setOrigStmtId(mvTableScan.getStatementId());
        mvDDLGroupbyColumnNames.add(colName);
        SchemaColumn scol = new SchemaColumn(mvTableName, mvTableAlias, colName, colName, tve);
        mvDDLGroupbyColumns.add(scol);
        if (!scanColumns.contains(scol)) {
            scanColumns.add(scol);
            // construct new projection columns for scan plan node.
            inlineProjSchema.addColumn(scol);
        }
    }
    // Record the re-aggregation type for each scan columns.
    Map<String, ExpressionType> mvColumnReAggType = new HashMap<>();
    for (int i = numOfGroupByColumns; i < mvColumnArray.size(); i++) {
        Column mvCol = mvColumnArray.get(i);
        ExpressionType reAggType = ExpressionType.get(mvCol.getAggregatetype());
        if (reAggType == ExpressionType.AGGREGATE_COUNT_STAR || reAggType == ExpressionType.AGGREGATE_COUNT) {
            reAggType = ExpressionType.AGGREGATE_SUM;
        }
        mvColumnReAggType.put(mvCol.getName(), reAggType);
    }
    assert (inlineProjSchema.size() > 0);
    m_scanInlinedProjectionNode = new ProjectionPlanNode(inlineProjSchema);
    // (2) Construct the reAggregation Node.
    // Construct the reAggregation plan node's aggSchema
    m_reAggNode = new HashAggregatePlanNode();
    int outputColumnIndex = 0;
    // inlineProjSchema contains the group by columns, while aggSchema may not.
    NodeSchema aggSchema = new NodeSchema();
    // Construct reAggregation node's aggregation and group by list.
    for (SchemaColumn scol : inlineProjSchema.getColumns()) {
        if (mvDDLGroupbyColumns.contains(scol)) {
            // Add group by expression.
            m_reAggNode.addGroupByExpression(scol.getExpression());
        } else {
            ExpressionType reAggType = mvColumnReAggType.get(scol.getColumnName());
            assert (reAggType != null);
            AbstractExpression agg_input_expr = scol.getExpression();
            assert (agg_input_expr instanceof TupleValueExpression);
            // Add aggregation information.
            m_reAggNode.addAggregate(reAggType, false, outputColumnIndex, agg_input_expr);
        }
        aggSchema.addColumn(scol);
        outputColumnIndex++;
    }
    assert (aggSchema.size() > 0);
    m_reAggNode.setOutputSchema(aggSchema);
    // Collect all TVEs that need re-aggregation in the coordinator.
    List<TupleValueExpression> needReAggTVEs = new ArrayList<>();
    List<AbstractExpression> aggPostExprs = new ArrayList<>();
    for (int i = numOfGroupByColumns; i < mvColumnArray.size(); i++) {
        Column mvCol = mvColumnArray.get(i);
        TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, mvCol, -1);
        tve.setOrigStmtId(mvTableScan.getStatementId());
        needReAggTVEs.add(tve);
    }
    collectReAggNodePostExpressions(joinTree, needReAggTVEs, aggPostExprs);
    AbstractExpression aggPostExpr = ExpressionUtil.combinePredicates(aggPostExprs);
    // Add post filters for the reAggregation node.
    m_reAggNode.setPostPredicate(aggPostExpr);
    // ENG-5386
    if (m_edgeCaseQueryNoFixNeeded && edgeCaseQueryNoFixNeeded(mvDDLGroupbyColumnNames, mvColumnReAggType, displayColumns, groupByColumns)) {
        return false;
    }
    m_needed = true;
    return true;
}
Also used : MaterializedViewInfo(org.voltdb.catalog.MaterializedViewInfo) TupleValueExpression(org.voltdb.expressions.TupleValueExpression) Table(org.voltdb.catalog.Table) HashMap(java.util.HashMap) SchemaColumn(org.voltdb.plannodes.SchemaColumn) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) ArrayList(java.util.ArrayList) JSONException(org.json_voltpatches.JSONException) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) SchemaColumn(org.voltdb.plannodes.SchemaColumn) StmtTargetTableScan(org.voltdb.planner.parseinfo.StmtTargetTableScan) MaterializedViewHandlerInfo(org.voltdb.catalog.MaterializedViewHandlerInfo) ExpressionType(org.voltdb.types.ExpressionType) NodeSchema(org.voltdb.plannodes.NodeSchema) HashSet(java.util.HashSet) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)
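
The loop that builds mvColumnReAggType encodes the core rule of the fix: per-partition COUNT and COUNT(*) results must be combined with SUM on the coordinator, while the other view aggregates re-aggregate with themselves. A standalone sketch of that mapping, for illustration only (not VoltDB code):

// Re-aggregation type for a view column, mirroring the loop above: COUNT and
// COUNT(*) partial results are summed across partitions; MIN, MAX, and SUM
// re-aggregate with the same function.
static ExpressionType reAggregationType(ExpressionType viewAggType) {
    if (viewAggType == ExpressionType.AGGREGATE_COUNT
            || viewAggType == ExpressionType.AGGREGATE_COUNT_STAR) {
        return ExpressionType.AGGREGATE_SUM;
    }
    return viewAggType;
}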

Example 30 with ProjectionPlanNode

use of org.voltdb.plannodes.ProjectionPlanNode in project voltdb by VoltDB.

the class TestWindowedFunctions method validateWindowedFunctionPlan.

/**
     * Validate that each similar windowed query in testRank produces a similar
     * plan, with the expected minor variation to its ORDER BY node.
     * @param windowedQuery a variant of a test query of a known basic format
     * @param nSorts the expected number of sort criteria that should have been
     *        extracted from the variant query's PARTITION BY and ORDER BY.
     * @param descSortIndex the position among the sort criteria of the original
     *        ORDER BY column, always distinguishable by its "DESC" direction.
     **/
private void validateWindowedFunctionPlan(String windowedQuery, int nSorts, int descSortIndex, int numPartitionExprs, ExpressionType winOpType) {
    // Sometimes we get multi-fragment nodes when we
    // expect single fragment nodes.  Keeping all the fragments
    // helps to diagnose the problem.
    List<AbstractPlanNode> nodes = compileToFragments(windowedQuery);
    assertEquals(1, nodes.size());
    AbstractPlanNode node = nodes.get(0);
    // The plan should look like:
    // SendNode -> ProjectionPlanNode -> WindowFunctionPlanNode -> OrderByPlanNode -> SeqScanNode
    // We also do some sanity checking on the window function plan node.
    // First dissect the plan.
    assertTrue(node instanceof SendPlanNode);
    AbstractPlanNode projPlanNode = node.getChild(0);
    assertTrue(projPlanNode instanceof ProjectionPlanNode);
    AbstractPlanNode windowFuncPlanNode = projPlanNode.getChild(0);
    assertTrue(windowFuncPlanNode instanceof WindowFunctionPlanNode);
    AbstractPlanNode abstractOrderByNode = windowFuncPlanNode.getChild(0);
    assertTrue(abstractOrderByNode instanceof OrderByPlanNode);
    OrderByPlanNode orderByNode = (OrderByPlanNode) abstractOrderByNode;
    NodeSchema input_schema = orderByNode.getOutputSchema();
    assertNotNull(input_schema);
    AbstractPlanNode seqScanNode = orderByNode.getChild(0);
    assertTrue(seqScanNode instanceof SeqScanPlanNode || seqScanNode instanceof NestLoopPlanNode);
    WindowFunctionPlanNode wfPlanNode = (WindowFunctionPlanNode) windowFuncPlanNode;
    NodeSchema schema = wfPlanNode.getOutputSchema();
    //
    // Check that the window function plan node's output schema is correct.
    // Look at the first expression, to verify that it's the windowed expression.
    // Then check that the TVEs all make sense.
    //
    SchemaColumn column = schema.getColumns().get(0);
    assertEquals("ARANK", column.getColumnAlias());
    assertEquals(numPartitionExprs, wfPlanNode.getPartitionByExpressions().size());
    validateTVEs(input_schema, wfPlanNode, false);
    //
    // Check that the operation is what we expect.
    //
    assertTrue(wfPlanNode.getAggregateTypes().size() > 0);
    assertEquals(winOpType, wfPlanNode.getAggregateTypes().get(0));
    //
    for (List<AbstractExpression> exprs : wfPlanNode.getAggregateExpressions()) {
        if (exprs != null) {
            for (AbstractExpression expr : exprs) {
                assertNotNull(expr.getValueType());
            }
        }
    }
    //
    // Check that the order by node has the right number of expressions
    // and that they have the correct sort directions.
    //
    assertEquals(nSorts, orderByNode.getSortExpressions().size());
    int sortIndex = 0;
    for (SortDirectionType direction : orderByNode.getSortDirections()) {
        SortDirectionType expected = (sortIndex == descSortIndex) ? SortDirectionType.DESC : SortDirectionType.ASC;
        assertEquals(expected, direction);
        ++sortIndex;
    }
}
Also used : AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) OrderByPlanNode(org.voltdb.plannodes.OrderByPlanNode) SendPlanNode(org.voltdb.plannodes.SendPlanNode) SchemaColumn(org.voltdb.plannodes.SchemaColumn) SortDirectionType(org.voltdb.types.SortDirectionType) NestLoopPlanNode(org.voltdb.plannodes.NestLoopPlanNode) SeqScanPlanNode(org.voltdb.plannodes.SeqScanPlanNode) AbstractExpression(org.voltdb.expressions.AbstractExpression) WindowFunctionPlanNode(org.voltdb.plannodes.WindowFunctionPlanNode) NodeSchema(org.voltdb.plannodes.NodeSchema) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)
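
The top-down dissection in the validator follows one fixed walk over the expected plan shape. A minimal sketch as a hypothetical helper (not part of TestWindowedFunctions; it assumes the same node types and getChild calls shown above):

// Hypothetical helper: walk the expected Send -> Projection -> WindowFunction
// -> OrderBy chain and return the order-by node for further inspection.
private OrderByPlanNode dissectWindowedPlan(AbstractPlanNode root) {
    assertTrue(root instanceof SendPlanNode);
    AbstractPlanNode proj = root.getChild(0);
    assertTrue(proj instanceof ProjectionPlanNode);
    AbstractPlanNode windowFunc = proj.getChild(0);
    assertTrue(windowFunc instanceof WindowFunctionPlanNode);
    AbstractPlanNode orderBy = windowFunc.getChild(0);
    assertTrue(orderBy instanceof OrderByPlanNode);
    return (OrderByPlanNode) orderBy;
}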

Aggregations

ProjectionPlanNode (org.voltdb.plannodes.ProjectionPlanNode)51 AbstractPlanNode (org.voltdb.plannodes.AbstractPlanNode)48 OrderByPlanNode (org.voltdb.plannodes.OrderByPlanNode)25 HashAggregatePlanNode (org.voltdb.plannodes.HashAggregatePlanNode)17 SendPlanNode (org.voltdb.plannodes.SendPlanNode)16 AbstractScanPlanNode (org.voltdb.plannodes.AbstractScanPlanNode)14 AggregatePlanNode (org.voltdb.plannodes.AggregatePlanNode)14 MergeReceivePlanNode (org.voltdb.plannodes.MergeReceivePlanNode)13 NestLoopPlanNode (org.voltdb.plannodes.NestLoopPlanNode)13 ReceivePlanNode (org.voltdb.plannodes.ReceivePlanNode)13 AbstractExpression (org.voltdb.expressions.AbstractExpression)10 NodeSchema (org.voltdb.plannodes.NodeSchema)9 SeqScanPlanNode (org.voltdb.plannodes.SeqScanPlanNode)9 IndexScanPlanNode (org.voltdb.plannodes.IndexScanPlanNode)8 SchemaColumn (org.voltdb.plannodes.SchemaColumn)7 TupleValueExpression (org.voltdb.expressions.TupleValueExpression)5 AbstractReceivePlanNode (org.voltdb.plannodes.AbstractReceivePlanNode)5 LimitPlanNode (org.voltdb.plannodes.LimitPlanNode)4 NestLoopIndexPlanNode (org.voltdb.plannodes.NestLoopIndexPlanNode)4 Table (org.voltdb.catalog.Table)3