Example 1 with NodeSchema

Use of org.voltdb.plannodes.NodeSchema in project voltdb by VoltDB, from the class TestSelfJoins, method testSelfJoin.

public void testSelfJoin() {
    // Query 1: self-join with aliases on both sides; each side's WHERE filter
    // should be pushed down to its own sequential scan.
    AbstractPlanNode pn = compile("select * FROM R1 A JOIN R1 B ON A.C = B.C WHERE B.A > 0 AND A.C < 3");
    pn = pn.getChild(0).getChild(0);
    assertTrue(pn instanceof NestLoopPlanNode);
    assertEquals(4, pn.getOutputSchema().getColumns().size());
    assertEquals(2, pn.getChildCount());
    AbstractPlanNode c = pn.getChild(0);
    assertTrue(c instanceof SeqScanPlanNode);
    SeqScanPlanNode ss = (SeqScanPlanNode) c;
    assertEquals("R1", ss.getTargetTableName());
    assertEquals("A", ss.getTargetTableAlias());
    assertEquals(ExpressionType.COMPARE_LESSTHAN, ss.getPredicate().getExpressionType());
    c = pn.getChild(1);
    assertTrue(c instanceof SeqScanPlanNode);
    ss = (SeqScanPlanNode) c;
    assertEquals("R1", ss.getTargetTableName());
    assertEquals("B", ss.getTargetTableAlias());
    assertEquals(ExpressionType.COMPARE_GREATERTHAN, ss.getPredicate().getExpressionType());
    pn = compile("select * FROM R1 JOIN R1 B ON R1.C = B.C");
    pn = pn.getChild(0).getChild(0);
    assertTrue(pn instanceof NestLoopPlanNode);
    assertEquals(4, pn.getOutputSchema().getColumns().size());
    assertEquals(2, pn.getChildCount());
    c = pn.getChild(0);
    assertTrue(c instanceof SeqScanPlanNode);
    ss = (SeqScanPlanNode) c;
    assertEquals("R1", ss.getTargetTableName());
    assertEquals("R1", ss.getTargetTableAlias());
    c = pn.getChild(1);
    assertTrue(c instanceof SeqScanPlanNode);
    ss = (SeqScanPlanNode) c;
    assertEquals("R1", ss.getTargetTableName());
    assertEquals("B", ss.getTargetTableAlias());
    pn = compile("select A.A, A.C, B.A, B.C FROM R1 A JOIN R1 B ON A.C = B.C");
    pn = pn.getChild(0).getChild(0);
    assertTrue(pn instanceof NestLoopPlanNode);
    assertEquals(4, pn.getOutputSchema().getColumns().size());
    pn = compile("select A,B.C  FROM R1 A JOIN R2 B USING(A)");
    pn = pn.getChild(0);
    assertTrue(pn instanceof ProjectionPlanNode);
    NodeSchema ns = pn.getOutputSchema();
    for (SchemaColumn sc : ns.getColumns()) {
        AbstractExpression e = sc.getExpression();
        assertTrue(e instanceof TupleValueExpression);
        TupleValueExpression tve = (TupleValueExpression) e;
        assertNotSame(-1, tve.getColumnIndex());
    }
}
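
The final loop is the standard way to sanity-check a NodeSchema: every output column should carry a TupleValueExpression that has been resolved to a concrete column index. A minimal sketch of that check extracted into a reusable helper, using only calls that appear in this example (the helper name is hypothetical):

private static void assertSchemaResolved(AbstractPlanNode node) {
    // Walk the node's output schema and confirm each column's expression
    // was resolved against the child's output; an index of -1 means unresolved.
    NodeSchema ns = node.getOutputSchema();
    for (SchemaColumn sc : ns.getColumns()) {
        AbstractExpression e = sc.getExpression();
        assertTrue(e instanceof TupleValueExpression);
        assertNotSame(-1, ((TupleValueExpression) e).getColumnIndex());
    }
}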
Also used: AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) SeqScanPlanNode(org.voltdb.plannodes.SeqScanPlanNode) TupleValueExpression(org.voltdb.expressions.TupleValueExpression) AbstractExpression(org.voltdb.expressions.AbstractExpression) SchemaColumn(org.voltdb.plannodes.SchemaColumn) NestLoopPlanNode(org.voltdb.plannodes.NestLoopPlanNode) NodeSchema(org.voltdb.plannodes.NodeSchema) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)

Example 2 with NodeSchema

Use of org.voltdb.plannodes.NodeSchema in project voltdb by VoltDB, from the class TestPlansSubQueries, method checkOutputSchema.

private void checkOutputSchema(AbstractPlanNode planNode, String tableAlias, String[] columns) {
    NodeSchema schema = planNode.getOutputSchema();
    List<SchemaColumn> schemaColumn = schema.getColumns();
    assertEquals(columns.length, schemaColumn.size());
    for (int i = 0; i < schemaColumn.size(); ++i) {
        SchemaColumn col = schemaColumn.get(i);
        checkOutputColumn(tableAlias, columns[i], col);
    }
}
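
A hedged sketch of a call site for this helper; the plan node, the alias "T1", and the column names are illustrative placeholders rather than values from a specific test:

AbstractPlanNode scan = pn.getChild(0);
// Expect the scan's output NodeSchema to expose exactly C1 and C2 under alias T1.
checkOutputSchema(scan, "T1", new String[] { "C1", "C2" });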
Also used: SchemaColumn(org.voltdb.plannodes.SchemaColumn) NodeSchema(org.voltdb.plannodes.NodeSchema)

Example 3 with NodeSchema

Use of org.voltdb.plannodes.NodeSchema in project voltdb by VoltDB, from the class MaterializedViewFixInfo, method processMVBasedQueryFix.

/**
     * Check whether the results from a materialized view need to be
     * re-aggregated on the coordinator by the view's GROUP BY columns
     * prior to any of the processing specified by the query.
     * This is normally the case when a mat view's source table is partitioned
     * and the view's GROUP BY does not include the partition key.
     * There is a special edge case where the query already contains the exact
     * reaggregations that the added-cost fix would introduce, so the fix can
     * be skipped as an optimization.
     * Set the m_needed flag to true only if the reaggregation fix is needed.
     * @return The value of m_needed
     */
public boolean processMVBasedQueryFix(StmtTableScan mvTableScan, Set<SchemaColumn> scanColumns, JoinNode joinTree, List<ParsedColInfo> displayColumns, List<ParsedColInfo> groupByColumns) {
    //@TODO
    if (!(mvTableScan instanceof StmtTargetTableScan)) {
        return false;
    }
    Table table = ((StmtTargetTableScan) mvTableScan).getTargetTable();
    assert (table != null);
    String mvTableName = table.getTypeName();
    Table srcTable = table.getMaterializer();
    if (srcTable == null) {
        return false;
    }
    if (table.getIsreplicated()) {
        return false;
    }
    // If the view table has a partition column, the view's GROUP BY includes
    // the source table's partition key, so no reaggregation fix is needed.
    if (table.getPartitioncolumn() != null) {
        return false;
    }
    m_mvTableScan = mvTableScan;
    Set<String> mvDDLGroupbyColumnNames = new HashSet<>();
    List<Column> mvColumnArray = CatalogUtil.getSortedCatalogItems(table.getColumns(), "index");
    String mvTableAlias = getMVTableAlias();
    // Get the number of group-by columns.
    int numOfGroupByColumns;
    MaterializedViewInfo mvInfo = srcTable.getViews().get(mvTableName);
    if (mvInfo != null) {
        // single table view
        String complexGroupbyJson = mvInfo.getGroupbyexpressionsjson();
        if (complexGroupbyJson.length() > 0) {
            List<AbstractExpression> mvComplexGroupbyCols = null;
            try {
                mvComplexGroupbyCols = AbstractExpression.fromJSONArrayString(complexGroupbyJson, null);
            } catch (JSONException e) {
                e.printStackTrace();
            }
            numOfGroupByColumns = mvComplexGroupbyCols.size();
        } else {
            numOfGroupByColumns = mvInfo.getGroupbycols().size();
        }
    } else {
        // joined table view
        MaterializedViewHandlerInfo mvHandlerInfo = table.getMvhandlerinfo().get("mvHandlerInfo");
        numOfGroupByColumns = mvHandlerInfo.getGroupbycolumncount();
    }
    if (scanColumns.isEmpty() && numOfGroupByColumns == 0) {
        // This is an edge case that can happen if the view
        // has no group by keys, and we are just
        // doing a count(*) on the output of the view.
        //
        // Having no GB keys or scan columns would cause us to
        // produce plan nodes that have a 0-column output schema.
        // We can't handle this in several places, so add the
        // count(*) column from the view to the scan columns.
        // mvColumnArray.get(0) is the view's count(*) column.
        Column mvCol = mvColumnArray.get(0);
        TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, mvCol, 0);
        tve.setOrigStmtId(mvTableScan.getStatementId());
        String colName = mvCol.getName();
        SchemaColumn scol = new SchemaColumn(mvTableName, mvTableAlias, colName, colName, tve);
        scanColumns.add(scol);
    }
    // Begin the real materialized view processing that fixes the duplicates problem.
    // (1) Construct the new projection columns for the scan plan node.
    Set<SchemaColumn> mvDDLGroupbyColumns = new HashSet<>();
    NodeSchema inlineProjSchema = new NodeSchema();
    for (SchemaColumn scol : scanColumns) {
        inlineProjSchema.addColumn(scol);
    }
    for (int i = 0; i < numOfGroupByColumns; i++) {
        Column mvCol = mvColumnArray.get(i);
        String colName = mvCol.getName();
        TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, mvCol, i);
        tve.setOrigStmtId(mvTableScan.getStatementId());
        mvDDLGroupbyColumnNames.add(colName);
        SchemaColumn scol = new SchemaColumn(mvTableName, mvTableAlias, colName, colName, tve);
        mvDDLGroupbyColumns.add(scol);
        if (!scanColumns.contains(scol)) {
            scanColumns.add(scol);
            // construct new projection columns for scan plan node.
            inlineProjSchema.addColumn(scol);
        }
    }
    // Record the re-aggregation type for each scan column.
    Map<String, ExpressionType> mvColumnReAggType = new HashMap<>();
    for (int i = numOfGroupByColumns; i < mvColumnArray.size(); i++) {
        Column mvCol = mvColumnArray.get(i);
        ExpressionType reAggType = ExpressionType.get(mvCol.getAggregatetype());
        if (reAggType == ExpressionType.AGGREGATE_COUNT_STAR || reAggType == ExpressionType.AGGREGATE_COUNT) {
            reAggType = ExpressionType.AGGREGATE_SUM;
        }
        mvColumnReAggType.put(mvCol.getName(), reAggType);
    }
    assert (inlineProjSchema.size() > 0);
    m_scanInlinedProjectionNode = new ProjectionPlanNode(inlineProjSchema);
    // (2) Construct the reAggregation Node.
    // Construct the reAggregation plan node's aggSchema
    m_reAggNode = new HashAggregatePlanNode();
    int outputColumnIndex = 0;
    // inlineProjSchema contains the group by columns, while aggSchema may not.
    NodeSchema aggSchema = new NodeSchema();
    // Construct reAggregation node's aggregation and group by list.
    for (SchemaColumn scol : inlineProjSchema.getColumns()) {
        if (mvDDLGroupbyColumns.contains(scol)) {
            // Add group by expression.
            m_reAggNode.addGroupByExpression(scol.getExpression());
        } else {
            ExpressionType reAggType = mvColumnReAggType.get(scol.getColumnName());
            assert (reAggType != null);
            AbstractExpression agg_input_expr = scol.getExpression();
            assert (agg_input_expr instanceof TupleValueExpression);
            // Add aggregation information.
            m_reAggNode.addAggregate(reAggType, false, outputColumnIndex, agg_input_expr);
        }
        aggSchema.addColumn(scol);
        outputColumnIndex++;
    }
    assert (aggSchema.size() > 0);
    m_reAggNode.setOutputSchema(aggSchema);
    // Collect all TVEs that need to be re-aggregated on the coordinator.
    List<TupleValueExpression> needReAggTVEs = new ArrayList<>();
    List<AbstractExpression> aggPostExprs = new ArrayList<>();
    for (int i = numOfGroupByColumns; i < mvColumnArray.size(); i++) {
        Column mvCol = mvColumnArray.get(i);
        TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, mvCol, -1);
        tve.setOrigStmtId(mvTableScan.getStatementId());
        needReAggTVEs.add(tve);
    }
    collectReAggNodePostExpressions(joinTree, needReAggTVEs, aggPostExprs);
    AbstractExpression aggPostExpr = ExpressionUtil.combinePredicates(aggPostExprs);
    // Add post filters for the reAggregation node.
    m_reAggNode.setPostPredicate(aggPostExpr);
    // ENG-5386
    if (m_edgeCaseQueryNoFixNeeded && edgeCaseQueryNoFixNeeded(mvDDLGroupbyColumnNames, mvColumnReAggType, displayColumns, groupByColumns)) {
        return false;
    }
    m_needed = true;
    return true;
}
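
The NodeSchema idiom this fix leans on, in isolation: build a TupleValueExpression for a catalog column, wrap it in a SchemaColumn (here the column name doubles as its alias), and append it to a schema. A minimal hedged sketch using only constructors that appear above; the mv* variables are assumed to be in scope as in the method:

// Hedged sketch of the SchemaColumn/NodeSchema construction pattern used above.
TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, mvCol, 0);
tve.setOrigStmtId(mvTableScan.getStatementId());
String colName = mvCol.getName();
NodeSchema schema = new NodeSchema();
schema.addColumn(new SchemaColumn(mvTableName, mvTableAlias, colName, colName, tve));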
Also used: MaterializedViewInfo(org.voltdb.catalog.MaterializedViewInfo) TupleValueExpression(org.voltdb.expressions.TupleValueExpression) Table(org.voltdb.catalog.Table) HashMap(java.util.HashMap) SchemaColumn(org.voltdb.plannodes.SchemaColumn) HashAggregatePlanNode(org.voltdb.plannodes.HashAggregatePlanNode) ArrayList(java.util.ArrayList) JSONException(org.json_voltpatches.JSONException) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) StmtTargetTableScan(org.voltdb.planner.parseinfo.StmtTargetTableScan) MaterializedViewHandlerInfo(org.voltdb.catalog.MaterializedViewHandlerInfo) ExpressionType(org.voltdb.types.ExpressionType) NodeSchema(org.voltdb.plannodes.NodeSchema) HashSet(java.util.HashSet) ProjectionPlanNode(org.voltdb.plannodes.ProjectionPlanNode)

Example 4 with NodeSchema

Use of org.voltdb.plannodes.NodeSchema in project voltdb by VoltDB, from the class ParsedSelectStmt, method processAvgPushdownOptimization.

private void processAvgPushdownOptimization(VoltXMLElement displayElement, VoltXMLElement groupbyElement, VoltXMLElement havingElement, VoltXMLElement orderbyElement) {
    ArrayList<ParsedColInfo> tmpDisplayColumns = m_displayColumns;
    m_displayColumns = new ArrayList<>();
    ArrayList<ParsedColInfo> tmpAggResultColumns = m_aggResultColumns;
    m_aggResultColumns = new ArrayList<>();
    List<ParsedColInfo> tmpGroupByColumns = m_groupByColumns;
    m_groupByColumns = new ArrayList<>();
    ArrayList<ParsedColInfo> tmpDistinctGroupByColumns = m_distinctGroupByColumns;
    m_distinctGroupByColumns = new ArrayList<>();
    ArrayList<ParsedColInfo> tmpOrderColumns = m_orderColumns;
    m_orderColumns = new ArrayList<>();
    AbstractExpression tmpHaving = m_having;
    boolean tmpHasComplexAgg = hasComplexAgg();
    NodeSchema tmpProjectSchema = m_projectSchema;
    NodeSchema tmpDistinctProjectSchema = m_distinctProjectSchema;
    m_aggregationList = new ArrayList<>();
    assert (displayElement != null);
    parseDisplayColumns(displayElement, true);
    // Process DISTINCT first: processDistinct may rewrite the query and
    // replace the groupbyElement as a side effect.
    groupbyElement = processDistinct(displayElement, groupbyElement, havingElement);
    if (groupbyElement != null) {
        parseGroupByColumns(groupbyElement);
        insertToColumnList(m_aggResultColumns, m_groupByColumns);
    }
    if (havingElement != null) {
        parseHavingExpression(havingElement, true);
    }
    if (orderbyElement != null && !hasAOneRowResult()) {
        parseOrderColumns(orderbyElement, true);
    }
    m_aggregationList = null;
    fillUpAggResultColumns();
    placeTVEsinColumns();
    // Capture the recomputed state into the m_avgPushdown* fields, then switch the originals back.
    m_avgPushdownDisplayColumns = m_displayColumns;
    m_avgPushdownAggResultColumns = m_aggResultColumns;
    m_avgPushdownGroupByColumns = m_groupByColumns;
    m_avgPushdownDistinctGroupByColumns = m_distinctGroupByColumns;
    m_avgPushdownOrderColumns = m_orderColumns;
    m_avgPushdownProjectSchema = m_projectSchema;
    m_avgPushdownFinalProjectSchema = m_distinctProjectSchema;
    m_avgPushdownHaving = m_having;
    m_displayColumns = tmpDisplayColumns;
    m_aggResultColumns = tmpAggResultColumns;
    m_groupByColumns = tmpGroupByColumns;
    m_distinctGroupByColumns = tmpDistinctGroupByColumns;
    m_orderColumns = tmpOrderColumns;
    m_projectSchema = tmpProjectSchema;
    m_distinctProjectSchema = tmpDistinctProjectSchema;
    m_hasComplexAgg = tmpHasComplexAgg;
    m_having = tmpHaving;
}
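
The method is a save/recompute/restore pattern around the statement's column lists and NodeSchema fields: stash the current state, re-parse with AVG pushdown enabled, capture the result into the m_avgPushdown* fields, and restore the originals. A hedged sketch of the idiom in isolation; recomputeWithAvgPushdown is a hypothetical stand-in for the parse calls above:

NodeSchema savedProjectSchema = m_projectSchema;  // 1. stash the current schema
recomputeWithAvgPushdown();                       // 2. hypothetical recompute step
m_avgPushdownProjectSchema = m_projectSchema;     // 3. capture the pushdown variant
m_projectSchema = savedProjectSchema;             // 4. restore the original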
Also used: AbstractExpression(org.voltdb.expressions.AbstractExpression) NodeSchema(org.voltdb.plannodes.NodeSchema)

Example 5 with NodeSchema

Use of org.voltdb.plannodes.NodeSchema in project voltdb by VoltDB, from the class PlanAssembler, method getNextInsertPlan.

/**
     * Get the next (only) plan for a SQL insertion. Inserts are pretty simple
     * and this will only generate a single plan.
     *
     * @return The next (only) plan for a given insert statement, then null.
     */
private CompiledPlan getNextInsertPlan() {
    // do it the right way once, then return null after that
    if (m_bestAndOnlyPlanWasGenerated) {
        return null;
    }
    m_bestAndOnlyPlanWasGenerated = true;
    // figure out which table we're inserting into
    assert (m_parsedInsert.m_tableList.size() == 1);
    Table targetTable = m_parsedInsert.m_tableList.get(0);
    StmtSubqueryScan subquery = m_parsedInsert.getSubqueryScan();
    CompiledPlan retval = null;
    String isContentDeterministic = null;
    if (subquery != null) {
        isContentDeterministic = subquery.calculateContentDeterminismMessage();
        if (subquery.getBestCostPlan() == null) {
            // The subquery could not be planned; the error message was recorded in getBestCostPlan, above.
            throw new PlanningErrorException("INSERT INTO ... SELECT subquery could not be planned: " + m_recentErrorMsg);
        }
        boolean targetIsExportTable = tableListIncludesExportOnly(m_parsedInsert.m_tableList);
        InsertSubPlanAssembler subPlanAssembler = new InsertSubPlanAssembler(m_catalogDb, m_parsedInsert, m_partitioning, targetIsExportTable);
        AbstractPlanNode subplan = subPlanAssembler.nextPlan();
        if (subplan == null) {
            throw new PlanningErrorException(subPlanAssembler.m_recentErrorMsg);
        }
        assert (m_partitioning.isJoinValid());
        //  Use the subquery's plan as the basis for the insert plan.
        retval = subquery.getBestCostPlan();
    } else {
        retval = new CompiledPlan();
    }
    retval.setReadOnly(false);
    // Note: isContentDeterministic was computed above for the INSERT ... SELECT ... case, by analyzing the subquery.
    if (m_parsedInsert.m_isUpsert) {
        boolean hasPrimaryKey = false;
        for (Constraint constraint : targetTable.getConstraints()) {
            if (constraint.getType() != ConstraintType.PRIMARY_KEY.getValue()) {
                continue;
            }
            hasPrimaryKey = true;
            boolean targetsPrimaryKey = false;
            for (ColumnRef colRef : constraint.getIndex().getColumns()) {
                int primary = colRef.getColumn().getIndex();
                for (Column targetCol : m_parsedInsert.m_columns.keySet()) {
                    if (targetCol.getIndex() == primary) {
                        targetsPrimaryKey = true;
                        break;
                    }
                }
                if (!targetsPrimaryKey) {
                    throw new PlanningErrorException("UPSERT on table \"" + targetTable.getTypeName() + "\" must specify a value for primary key \"" + colRef.getColumn().getTypeName() + "\".");
                }
            }
        }
        if (!hasPrimaryKey) {
            throw new PlanningErrorException("UPSERT is not allowed on table \"" + targetTable.getTypeName() + "\" that has no primary key.");
        }
    }
    CatalogMap<Column> targetTableColumns = targetTable.getColumns();
    for (Column col : targetTableColumns) {
        boolean needsValue = (!m_parsedInsert.m_isUpsert) && (col.getNullable() == false) && (col.getDefaulttype() == 0);
        if (needsValue && !m_parsedInsert.m_columns.containsKey(col)) {
            // This check could be done during parsing?
            throw new PlanningErrorException("Column " + col.getName() + " has no default and is not nullable.");
        }
        // If this is the partitioning column, record its value expression as a
        // hint that this statement can be executed SP (single-partition).
        if (col.equals(m_partitioning.getPartitionColForDML()) && subquery == null) {
            // When AdHoc insert-into-select is supported, we'll need to be able to infer
            // partitioning of the sub-select
            AbstractExpression expr = m_parsedInsert.getExpressionForPartitioning(col);
            String fullColumnName = targetTable.getTypeName() + "." + col.getTypeName();
            m_partitioning.addPartitioningExpression(fullColumnName, expr, expr.getValueType());
        }
    }
    NodeSchema matSchema = null;
    if (subquery == null) {
        matSchema = new NodeSchema();
    }
    int[] fieldMap = new int[m_parsedInsert.m_columns.size()];
    int i = 0;
    // Iterate over the target columns:
    //   - build the field map the insert executor uses to place values into the row
    //   - for VALUES(...) insert statements, build the materialize node's schema
    for (Map.Entry<Column, AbstractExpression> e : m_parsedInsert.m_columns.entrySet()) {
        Column col = e.getKey();
        fieldMap[i] = col.getIndex();
        if (matSchema != null) {
            AbstractExpression valExpr = e.getValue();
            valExpr.setInBytes(col.getInbytes());
            // Patch over any mismatched expressions with an explicit cast.
            // Most impossible-to-cast type combinations should have already been caught by the
            // parser, but there are also runtime checks in the casting code
            // -- such as for out of range values.
            valExpr = castExprIfNeeded(valExpr, col);
            matSchema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, col.getTypeName(), col.getTypeName(), valExpr);
        }
        i++;
    }
    // the root of the insert plan may be an InsertPlanNode, or
    // it may be a scan plan node.  We may do an inline InsertPlanNode
    // as well.
    InsertPlanNode insertNode = new InsertPlanNode();
    insertNode.setTargetTableName(targetTable.getTypeName());
    if (subquery != null) {
        insertNode.setSourceIsPartitioned(!subquery.getIsReplicated());
    }
    // The field map tells the insert node
    // where to put values produced by child into the row to be inserted.
    insertNode.setFieldMap(fieldMap);
    AbstractPlanNode root = insertNode;
    if (matSchema != null) {
        MaterializePlanNode matNode = new MaterializePlanNode(matSchema);
        // connect the insert and the materialize nodes together
        insertNode.addAndLinkChild(matNode);
        retval.statementGuaranteesDeterminism(false, true, isContentDeterministic);
    } else {
        ScanPlanNodeWithInlineInsert planNode = (retval.rootPlanGraph instanceof ScanPlanNodeWithInlineInsert) ? ((ScanPlanNodeWithInlineInsert) retval.rootPlanGraph) : null;
        // Inline upsert might be possible, but not now.
        if (planNode != null && (!m_parsedInsert.m_isUpsert) && (!planNode.hasInlineAggregateNode())) {
            planNode.addInlinePlanNode(insertNode);
            root = planNode.getAbstractNode();
        } else {
            // Otherwise just make it out-of-line.
            insertNode.addAndLinkChild(retval.rootPlanGraph);
        }
    }
    if (m_partitioning.wasSpecifiedAsSingle() || m_partitioning.isInferredSingle()) {
        insertNode.setMultiPartition(false);
        retval.rootPlanGraph = root;
        return retval;
    }
    insertNode.setMultiPartition(true);
    // Add a compensating sum of modified tuple counts or a limit 1
    // AND a send on top of a union-like receive node.
    boolean isReplicated = targetTable.getIsreplicated();
    retval.rootPlanGraph = addCoordinatorToDMLNode(root, isReplicated);
    return retval;
}
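
For a plain INSERT ... VALUES statement, the NodeSchema built here backs the MaterializePlanNode: one entry per target column, keyed by the temp-table name, with each value expression cast to the column's type. A condensed, hedged sketch of that step using only calls that appear above; col and rawValueExpr are placeholders:

NodeSchema matSchema = new NodeSchema();
// Patch any type mismatch with an explicit cast, then add the column.
AbstractExpression valExpr = castExprIfNeeded(rawValueExpr, col);
matSchema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME,
        col.getTypeName(), col.getTypeName(), valExpr);
MaterializePlanNode matNode = new MaterializePlanNode(matSchema);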
Also used: StmtSubqueryScan(org.voltdb.planner.parseinfo.StmtSubqueryScan) AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) Table(org.voltdb.catalog.Table) Constraint(org.voltdb.catalog.Constraint) InsertPlanNode(org.voltdb.plannodes.InsertPlanNode) MaterializePlanNode(org.voltdb.plannodes.MaterializePlanNode) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) SchemaColumn(org.voltdb.plannodes.SchemaColumn) ColumnRef(org.voltdb.catalog.ColumnRef) Map(java.util.Map) CatalogMap(org.voltdb.catalog.CatalogMap) HashMap(java.util.HashMap) NodeSchema(org.voltdb.plannodes.NodeSchema)

Aggregations

NodeSchema (org.voltdb.plannodes.NodeSchema): 21
SchemaColumn (org.voltdb.plannodes.SchemaColumn): 15
AbstractPlanNode (org.voltdb.plannodes.AbstractPlanNode): 14
AbstractExpression (org.voltdb.expressions.AbstractExpression): 13
ProjectionPlanNode (org.voltdb.plannodes.ProjectionPlanNode): 9
TupleValueExpression (org.voltdb.expressions.TupleValueExpression): 8
AbstractScanPlanNode (org.voltdb.plannodes.AbstractScanPlanNode): 8
Table (org.voltdb.catalog.Table): 4
HashAggregatePlanNode (org.voltdb.plannodes.HashAggregatePlanNode): 4
HashMap (java.util.HashMap): 3
Column (org.voltdb.catalog.Column): 3
NestLoopPlanNode (org.voltdb.plannodes.NestLoopPlanNode): 3
OrderByPlanNode (org.voltdb.plannodes.OrderByPlanNode): 3
SendPlanNode (org.voltdb.plannodes.SendPlanNode): 3
SeqScanPlanNode (org.voltdb.plannodes.SeqScanPlanNode): 3
Constraint (org.voltdb.catalog.Constraint): 2
AbstractSubqueryExpression (org.voltdb.expressions.AbstractSubqueryExpression): 2
ScalarValueExpression (org.voltdb.expressions.ScalarValueExpression): 2
TupleAddressExpression (org.voltdb.expressions.TupleAddressExpression): 2
AggregatePlanNode (org.voltdb.plannodes.AggregatePlanNode): 2