Example 11 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

The class MaterializedViewProcessor, method startProcessing.

/**
     * Add materialized view info to the catalog for the tables that are
     * materialized views.
     * @throws VoltCompilerException
     */
public void startProcessing(Database db, HashMap<Table, String> matViewMap, TreeSet<String> exportTableNames) throws VoltCompilerException {
    HashSet<String> viewTableNames = new HashSet<>();
    for (Entry<Table, String> entry : matViewMap.entrySet()) {
        viewTableNames.add(entry.getKey().getTypeName());
    }
    for (Entry<Table, String> entry : matViewMap.entrySet()) {
        Table destTable = entry.getKey();
        String query = entry.getValue();
        // get the xml for the query
        VoltXMLElement xmlquery = null;
        try {
            xmlquery = m_hsql.getXMLCompiledStatement(query);
        } catch (HSQLParseException e) {
            e.printStackTrace();
        }
        assert (xmlquery != null);
        // parse the xml like any other sql statement
        ParsedSelectStmt stmt = null;
        try {
            stmt = (ParsedSelectStmt) AbstractParsedStmt.parse(query, xmlquery, null, db, null);
        } catch (Exception e) {
            throw m_compiler.new VoltCompilerException(e.getMessage());
        }
        assert (stmt != null);
        String viewName = destTable.getTypeName();
        // throw an error if the view isn't within voltdb's limited world view
        checkViewMeetsSpec(viewName, stmt);
        // The primary key index is yet to be defined (below).
        for (Index destIndex : destTable.getIndexes()) {
            if (destIndex.getUnique() || destIndex.getAssumeunique()) {
                String msg = "A UNIQUE or ASSUMEUNIQUE index is not allowed on a materialized view. " + "Remove the qualifier from the index " + destIndex.getTypeName() + "defined on the materialized view \"" + viewName + "\".";
                throw m_compiler.new VoltCompilerException(msg);
            }
        }
        // A Materialized view cannot depend on another view.
        for (Table srcTable : stmt.m_tableList) {
            if (viewTableNames.contains(srcTable.getTypeName())) {
                String msg = String.format("A materialized view (%s) can not be defined on another view (%s).", viewName, srcTable.getTypeName());
                throw m_compiler.new VoltCompilerException(msg);
            }
        }
        // The existing code base still needs this materializer field to tell whether a table
        // is a materialized view table. Leaving this for future refactoring.
        destTable.setMaterializer(stmt.m_tableList.get(0));
        List<Column> destColumnArray = CatalogUtil.getSortedCatalogItems(destTable.getColumns(), "index");
        List<AbstractExpression> groupbyExprs = null;
        if (stmt.hasComplexGroupby()) {
            groupbyExprs = new ArrayList<>();
            for (ParsedColInfo col : stmt.groupByColumns()) {
                groupbyExprs.add(col.expression);
            }
        }
        // Generate query XMLs for min/max recalculation (ENG-8641)
        boolean isMultiTableView = stmt.m_tableList.size() > 1;
        MatViewFallbackQueryXMLGenerator xmlGen = new MatViewFallbackQueryXMLGenerator(xmlquery, stmt.groupByColumns(), stmt.m_displayColumns, isMultiTableView);
        List<VoltXMLElement> fallbackQueryXMLs = xmlGen.getFallbackQueryXMLs();
        // If the view has any group-by columns, give it an auto-generated primary key
        // index or constraint in order to avoid error and crash.
        if (stmt.groupByColumns().size() != 0) {
            Index pkIndex = destTable.getIndexes().add(HSQLInterface.AUTO_GEN_MATVIEW_IDX);
            pkIndex.setType(IndexType.BALANCED_TREE.getValue());
            pkIndex.setUnique(true);
            // assume the first #grpByCols view columns are the group-by columns
            for (int i = 0; i < stmt.groupByColumns().size(); i++) {
                ColumnRef c = pkIndex.getColumns().add(String.valueOf(i));
                c.setColumn(destColumnArray.get(i));
                c.setIndex(i);
            }
            Constraint pkConstraint = destTable.getConstraints().add(HSQLInterface.AUTO_GEN_MATVIEW_CONST);
            pkConstraint.setType(ConstraintType.PRIMARY_KEY.getValue());
            pkConstraint.setIndex(pkIndex);
        }
        // If we have an unsafe MV message, then
        // remember it here.  We don't really know how
        // to transfer the message through the catalog, but
        // we can transmit the existence of the message.
        boolean isSafeForDDL = (stmt.getUnsafeMVMessage() == null);
        // Here the code path diverges for different kinds of views (single table view and joined table view)
        if (isMultiTableView) {
            // Materialized view on joined tables
            // Add mvHandlerInfo to the destTable:
            MaterializedViewHandlerInfo mvHandlerInfo = destTable.getMvhandlerinfo().add("mvHandlerInfo");
            mvHandlerInfo.setDesttable(destTable);
            for (Table srcTable : stmt.m_tableList) {
                // Currently we do not support a view on persistent tables joined with streamed tables.
                if (exportTableNames.contains(srcTable.getTypeName())) {
                    String msg = String.format("A materialized view (%s) on joined tables cannot have streamed table (%s) as its source.", viewName, srcTable.getTypeName());
                    throw m_compiler.new VoltCompilerException(msg);
                }
                // The view table will need to keep a list of its source tables.
                // The list is used to install / uninstall the view reference on the source tables when the
                // view handler is constructed / destroyed.
                TableRef tableRef = mvHandlerInfo.getSourcetables().add(srcTable.getTypeName());
                tableRef.setTable(srcTable);
                // There could be more than one partition column candidate, but we will only use the first one we find.
                if (destTable.getPartitioncolumn() == null && srcTable.getPartitioncolumn() != null) {
                    Column partitionColumn = srcTable.getPartitioncolumn();
                    String partitionColName = partitionColumn.getTypeName();
                    String srcTableName = srcTable.getTypeName();
                    destTable.setIsreplicated(false);
                    if (stmt.hasComplexGroupby()) {
                        for (int i = 0; i < groupbyExprs.size(); i++) {
                            AbstractExpression groupbyExpr = groupbyExprs.get(i);
                            if (groupbyExpr instanceof TupleValueExpression) {
                                TupleValueExpression tve = (TupleValueExpression) groupbyExpr;
                                if (tve.getTableName().equals(srcTableName) && tve.getColumnName().equals(partitionColName)) {
                                    // The partition column is set to destColumnArray.get(i) because of the restriction
                                    // that the non-aggregate columns must come at the very beginning and must exactly
                                    // match the group-by columns.
                                    // If this restriction is ever removed, more work will be needed
                                    // to find a proper partition column.
                                    destTable.setPartitioncolumn(destColumnArray.get(i));
                                    break;
                                }
                            }
                        }
                    } else {
                        for (int i = 0; i < stmt.groupByColumns().size(); i++) {
                            ParsedColInfo gbcol = stmt.groupByColumns().get(i);
                            if (gbcol.tableName.equals(srcTableName) && gbcol.columnName.equals(partitionColName)) {
                                destTable.setPartitioncolumn(destColumnArray.get(i));
                                break;
                            }
                        }
                    }
                } // end find partition column
            } // end for each source table
            compileFallbackQueriesAndUpdateCatalog(db, query, fallbackQueryXMLs, mvHandlerInfo);
            compileCreateQueryAndUpdateCatalog(db, query, xmlquery, mvHandlerInfo);
            mvHandlerInfo.setGroupbycolumncount(stmt.groupByColumns().size());
            for (int i = 0; i < stmt.m_displayColumns.size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                Column destColumn = destColumnArray.get(i);
                setTypeAttributesForColumn(destColumn, col.expression);
                // Set the expression type here to determine the behavior of the merge function.
                destColumn.setAggregatetype(col.expression.getExpressionType().getValue());
            }
            mvHandlerInfo.setIssafewithnonemptysources(isSafeForDDL);
        } else {
            // =======================================================================================
            // Materialized view on single table
            // create the materializedviewinfo catalog node for the source table
            Table srcTable = stmt.m_tableList.get(0);
            MaterializedViewInfo matviewinfo = srcTable.getViews().add(viewName);
            matviewinfo.setDest(destTable);
            AbstractExpression where = stmt.getSingleTableFilterExpression();
            if (where != null) {
                String hex = Encoder.hexEncode(where.toJSONString());
                matviewinfo.setPredicate(hex);
            } else {
                matviewinfo.setPredicate("");
            }
            List<Column> srcColumnArray = CatalogUtil.getSortedCatalogItems(srcTable.getColumns(), "index");
            if (stmt.hasComplexGroupby()) {
                // Parse group by expressions to json string
                String groupbyExprsJson = null;
                try {
                    groupbyExprsJson = DDLCompiler.convertToJSONArray(groupbyExprs);
                } catch (JSONException e) {
                    throw m_compiler.new VoltCompilerException("Unexpected error serializing non-column " + "expressions for group by expressions: " + e.toString());
                }
                matviewinfo.setGroupbyexpressionsjson(groupbyExprsJson);
            } else {
                // add the group by columns from the src table
                for (int i = 0; i < stmt.groupByColumns().size(); i++) {
                    ParsedColInfo gbcol = stmt.groupByColumns().get(i);
                    Column srcCol = srcColumnArray.get(gbcol.index);
                    ColumnRef cref = matviewinfo.getGroupbycols().add(srcCol.getTypeName());
                    // groupByColumns is iterated in grouping order. Store that grouping order
                    // in the column ref index. When the catalog is serialized, it will, naturally,
                    // scramble this order like a two-year-old playing dominos, presenting the data
                    // in a meaningless sequence.
                    // the column offset in the view's grouping order
                    cref.setIndex(i);
                    // the source column from the base (non-view) table
                    cref.setColumn(srcCol);
                    // parse out the group by columns into the dest table
                    ParsedColInfo col = stmt.m_displayColumns.get(i);
                    Column destColumn = destColumnArray.get(i);
                    processMaterializedViewColumn(srcTable, destColumn, ExpressionType.VALUE_TUPLE, (TupleValueExpression) col.expression);
                }
            }
            // Set up COUNT(*) column
            ParsedColInfo countCol = stmt.m_displayColumns.get(stmt.groupByColumns().size());
            assert (countCol.expression.getExpressionType() == ExpressionType.AGGREGATE_COUNT_STAR);
            assert (countCol.expression.getLeft() == null);
            processMaterializedViewColumn(srcTable, destColumnArray.get(stmt.groupByColumns().size()), ExpressionType.AGGREGATE_COUNT_STAR, null);
            // prepare info for aggregation columns.
            List<AbstractExpression> aggregationExprs = new ArrayList<>();
            boolean hasAggregationExprs = false;
            ArrayList<AbstractExpression> minMaxAggs = new ArrayList<>();
            for (int i = stmt.groupByColumns().size() + 1; i < stmt.m_displayColumns.size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                AbstractExpression aggExpr = col.expression.getLeft();
                if (aggExpr.getExpressionType() != ExpressionType.VALUE_TUPLE) {
                    hasAggregationExprs = true;
                }
                aggregationExprs.add(aggExpr);
                if (col.expression.getExpressionType() == ExpressionType.AGGREGATE_MIN || col.expression.getExpressionType() == ExpressionType.AGGREGATE_MAX) {
                    minMaxAggs.add(aggExpr);
                }
            }
            compileFallbackQueriesAndUpdateCatalog(db, query, fallbackQueryXMLs, matviewinfo);
            // set Aggregation Expressions.
            if (hasAggregationExprs) {
                String aggregationExprsJson = null;
                try {
                    aggregationExprsJson = DDLCompiler.convertToJSONArray(aggregationExprs);
                } catch (JSONException e) {
                    throw m_compiler.new VoltCompilerException("Unexpected error serializing non-column " + "expressions for aggregation expressions: " + e.toString());
                }
                matviewinfo.setAggregationexpressionsjson(aggregationExprsJson);
            }
            // Find index for each min/max aggCol/aggExpr (ENG-6511 and ENG-8512)
            for (Integer i = 0; i < minMaxAggs.size(); ++i) {
                Index found = findBestMatchIndexForMatviewMinOrMax(matviewinfo, srcTable, groupbyExprs, minMaxAggs.get(i));
                IndexRef refFound = matviewinfo.getIndexforminmax().add(i.toString());
                if (found != null) {
                    refFound.setName(found.getTypeName());
                } else {
                    refFound.setName("");
                }
            }
            // COUNT(*) should return a BIGINT column, but here it was found to have been assigned an INTEGER column.
            for (int i = 0; i <= stmt.groupByColumns().size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                Column destColumn = destColumnArray.get(i);
                setTypeAttributesForColumn(destColumn, col.expression);
            }
            // parse out the aggregation columns into the dest table
            for (int i = stmt.groupByColumns().size() + 1; i < stmt.m_displayColumns.size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                Column destColumn = destColumnArray.get(i);
                AbstractExpression colExpr = col.expression.getLeft();
                TupleValueExpression tve = null;
                if (colExpr.getExpressionType() == ExpressionType.VALUE_TUPLE) {
                    tve = (TupleValueExpression) colExpr;
                }
                processMaterializedViewColumn(srcTable, destColumn, col.expression.getExpressionType(), tve);
                setTypeAttributesForColumn(destColumn, col.expression);
            }
            if (srcTable.getPartitioncolumn() != null) {
                // Set the partitioning of destination tables of associated views.
                // If a view's source table is replicated, then a full scan of the
                // associated view is single-sited. If the source is partitioned,
                // a full scan of the view must be distributed, unless it is filtered
                // by the original table's partitioning key, which, to be filtered,
                // must also be a GROUP BY key.
                destTable.setIsreplicated(false);
                setGroupedTablePartitionColumn(matviewinfo, srcTable.getPartitioncolumn());
            }
            matviewinfo.setIssafewithnonemptysources(isSafeForDDL);
        } // end else: single-table materialized view
    } // end for each entry in matViewMap
}
Also used : Constraint(org.voltdb.catalog.Constraint) ArrayList(java.util.ArrayList) Index(org.voltdb.catalog.Index) VoltXMLElement(org.hsqldb_voltpatches.VoltXMLElement) HSQLParseException(org.hsqldb_voltpatches.HSQLInterface.HSQLParseException) IndexRef(org.voltdb.catalog.IndexRef) ParsedColInfo(org.voltdb.planner.ParsedColInfo) Column(org.voltdb.catalog.Column) ParsedSelectStmt(org.voltdb.planner.ParsedSelectStmt) VoltCompilerException(org.voltdb.compiler.VoltCompiler.VoltCompilerException) HashSet(java.util.HashSet) MaterializedViewInfo(org.voltdb.catalog.MaterializedViewInfo) TupleValueExpression(org.voltdb.expressions.TupleValueExpression) Table(org.voltdb.catalog.Table) JSONException(org.json_voltpatches.JSONException) AbstractExpression(org.voltdb.expressions.AbstractExpression) ColumnRef(org.voltdb.catalog.ColumnRef) MaterializedViewHandlerInfo(org.voltdb.catalog.MaterializedViewHandlerInfo) TableRef(org.voltdb.catalog.TableRef)
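
To isolate the guard near the top of startProcessing, here is a minimal, self-contained sketch (plain Java; ViewIndexGuard and IndexInfo are hypothetical stand-ins, not VoltDB classes) of rejecting UNIQUE or ASSUMEUNIQUE indexes on a materialized view:

import java.util.List;

final class ViewIndexGuard {
    // Hypothetical value type standing in for org.voltdb.catalog.Index.
    record IndexInfo(String name, boolean unique, boolean assumeUnique) {}

    static void checkNoUniqueIndexes(String viewName, List<IndexInfo> indexes) {
        for (IndexInfo idx : indexes) {
            if (idx.unique() || idx.assumeUnique()) {
                // Note the space before "defined": the original concatenation was missing it.
                throw new IllegalArgumentException(
                        "A UNIQUE or ASSUMEUNIQUE index is not allowed on a materialized view. "
                        + "Remove the qualifier from the index " + idx.name()
                        + " defined on the materialized view \"" + viewName + "\".");
            }
        }
    }

    public static void main(String[] args) {
        // Non-unique index: passes silently.
        checkNoUniqueIndexes("V_ORDERS", List.of(new IndexInfo("IDX_A", false, false)));
        // Unique index: rejected.
        try {
            checkNoUniqueIndexes("V_ORDERS", List.of(new IndexInfo("IDX_U", true, false)));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}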

Example 12 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

The class DDLCompiler, method addConstraintToCatalog.

/**
     * Add a constraint on a given table to the catalog
     * @param table                The table on which the constraint will be enforced
     * @param node                 The XML node representing the constraint
     * @param indexReplacementMap
     * @throws VoltCompilerException
     */
private void addConstraintToCatalog(Table table, VoltXMLElement node, Map<String, String> indexReplacementMap, Map<String, Index> indexMap) throws VoltCompilerException {
    assert node.name.equals("constraint");
    String name = node.attributes.get("name");
    String typeName = node.attributes.get("constrainttype");
    ConstraintType type = ConstraintType.valueOf(typeName);
    String tableName = table.getTypeName();
    if (type == ConstraintType.LIMIT) {
        int tupleLimit = Integer.parseInt(node.attributes.get("rowslimit"));
        if (tupleLimit < 0) {
            throw m_compiler.new VoltCompilerException("Invalid constraint limit number '" + tupleLimit + "'");
        }
        if (tableLimitConstraintCounter.contains(tableName)) {
            throw m_compiler.new VoltCompilerException("Too many table limit constraints for table " + tableName);
        } else {
            tableLimitConstraintCounter.add(tableName);
        }
        table.setTuplelimit(tupleLimit);
        String deleteStmt = node.attributes.get("rowslimitdeletestmt");
        if (deleteStmt != null) {
            Statement catStmt = table.getTuplelimitdeletestmt().add("limit_delete");
            catStmt.setSqltext(deleteStmt);
            validateTupleLimitDeleteStmt(catStmt);
        }
        return;
    }
    if (type == ConstraintType.CHECK) {
        String msg = "VoltDB does not enforce check constraints. ";
        msg += "Constraint on table " + tableName + " will be ignored.";
        m_compiler.addWarn(msg);
        return;
    } else if (type == ConstraintType.FOREIGN_KEY) {
        String msg = "VoltDB does not enforce foreign key references and constraints. ";
        msg += "Constraint on table " + tableName + " will be ignored.";
        m_compiler.addWarn(msg);
        return;
    } else if (type == ConstraintType.MAIN) {
        // should never see these
        assert (false);
    } else if (type == ConstraintType.NOT_NULL) {
        // these get handled by table metadata inspection
        return;
    } else if (type != ConstraintType.PRIMARY_KEY && type != ConstraintType.UNIQUE) {
        throw m_compiler.new VoltCompilerException("Invalid constraint type '" + typeName + "'");
    }
    // else, create the unique index below
    // primary key code is in other places as well
    // The constraint is backed by an index, therefore we need to create it
    // TODO: We need to be able to use indexes for foreign keys. I am purposely
    //       leaving those out right now because HSQLDB just makes too many of them.
    Constraint catalog_const = table.getConstraints().add(name);
    String indexName = node.attributes.get("index");
    assert (indexName != null);
    // handle replacements from duplicate index pruning
    if (indexReplacementMap.containsKey(indexName)) {
        indexName = indexReplacementMap.get(indexName);
    }
    Index catalog_index = indexMap.get(indexName);
    // Attach the index to the catalog constraint (catalog_const).
    if (catalog_index != null) {
        catalog_const.setIndex(catalog_index);
        // This may be redundant.
        catalog_index.setUnique(true);
        boolean assumeUnique = Boolean.parseBoolean(node.attributes.get("assumeunique"));
        catalog_index.setAssumeunique(assumeUnique);
    }
    catalog_const.setType(type.getValue());
}
Also used : Constraint(org.voltdb.catalog.Constraint) Statement(org.voltdb.catalog.Statement) CatchAllVoltDBStatement(org.voltdb.compiler.statements.CatchAllVoltDBStatement) PartitionStatement(org.voltdb.compiler.statements.PartitionStatement) ConstraintType(org.voltdb.types.ConstraintType) Index(org.voltdb.catalog.Index) VoltCompilerException(org.voltdb.compiler.VoltCompiler.VoltCompilerException)
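
The branching on constraint type in addConstraintToCatalog reduces to a small dispatch: LIMIT is handled directly, CHECK and FOREIGN_KEY produce warnings and are otherwise ignored, MAIN should never appear, NOT_NULL is covered by table metadata inspection, and only PRIMARY_KEY and UNIQUE reach the index-backed path. A compact sketch of that dispatch, with hypothetical CType and Outcome enums standing in for VoltDB's types:

final class ConstraintDispatch {
    enum CType { LIMIT, CHECK, FOREIGN_KEY, MAIN, NOT_NULL, PRIMARY_KEY, UNIQUE }
    enum Outcome { HANDLED, WARN_IGNORED, METADATA_ONLY, INDEX_BACKED, INVALID }

    static Outcome classify(CType type) {
        switch (type) {
            case LIMIT:       return Outcome.HANDLED;        // sets tuple limit + optional delete stmt
            case CHECK:
            case FOREIGN_KEY: return Outcome.WARN_IGNORED;   // warned, then ignored
            case MAIN:        throw new AssertionError("should never see MAIN");
            case NOT_NULL:    return Outcome.METADATA_ONLY;  // handled by table metadata inspection
            case PRIMARY_KEY:
            case UNIQUE:      return Outcome.INDEX_BACKED;   // backed by a unique index
            default:          return Outcome.INVALID;        // mirrors the VoltCompilerException case
        }
    }

    public static void main(String[] args) {
        System.out.println(classify(CType.FOREIGN_KEY)); // WARN_IGNORED
        System.out.println(classify(CType.UNIQUE));      // INDEX_BACKED
    }
}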

Example 13 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

The class DefaultProcedureManager, method addShimProcedure.

private void addShimProcedure(String name, Table table, Constraint pkey, boolean tableCols, int partitionParamIndex, Column partitionColumn, boolean readOnly) {
    Procedure proc = m_fakeDb.getProcedures().add(name);
    proc.setClassname(name);
    proc.setDefaultproc(true);
    proc.setHasjava(false);
    proc.setHasseqscans(false);
    proc.setSinglepartition(partitionParamIndex >= 0);
    proc.setPartitioncolumn(partitionColumn);
    proc.setPartitionparameter(partitionParamIndex);
    proc.setReadonly(readOnly);
    proc.setEverysite(false);
    proc.setSystemproc(false);
    proc.setPartitiontable(table);
    if (partitionParamIndex >= 0) {
        proc.setAttachment(new ProcedurePartitionInfo(VoltType.get((byte) partitionColumn.getType()), partitionParamIndex));
    }
    int paramCount = 0;
    if (tableCols) {
        for (Column col : table.getColumns()) {
            // name each parameter "param0", "param1", etc...
            ProcParameter procParam = proc.getParameters().add("param" + String.valueOf(paramCount));
            procParam.setIndex(col.getIndex());
            procParam.setIsarray(false);
            procParam.setType(col.getType());
            paramCount++;
        }
    }
    if (pkey != null) {
        CatalogMap<ColumnRef> pkeycols = pkey.getIndex().getColumns();
        int paramCount2 = paramCount;
        for (ColumnRef cref : pkeycols) {
            // name each parameter "param0", "param1", etc...
            ProcParameter procParam = proc.getParameters().add("param" + String.valueOf(paramCount2));
            procParam.setIndex(cref.getIndex() + paramCount);
            procParam.setIsarray(false);
            procParam.setType(cref.getColumn().getType());
            paramCount2++;
        }
    }
    m_defaultProcMap.put(name.toLowerCase(), proc);
}
Also used : ProcedurePartitionInfo(org.voltdb.CatalogContext.ProcedurePartitionInfo) Column(org.voltdb.catalog.Column) Procedure(org.voltdb.catalog.Procedure) ColumnRef(org.voltdb.catalog.ColumnRef) ProcParameter(org.voltdb.catalog.ProcParameter) Constraint(org.voltdb.catalog.Constraint)
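
The parameter layout built by addShimProcedure can be summarized as: one parameter per table column (when tableCols is set), followed by one per primary-key column, with the pkey parameters' names and indexes offset past the column parameters. Note that numbering starts at param0. A plain-Java sketch of that layout (ShimParamLayout and Param are hypothetical stand-ins, not VoltDB classes):

import java.util.ArrayList;
import java.util.List;

final class ShimParamLayout {
    record Param(String name, int index) {}

    static List<Param> layout(int tableColumnCount, int pkeyColumnCount, boolean includeTableCols) {
        List<Param> params = new ArrayList<>();
        int paramCount = 0;
        if (includeTableCols) {
            for (int col = 0; col < tableColumnCount; col++) {
                params.add(new Param("param" + paramCount, col));
                paramCount++;
            }
        }
        for (int k = 0; k < pkeyColumnCount; k++) {
            // pkey params come after any column params, hence the paramCount offset
            // (mirrors cref.getIndex() + paramCount in the source).
            params.add(new Param("param" + (paramCount + k), k + paramCount));
        }
        return params;
    }

    public static void main(String[] args) {
        // 3 table columns + 2 pkey columns: param0..param2, then param3, param4.
        System.out.println(layout(3, 2, true));
    }
}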

Example 14 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

The class DefaultProcedureManager, method sqlForDefaultProc.

public static String sqlForDefaultProc(Procedure defaultProc) {
    String name = defaultProc.getClassname();
    String[] parts = name.split("\\.");
    String action = parts[1];
    Table table = defaultProc.getPartitiontable();
    Column partitionColumn = defaultProc.getPartitioncolumn();
    final CatalogMap<Constraint> constraints = table.getConstraints();
    final Iterator<Constraint> it = constraints.iterator();
    Constraint pkey = null;
    while (it.hasNext()) {
        Constraint constraint = it.next();
        if (constraint.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
            pkey = constraint;
            break;
        }
    }
    switch(action) {
        case "select":
            assert (defaultProc.getSinglepartition());
            return generateCrudSelect(table, partitionColumn, pkey);
        case "insert":
            if (defaultProc.getSinglepartition()) {
                return generateCrudInsert(table, partitionColumn);
            } else {
                return generateCrudReplicatedInsert(table);
            }
        case "update":
            if (defaultProc.getSinglepartition()) {
                return generateCrudUpdate(table, partitionColumn, pkey);
            } else {
                return generateCrudReplicatedUpdate(table, pkey);
            }
        case "delete":
            if (defaultProc.getSinglepartition()) {
                return generateCrudDelete(table, partitionColumn, pkey);
            } else {
                return generateCrudReplicatedDelete(table, pkey);
            }
        case "upsert":
            if (defaultProc.getSinglepartition()) {
                return generateCrudUpsert(table, partitionColumn);
            } else {
                return generateCrudReplicatedUpsert(table, pkey);
            }
        default:
            throw new RuntimeException("Invalid input to default proc SQL generator.");
    }
}
Also used : Table(org.voltdb.catalog.Table) Column(org.voltdb.catalog.Column) Constraint(org.voltdb.catalog.Constraint)
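
The pkey lookup in sqlForDefaultProc is a linear scan that stops at the first PRIMARY_KEY constraint. The same semantics in stream form, sketched with a hypothetical Constraint record (the PRIMARY_KEY value below is an assumed stand-in for ConstraintType.PRIMARY_KEY.getValue(), which lives in VoltDB's enum):

import java.util.List;
import java.util.Optional;

final class PkeyLookup {
    record Constraint(String name, int type) {}
    static final int PRIMARY_KEY = 4; // assumed stand-in value, not VoltDB's actual constant

    static Optional<Constraint> findPrimaryKey(List<Constraint> constraints) {
        return constraints.stream()
                .filter(c -> c.type() == PRIMARY_KEY)
                .findFirst(); // same "break on first match" semantics as the while loop
    }

    public static void main(String[] args) {
        List<Constraint> cs = List.of(new Constraint("uq_email", 5),
                                      new Constraint("pk_id", PRIMARY_KEY));
        System.out.println(findPrimaryKey(cs).map(Constraint::name).orElse("<none>")); // pk_id
    }
}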

Example 15 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

The class AbstractParsedStmt, method addHonoraryOrderByExpressions.

/**
     * Given a set of order-by expressions and a select list, which is a list of
     * columns, each with an expression and an alias, expand the order-by list
     * with new expressions which could be on the order-by list without changing
     * the sort order and which are otherwise helpful.
     */
protected void addHonoraryOrderByExpressions(HashSet<AbstractExpression> orderByExprs, List<ParsedColInfo> candidateColumns) {
    // For now, only statements with exactly one table scan are handled; extending this
    // to the case of joins is the content of ticket ENG-8677.
    if (m_tableAliasMap.size() != 1) {
        return;
    }
    HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence = analyzeValueEquivalence();
    for (ParsedColInfo colInfo : candidateColumns) {
        AbstractExpression colExpr = colInfo.expression;
        if (colExpr instanceof TupleValueExpression) {
            Set<AbstractExpression> tveEquivs = valueEquivalence.get(colExpr);
            if (tveEquivs != null) {
                for (AbstractExpression expr : tveEquivs) {
                    if (expr instanceof ParameterValueExpression || expr instanceof ConstantValueExpression) {
                        orderByExprs.add(colExpr);
                    }
                }
            }
        }
    }
    // We know there's exactly one.
    StmtTableScan scan = m_tableAliasMap.values().iterator().next();
    // Get the table.  There's only one.
    Table table = getTableFromDB(scan.getTableName());
    // If the table is not found in the catalog,
    // there's no use to continue.
    if (table == null) {
        return;
    }
    // Now, look to see if there is a constraint which can help us.
    // If there is a unique constraint on a set of columns, and all
    // the constrained columns are in the order by list, then all
    // the columns in the table can be added to the order by list.
    //
    // The indices we care about have columns, but the order by list has expressions.
    // Extract the columns from the order by list.
    Set<Column> orderByColumns = new HashSet<>();
    for (AbstractExpression expr : orderByExprs) {
        if (expr instanceof TupleValueExpression) {
            TupleValueExpression tve = (TupleValueExpression) expr;
            Column col = table.getColumns().get(tve.getColumnName());
            orderByColumns.add(col);
        }
    }
    CatalogMap<Constraint> constraints = table.getConstraints();
    // If we have no constraints, there's nothing more to do here.
    if (constraints == null) {
        return;
    }
    Set<Index> indices = new HashSet<>();
    for (Constraint constraint : constraints) {
        Index index = constraint.getIndex();
        // Only use column indices for now.
        if (index != null && index.getUnique() && index.getExpressionsjson().isEmpty()) {
            indices.add(index);
        }
    }
    for (ParsedColInfo colInfo : candidateColumns) {
        AbstractExpression expr = colInfo.expression;
        if (expr instanceof TupleValueExpression) {
            TupleValueExpression tve = (TupleValueExpression) expr;
            // If some unique index is completely covered by the order-by
            // columns, we add all the table's columns and early-out.
            for (Index index : indices) {
                CatalogMap<ColumnRef> columns = index.getColumns();
                // If all the columns in this index are in the current
                // honorary order by list, then we can add all the
                // columns in this table to the honorary order by list.
                boolean addAllColumns = true;
                for (ColumnRef cr : columns) {
                    Column col = cr.getColumn();
                    if (orderByColumns.contains(col) == false) {
                        addAllColumns = false;
                        break;
                    }
                }
                if (addAllColumns) {
                    for (Column addCol : table.getColumns()) {
                        // We have to convert this to a TVE to add
                        // it to the orderByExprs.  We will use -1
                        // for the column index.  We don't have a column
                        // alias.
                        TupleValueExpression ntve = new TupleValueExpression(tve.getTableName(), tve.getTableAlias(), addCol.getName(), null, -1);
                        orderByExprs.add(ntve);
                    }
                    // Don't forget to remember to forget the other indices.  (E. Presley, 1955)
                    break;
                }
            }
        }
    }
}
Also used : TupleValueExpression(org.voltdb.expressions.TupleValueExpression) HashSet(java.util.HashSet) Set(java.util.Set) Table(org.voltdb.catalog.Table) Constraint(org.voltdb.catalog.Constraint) Index(org.voltdb.catalog.Index) StmtTableScan(org.voltdb.planner.parseinfo.StmtTableScan) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) SchemaColumn(org.voltdb.plannodes.SchemaColumn) ConstantValueExpression(org.voltdb.expressions.ConstantValueExpression) ColumnRef(org.voltdb.catalog.ColumnRef) ParameterValueExpression(org.voltdb.expressions.ParameterValueExpression)
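
The core test in addHonoraryOrderByExpressions is a covering check: if every column of some unique, column-only index already appears among the ORDER BY columns, then every column of the table can be treated as ordered. A self-contained sketch of just that check, with plain column-name strings standing in for the catalog's Column objects:

import java.util.List;
import java.util.Set;

final class HonoraryOrderBy {
    static boolean uniqueIndexCovered(List<Set<String>> uniqueIndexColumnSets,
                                      Set<String> orderByColumns) {
        for (Set<String> indexCols : uniqueIndexColumnSets) {
            if (orderByColumns.containsAll(indexCols)) {
                return true; // one fully covered unique index is enough; early-out
            }
        }
        return false;
    }

    public static void main(String[] args) {
        List<Set<String>> uniq = List.of(Set.of("ID"), Set.of("A", "B"));
        System.out.println(uniqueIndexCovered(uniq, Set.of("ID", "C"))); // true
        System.out.println(uniqueIndexCovered(uniq, Set.of("A", "C")));  // false
    }
}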

Aggregations

Constraint (org.voltdb.catalog.Constraint) 17
Table (org.voltdb.catalog.Table) 13
Column (org.voltdb.catalog.Column) 12
ColumnRef (org.voltdb.catalog.ColumnRef) 10
Index (org.voltdb.catalog.Index) 9
ArrayList (java.util.ArrayList) 5
AbstractExpression (org.voltdb.expressions.AbstractExpression) 5
HashSet (java.util.HashSet) 4
JSONException (org.json_voltpatches.JSONException) 4
Procedure (org.voltdb.catalog.Procedure) 4
Statement (org.voltdb.catalog.Statement) 4
VoltCompilerException (org.voltdb.compiler.VoltCompiler.VoltCompilerException) 4
ConstraintType (org.voltdb.types.ConstraintType) 4
HashMap (java.util.HashMap) 3
VoltXMLElement (org.hsqldb_voltpatches.VoltXMLElement) 3
VoltType (org.voltdb.VoltType) 3
TupleValueExpression (org.voltdb.expressions.TupleValueExpression) 3
SQLStmt (org.voltdb.SQLStmt) 2
VoltSystemProcedure (org.voltdb.VoltSystemProcedure) 2
VoltTable (org.voltdb.VoltTable) 2