
Example 6 with HSQLParseException

Use of org.hsqldb_voltpatches.HSQLInterface.HSQLParseException in project voltdb by VoltDB.

The class StatementDMQL, method voltGetXMLExpression.

static VoltXMLElement voltGetXMLExpression(QueryExpression queryExpr, ExpressionColumn[] parameters, Session session) throws HSQLParseException {
    // "select" statements/clauses are always represented by a QueryExpression of type QuerySpecification.
    // The only other instances of QueryExpression are direct QueryExpression instances instantiated in XreadSetOperation
    // to represent UNION, etc.
    int exprType = queryExpr.getUnionType();
    if (exprType == QueryExpression.NOUNION) {
        // "select" statements/clauses are always represented by a QueryExpression of type QuerySpecification.
        if (!(queryExpr instanceof QuerySpecification)) {
            throw new HSQLParseException(queryExpr.operatorName() + " is not supported.");
        }
        QuerySpecification select = (QuerySpecification) queryExpr;
        return voltGetXMLSpecification(select, parameters, session);
    }
    if (exprType == QueryExpression.UNION || exprType == QueryExpression.UNION_ALL || exprType == QueryExpression.EXCEPT || exprType == QueryExpression.EXCEPT_ALL || exprType == QueryExpression.INTERSECT || exprType == QueryExpression.INTERSECT_ALL) {
        VoltXMLElement unionExpr = new VoltXMLElement("union");
        unionExpr.attributes.put("uniontype", queryExpr.operatorName());
        VoltXMLElement leftExpr = voltGetXMLExpression(queryExpr.getLeftQueryExpression(), parameters, session);
        VoltXMLElement rightExpr = voltGetXMLExpression(queryExpr.getRightQueryExpression(), parameters, session);
        // parameters
        voltAppendParameters(session, unionExpr, parameters);
        // Limit/Offset
        List<VoltXMLElement> limitOffsetXml = voltGetLimitOffsetXMLFromSortAndSlice(session, queryExpr.sortAndSlice);
        for (VoltXMLElement elem : limitOffsetXml) {
            unionExpr.children.add(elem);
        }
        // Order By
        if (queryExpr.sortAndSlice.getOrderLength() > 0) {
            List<Expression> displayCols = getDisplayColumnsForSetOp(queryExpr);
            SimpleColumnContext context = new SimpleColumnContext(session, displayCols);
            VoltXMLElement orderCols = new VoltXMLElement("ordercolumns");
            unionExpr.children.add(orderCols);
            for (int i = 0; i < queryExpr.sortAndSlice.exprList.size(); ++i) {
                Expression e = (Expression) queryExpr.sortAndSlice.exprList.get(i);
                assert (e.getLeftNode() != null);
                // Get the display column with a corresponding index
                int index = e.getLeftNode().queryTableColumnIndex;
                assert (index < displayCols.size());
                Expression column = displayCols.get(index);
                e.setLeftNode(column);
                VoltXMLElement xml = e.voltGetXML(context.withStartKey(i), null);
                orderCols.children.add(xml);
            }
        }
        /*
         * Try to merge the parent and child nodes for UNION and INTERSECT (ALL) set operations,
         * but only if the children don't have their own limit/offset/order by clauses.
         * In the case of an EXCEPT (ALL) operation, only the left child can be merged with the parent,
         * in order to preserve associativity:
         * (Select1 EXCEPT Select2) EXCEPT Select3 vs. Select1 EXCEPT (Select2 EXCEPT Select3).
         */
        boolean canMergeLeft = !hasLimitOrOrder(leftExpr);
        if (canMergeLeft && "union".equalsIgnoreCase(leftExpr.name) && queryExpr.operatorName().equalsIgnoreCase(leftExpr.attributes.get("uniontype"))) {
            unionExpr.children.addAll(leftExpr.children);
        } else {
            unionExpr.children.add(leftExpr);
        }
        boolean canMergeRight = !hasLimitOrOrder(rightExpr);
        if (canMergeRight && exprType != QueryExpression.EXCEPT && exprType != QueryExpression.EXCEPT_ALL && "union".equalsIgnoreCase(rightExpr.name) && queryExpr.operatorName().equalsIgnoreCase(rightExpr.attributes.get("uniontype"))) {
            unionExpr.children.addAll(rightExpr.children);
        } else {
            unionExpr.children.add(rightExpr);
        }
        return unionExpr;
    }
    throw new HSQLParseException(queryExpr.operatorName() + " tuple set operator is not supported.");
}
Also used : SimpleColumnContext(org.hsqldb_voltpatches.Expression.SimpleColumnContext) HSQLParseException(org.hsqldb_voltpatches.HSQLInterface.HSQLParseException)
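
Note on the merge step above: the reason only the left child of an EXCEPT node is ever folded into the parent is that EXCEPT, unlike UNION and INTERSECT, is not associative. A minimal, self-contained sketch using plain java.util sets (values invented for illustration; this is not VoltDB code) makes the difference concrete:

import java.util.Set;
import java.util.TreeSet;

// Demonstrates why (S1 EXCEPT S2) EXCEPT S3 differs from S1 EXCEPT (S2 EXCEPT S3),
// which is why voltGetXMLExpression merges only the left child of an EXCEPT node.
public class ExceptAssociativitySketch {
    static Set<Integer> except(Set<Integer> a, Set<Integer> b) {
        Set<Integer> result = new TreeSet<>(a);
        result.removeAll(b);   // set difference, i.e. EXCEPT semantics
        return result;
    }

    public static void main(String[] args) {
        Set<Integer> s1 = new TreeSet<>(Set.of(1, 2, 3));
        Set<Integer> s2 = new TreeSet<>(Set.of(2));
        Set<Integer> s3 = new TreeSet<>(Set.of(3));
        System.out.println(except(except(s1, s2), s3)); // (S1 EXCEPT S2) EXCEPT S3 -> [1]
        System.out.println(except(s1, except(s2, s3))); // S1 EXCEPT (S2 EXCEPT S3) -> [1, 3]
    }
}

UNION and INTERSECT are associative, so either child may be folded into the parent, provided it carries no limit/offset/order by of its own, which is what the hasLimitOrOrder checks guard.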

Example 7 with HSQLParseException

Use of org.hsqldb_voltpatches.HSQLInterface.HSQLParseException in project voltdb by VoltDB.

The class StatementDMQL, method voltGetXMLSpecification.

private static VoltXMLElement voltGetXMLSpecification(QuerySpecification select, ExpressionColumn[] parameters, Session session) throws HSQLParseException {
    // select
    VoltXMLElement query = new VoltXMLElement("select");
    if (select.isDistinctSelect) {
        query.attributes.put("distinct", "true");
    }
    List<VoltXMLElement> limitOffsetXml = voltGetLimitOffsetXMLFromSortAndSlice(session, select.sortAndSlice);
    for (VoltXMLElement elem : limitOffsetXml) {
        query.children.add(elem);
    }
    // columns
    VoltXMLElement cols = new VoltXMLElement("columns");
    query.children.add(cols);
    java.util.ArrayList<Expression> orderByCols = new java.util.ArrayList<>();
    java.util.ArrayList<Expression> groupByCols = new java.util.ArrayList<>();
    select.displayCols.clear();
    java.util.ArrayList<Pair<Integer, HsqlNameManager.SimpleName>> aliases = new java.util.ArrayList<>();
    /*
     * select.exprColumns stores all of the columns needed by HSQL to
     * calculate the query's result set. It contains more than just the
     * columns in the output; for example, it contains columns representing
     * aliases, columns for groups, etc.
     *
     * Volt uses multiple collections to organize these columns.
     *
     * Observing this loop in a debugger, the following seems true:
     *
     * 1. Columns in exprColumns that appear in the output schema appear in
     * exprColumns in the same order that they occur in the output schema.
     *
     * 2. expr.columnIndex is an index back into the select.exprColumns
     * array. This allows multiple exprColumn entries to refer to each
     * other; for example, an OpTypes.SIMPLE_COLUMN type storing an alias
     * will have its columnIndex set to the offset of the expr it aliases.
     */
    for (int i = 0; i < select.exprColumns.length; i++) {
        final Expression expr = select.exprColumns[i];
        if (expr.alias != null) {
            /*
             * Remember how aliases relate to columns. We will iterate again later
             * and mutate the exprColumn entries, setting the alias string on the aliased
             * column entry.
             */
            if (expr instanceof ExpressionColumn) {
                ExpressionColumn exprColumn = (ExpressionColumn) expr;
                if (exprColumn.alias != null && exprColumn.columnName == null) {
                    aliases.add(Pair.of(expr.columnIndex, expr.alias));
                }
            } else if (expr.columnIndex > -1) {
                /*
                 * Only add it to the list of aliases that need to be
                 * propagated to columns if the column index is valid.
                 * ExpressionArithmetic will have an alias but not
                 * necessarily a column index.
                 */
                aliases.add(Pair.of(expr.columnIndex, expr.alias));
            }
        }
        // If the expression has no column index yet, point it at its own position in exprColumns;
        // it's easier to patch up display column ordering later.
        if (expr.columnIndex == -1) {
            expr.columnIndex = i;
        }
        if (isGroupByColumn(select, i)) {
            groupByCols.add(expr);
        } else if (expr.opType == OpTypes.ORDER_BY) {
            // If the selectQuerySpecification's sort structure has been reset,
            // do not add orderByCols.
            if (select.sortAndSlice.hasOrder()) {
                orderByCols.add(expr);
            }
        } else if (expr.equals(select.getHavingCondition())) {
            // Having
            if (!(expr instanceof ExpressionLogical && expr.isAggregate)) {
                throw new HSQLParseException("VoltDB does not support HAVING clause without aggregation. " + "Consider using WHERE clause if possible");
            }
        } else if (expr.opType != OpTypes.SIMPLE_COLUMN || (expr.isAggregate && expr.alias != null)) {
            // Add aggregate aliases to the display columns to maintain
            // the output schema column ordering.
            select.displayCols.add(expr);
        }
    // else, other simple columns are ignored. If others exist, maybe
    // volt infers a display column from another column collection?
    }
    for (Pair<Integer, HsqlNameManager.SimpleName> alias : aliases) {
        // set the alias data into the expression being aliased.
        select.exprColumns[alias.getFirst()].alias = alias.getSecond();
    }
    /*
     * The columns chosen above as display columns aren't always the same
     * expr objects HSQL would use as display columns - some data were
     * unified (namely, SIMPLE_COLUMN aliases were pushed into COLUMNS).
     *
     * However, the correct output schema ordering was preserved in exprColumns.
     * This order was maintained by adding SIMPLE_COLUMNs to displayCols.
     *
     * Now serialize the displayCols, substituting for each SIMPLE_COLUMN the
     * non-simple column expression it refers to.
     *
     * Serialize the display columns in the exprColumn order.
     */
    SimpleColumnContext context = new SimpleColumnContext(session, select.displayCols);
    // having
    Expression havingCondition = select.getHavingCondition();
    if (havingCondition != null) {
        VoltXMLElement having = new VoltXMLElement("having");
        query.children.add(having);
        VoltXMLElement havingExpr = havingCondition.voltGetXML(context.withStartKey(0), null);
        having.children.add(havingExpr);
    }
    for (int jj = 0; jj < select.displayCols.size(); ++jj) {
        Expression expr = select.displayCols.get(jj);
        if (context.disabledTheColumnForDisplay(jj)) {
            continue;
        }
        VoltXMLElement xml = expr.voltGetXML(context.withStartKey(jj), null);
        assert (xml != null);
        cols.children.add(xml);
    }
    // parameters
    voltAppendParameters(session, query, parameters);
    // scans
    VoltXMLElement scans = new VoltXMLElement("tablescans");
    query.children.add(scans);
    assert (scans != null);
    for (RangeVariable rangeVariable : select.rangeVariables) {
        scans.children.add(rangeVariable.voltGetRangeVariableXML(session));
    }
    // groupby
    if (select.isGrouped) {
        VoltXMLElement groupCols = new VoltXMLElement("groupcolumns");
        query.children.add(groupCols);
        for (int jj = 0; jj < groupByCols.size(); ++jj) {
            Expression expr = groupByCols.get(jj);
            VoltXMLElement xml = expr.voltGetXML(context.withStartKey(jj), null);
            groupCols.children.add(xml);
        }
    }
    // orderby
    if (orderByCols.size() > 0) {
        VoltXMLElement orderCols = new VoltXMLElement("ordercolumns");
        query.children.add(orderCols);
        for (int jj = 0; jj < orderByCols.size(); ++jj) {
            Expression expr = orderByCols.get(jj);
            VoltXMLElement xml = expr.voltGetXML(context.withStartKey(jj), null);
            orderCols.children.add(xml);
        }
    }
    // There is no need to resolve the effects of a join if there is only one table scan in the statement.
    if (scans.children.size() > 1) {
        List<VoltXMLElement> exprCols = query.extractSubElements("operation", "optype", "operator_case_when");
        resolveUsingExpressions(exprCols, select.rangeVariables);
    }
    return query;
}
Also used : ArrayList(java.util.ArrayList) HSQLParseException(org.hsqldb_voltpatches.HSQLInterface.HSQLParseException) SimpleColumnContext(org.hsqldb_voltpatches.Expression.SimpleColumnContext)
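
A side note on the VoltXMLElement API used throughout voltGetXMLSpecification: the method hand-assembles a small XML tree through the public attributes map and children list. A stripped-down sketch of that pattern, using only the calls that appear in the code above (attribute values here are placeholders):

import org.hsqldb_voltpatches.VoltXMLElement;

// Minimal sketch of the tree-building pattern in voltGetXMLSpecification:
// create a parent element, set attributes, then attach children in the order
// the planner expects to read them back.
final class SelectXmlSkeletonSketch {
    static VoltXMLElement buildSkeleton() {
        VoltXMLElement query = new VoltXMLElement("select");
        query.attributes.put("distinct", "true");   // only set when isDistinctSelect

        VoltXMLElement cols = new VoltXMLElement("columns");
        query.children.add(cols);                   // one child per display column

        VoltXMLElement scans = new VoltXMLElement("tablescans");
        query.children.add(scans);                  // one child per RangeVariable

        return query;
    }
}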

Example 8 with HSQLParseException

Use of org.hsqldb_voltpatches.HSQLInterface.HSQLParseException in project voltdb by VoltDB.

The class MaterializedViewProcessor, method startProcessing.

/**
     * Add materialized view info to the catalog for the tables that are
     * materialized views.
     * @throws VoltCompilerException
     */
public void startProcessing(Database db, HashMap<Table, String> matViewMap, TreeSet<String> exportTableNames) throws VoltCompilerException {
    HashSet<String> viewTableNames = new HashSet<>();
    for (Entry<Table, String> entry : matViewMap.entrySet()) {
        viewTableNames.add(entry.getKey().getTypeName());
    }
    for (Entry<Table, String> entry : matViewMap.entrySet()) {
        Table destTable = entry.getKey();
        String query = entry.getValue();
        // get the xml for the query
        VoltXMLElement xmlquery = null;
        try {
            xmlquery = m_hsql.getXMLCompiledStatement(query);
        } catch (HSQLParseException e) {
            e.printStackTrace();
        }
        assert (xmlquery != null);
        // parse the xml like any other sql statement
        ParsedSelectStmt stmt = null;
        try {
            stmt = (ParsedSelectStmt) AbstractParsedStmt.parse(query, xmlquery, null, db, null);
        } catch (Exception e) {
            throw m_compiler.new VoltCompilerException(e.getMessage());
        }
        assert (stmt != null);
        String viewName = destTable.getTypeName();
        // throw an error if the view isn't within voltdb's limited world view
        checkViewMeetsSpec(viewName, stmt);
        // The primary key index is yet to be defined (below).
        for (Index destIndex : destTable.getIndexes()) {
            if (destIndex.getUnique() || destIndex.getAssumeunique()) {
                String msg = "A UNIQUE or ASSUMEUNIQUE index is not allowed on a materialized view. " + "Remove the qualifier from the index " + destIndex.getTypeName() + "defined on the materialized view \"" + viewName + "\".";
                throw m_compiler.new VoltCompilerException(msg);
            }
        }
        // A Materialized view cannot depend on another view.
        for (Table srcTable : stmt.m_tableList) {
            if (viewTableNames.contains(srcTable.getTypeName())) {
                String msg = String.format("A materialized view (%s) can not be defined on another view (%s).", viewName, srcTable.getTypeName());
                throw m_compiler.new VoltCompilerException(msg);
            }
        }
        // The existing code base still needs this materializer field to tell if a table
        // is a materialized view table. Leaving this for future refactoring.
        destTable.setMaterializer(stmt.m_tableList.get(0));
        List<Column> destColumnArray = CatalogUtil.getSortedCatalogItems(destTable.getColumns(), "index");
        List<AbstractExpression> groupbyExprs = null;
        if (stmt.hasComplexGroupby()) {
            groupbyExprs = new ArrayList<>();
            for (ParsedColInfo col : stmt.groupByColumns()) {
                groupbyExprs.add(col.expression);
            }
        }
        // Generate query XMLs for min/max recalculation (ENG-8641)
        boolean isMultiTableView = stmt.m_tableList.size() > 1;
        MatViewFallbackQueryXMLGenerator xmlGen = new MatViewFallbackQueryXMLGenerator(xmlquery, stmt.groupByColumns(), stmt.m_displayColumns, isMultiTableView);
        List<VoltXMLElement> fallbackQueryXMLs = xmlGen.getFallbackQueryXMLs();
        // Only create the auto-generated primary key index and constraint when the view has
        // group-by columns; otherwise skip the index or constraint in order to avoid error and crash.
        if (stmt.groupByColumns().size() != 0) {
            Index pkIndex = destTable.getIndexes().add(HSQLInterface.AUTO_GEN_MATVIEW_IDX);
            pkIndex.setType(IndexType.BALANCED_TREE.getValue());
            pkIndex.setUnique(true);
            // assume index 1 through #grpByCols + 1 are the cols
            for (int i = 0; i < stmt.groupByColumns().size(); i++) {
                ColumnRef c = pkIndex.getColumns().add(String.valueOf(i));
                c.setColumn(destColumnArray.get(i));
                c.setIndex(i);
            }
            Constraint pkConstraint = destTable.getConstraints().add(HSQLInterface.AUTO_GEN_MATVIEW_CONST);
            pkConstraint.setType(ConstraintType.PRIMARY_KEY.getValue());
            pkConstraint.setIndex(pkIndex);
        }
        // If we have an unsafe MV message, then
        // remember it here.  We don't really know how
        // to transfer the message through the catalog, but
        // we can transmit the existence of the message.
        boolean isSafeForDDL = (stmt.getUnsafeMVMessage() == null);
        // Here the code path diverges for different kinds of views (single table view and joined table view)
        if (isMultiTableView) {
            // Materialized view on joined tables
            // Add mvHandlerInfo to the destTable:
            MaterializedViewHandlerInfo mvHandlerInfo = destTable.getMvhandlerinfo().add("mvHandlerInfo");
            mvHandlerInfo.setDesttable(destTable);
            for (Table srcTable : stmt.m_tableList) {
                // For now, we do not support a view on persistent tables joining streamed tables.
                if (exportTableNames.contains(srcTable.getTypeName())) {
                    String msg = String.format("A materialized view (%s) on joined tables cannot have streamed table (%s) as its source.", viewName, srcTable.getTypeName());
                    throw m_compiler.new VoltCompilerException(msg);
                }
                // The view table will need to keep a list of its source tables.
                // The list is used to install / uninstall the view reference on the source tables when the
                // view handler is constructed / destroyed.
                TableRef tableRef = mvHandlerInfo.getSourcetables().add(srcTable.getTypeName());
                tableRef.setTable(srcTable);
                // There could be more than one partition column candidate, but we will only use the first one we found.
                if (destTable.getPartitioncolumn() == null && srcTable.getPartitioncolumn() != null) {
                    Column partitionColumn = srcTable.getPartitioncolumn();
                    String partitionColName = partitionColumn.getTypeName();
                    String srcTableName = srcTable.getTypeName();
                    destTable.setIsreplicated(false);
                    if (stmt.hasComplexGroupby()) {
                        for (int i = 0; i < groupbyExprs.size(); i++) {
                            AbstractExpression groupbyExpr = groupbyExprs.get(i);
                            if (groupbyExpr instanceof TupleValueExpression) {
                                TupleValueExpression tve = (TupleValueExpression) groupbyExpr;
                                if (tve.getTableName().equals(srcTableName) && tve.getColumnName().equals(partitionColName)) {
                                    // The partition column is set to destColumnArray.get(i), because we have the restriction
                                    // that the non-aggregate columns must come at the very beginning, and must exactly match
                                    // the group-by columns.
                                    // If we are going to remove this restriction in the future, then we need to do more work
                                    // in order to find a proper partition column.
                                    destTable.setPartitioncolumn(destColumnArray.get(i));
                                    break;
                                }
                            }
                        }
                    } else {
                        for (int i = 0; i < stmt.groupByColumns().size(); i++) {
                            ParsedColInfo gbcol = stmt.groupByColumns().get(i);
                            if (gbcol.tableName.equals(srcTableName) && gbcol.columnName.equals(partitionColName)) {
                                destTable.setPartitioncolumn(destColumnArray.get(i));
                                break;
                            }
                        }
                    }
                }
            // end find partition column
            }
            // end for each source table
            compileFallbackQueriesAndUpdateCatalog(db, query, fallbackQueryXMLs, mvHandlerInfo);
            compileCreateQueryAndUpdateCatalog(db, query, xmlquery, mvHandlerInfo);
            mvHandlerInfo.setGroupbycolumncount(stmt.groupByColumns().size());
            for (int i = 0; i < stmt.m_displayColumns.size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                Column destColumn = destColumnArray.get(i);
                setTypeAttributesForColumn(destColumn, col.expression);
                // Set the expression type here to determine the behavior of the merge function.
                destColumn.setAggregatetype(col.expression.getExpressionType().getValue());
            }
            mvHandlerInfo.setIssafewithnonemptysources(isSafeForDDL);
        } else {
            // =======================================================================================
            // Materialized view on single table
            // create the materializedviewinfo catalog node for the source table
            Table srcTable = stmt.m_tableList.get(0);
            MaterializedViewInfo matviewinfo = srcTable.getViews().add(viewName);
            matviewinfo.setDest(destTable);
            AbstractExpression where = stmt.getSingleTableFilterExpression();
            if (where != null) {
                String hex = Encoder.hexEncode(where.toJSONString());
                matviewinfo.setPredicate(hex);
            } else {
                matviewinfo.setPredicate("");
            }
            List<Column> srcColumnArray = CatalogUtil.getSortedCatalogItems(srcTable.getColumns(), "index");
            if (stmt.hasComplexGroupby()) {
                // Parse group by expressions to json string
                String groupbyExprsJson = null;
                try {
                    groupbyExprsJson = DDLCompiler.convertToJSONArray(groupbyExprs);
                } catch (JSONException e) {
                    throw m_compiler.new VoltCompilerException("Unexpected error serializing non-column " + "expressions for group by expressions: " + e.toString());
                }
                matviewinfo.setGroupbyexpressionsjson(groupbyExprsJson);
            } else {
                // add the group by columns from the src table
                for (int i = 0; i < stmt.groupByColumns().size(); i++) {
                    ParsedColInfo gbcol = stmt.groupByColumns().get(i);
                    Column srcCol = srcColumnArray.get(gbcol.index);
                    ColumnRef cref = matviewinfo.getGroupbycols().add(srcCol.getTypeName());
                    // groupByColumns is iterating in order of groups. Store that grouping order
                    // in the column ref index. When the catalog is serialized, it will, naturally,
                    // scramble this order like a two-year-old playing dominoes, presenting the data
                    // in a meaningless sequence.
                    // the column offset in the view's grouping order
                    cref.setIndex(i);
                    // the source column from the base (non-view) table
                    cref.setColumn(srcCol);
                    // parse out the group by columns into the dest table
                    ParsedColInfo col = stmt.m_displayColumns.get(i);
                    Column destColumn = destColumnArray.get(i);
                    processMaterializedViewColumn(srcTable, destColumn, ExpressionType.VALUE_TUPLE, (TupleValueExpression) col.expression);
                }
            }
            // Set up COUNT(*) column
            ParsedColInfo countCol = stmt.m_displayColumns.get(stmt.groupByColumns().size());
            assert (countCol.expression.getExpressionType() == ExpressionType.AGGREGATE_COUNT_STAR);
            assert (countCol.expression.getLeft() == null);
            processMaterializedViewColumn(srcTable, destColumnArray.get(stmt.groupByColumns().size()), ExpressionType.AGGREGATE_COUNT_STAR, null);
            // prepare info for aggregation columns.
            List<AbstractExpression> aggregationExprs = new ArrayList<>();
            boolean hasAggregationExprs = false;
            ArrayList<AbstractExpression> minMaxAggs = new ArrayList<>();
            for (int i = stmt.groupByColumns().size() + 1; i < stmt.m_displayColumns.size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                AbstractExpression aggExpr = col.expression.getLeft();
                if (aggExpr.getExpressionType() != ExpressionType.VALUE_TUPLE) {
                    hasAggregationExprs = true;
                }
                aggregationExprs.add(aggExpr);
                if (col.expression.getExpressionType() == ExpressionType.AGGREGATE_MIN || col.expression.getExpressionType() == ExpressionType.AGGREGATE_MAX) {
                    minMaxAggs.add(aggExpr);
                }
            }
            compileFallbackQueriesAndUpdateCatalog(db, query, fallbackQueryXMLs, matviewinfo);
            // set Aggregation Expressions.
            if (hasAggregationExprs) {
                String aggregationExprsJson = null;
                try {
                    aggregationExprsJson = DDLCompiler.convertToJSONArray(aggregationExprs);
                } catch (JSONException e) {
                    throw m_compiler.new VoltCompilerException("Unexpected error serializing non-column " + "expressions for aggregation expressions: " + e.toString());
                }
                matviewinfo.setAggregationexpressionsjson(aggregationExprsJson);
            }
            // Find index for each min/max aggCol/aggExpr (ENG-6511 and ENG-8512)
            for (Integer i = 0; i < minMaxAggs.size(); ++i) {
                Index found = findBestMatchIndexForMatviewMinOrMax(matviewinfo, srcTable, groupbyExprs, minMaxAggs.get(i));
                IndexRef refFound = matviewinfo.getIndexforminmax().add(i.toString());
                if (found != null) {
                    refFound.setName(found.getTypeName());
                } else {
                    refFound.setName("");
                }
            }
            // The COUNT(*) should return a BIGINT column, whereas we found here that the COUNT(*) was assigned an INTEGER column.
            for (int i = 0; i <= stmt.groupByColumns().size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                Column destColumn = destColumnArray.get(i);
                setTypeAttributesForColumn(destColumn, col.expression);
            }
            // parse out the aggregation columns into the dest table
            for (int i = stmt.groupByColumns().size() + 1; i < stmt.m_displayColumns.size(); i++) {
                ParsedColInfo col = stmt.m_displayColumns.get(i);
                Column destColumn = destColumnArray.get(i);
                AbstractExpression colExpr = col.expression.getLeft();
                TupleValueExpression tve = null;
                if (colExpr.getExpressionType() == ExpressionType.VALUE_TUPLE) {
                    tve = (TupleValueExpression) colExpr;
                }
                processMaterializedViewColumn(srcTable, destColumn, col.expression.getExpressionType(), tve);
                setTypeAttributesForColumn(destColumn, col.expression);
            }
            if (srcTable.getPartitioncolumn() != null) {
                // Set the partitioning of destination tables of associated views.
                // If a view's source table is replicated, then a full scan of the
                // associated view is single-sited. If the source is partitioned,
                // a full scan of the view must be distributed, unless it is filtered
                // by the original table's partitioning key, which, to be filtered,
                // must also be a GROUP BY key.
                destTable.setIsreplicated(false);
                setGroupedTablePartitionColumn(matviewinfo, srcTable.getPartitioncolumn());
            }
            matviewinfo.setIssafewithnonemptysources(isSafeForDDL);
        }
    // end of single-table vs. joined-table view handling.
    }
}
Also used : Constraint(org.voltdb.catalog.Constraint) ArrayList(java.util.ArrayList) Index(org.voltdb.catalog.Index) VoltXMLElement(org.hsqldb_voltpatches.VoltXMLElement) HSQLParseException(org.hsqldb_voltpatches.HSQLInterface.HSQLParseException) IndexRef(org.voltdb.catalog.IndexRef) ParsedColInfo(org.voltdb.planner.ParsedColInfo) Column(org.voltdb.catalog.Column) ParsedSelectStmt(org.voltdb.planner.ParsedSelectStmt) VoltCompilerException(org.voltdb.compiler.VoltCompiler.VoltCompilerException) HashSet(java.util.HashSet) MaterializedViewInfo(org.voltdb.catalog.MaterializedViewInfo) TupleValueExpression(org.voltdb.expressions.TupleValueExpression) Table(org.voltdb.catalog.Table) JSONException(org.json_voltpatches.JSONException) AbstractExpression(org.voltdb.expressions.AbstractExpression) ColumnRef(org.voltdb.catalog.ColumnRef) MaterializedViewHandlerInfo(org.voltdb.catalog.MaterializedViewHandlerInfo) TableRef(org.voltdb.catalog.TableRef)
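
The column layout that startProcessing enforces (GROUP BY columns first and matching the view's leading columns, then a mandatory COUNT(*) column, then the aggregate columns, with no UNIQUE or ASSUMEUNIQUE index on the view) is easier to picture against a concrete definition. Here is a hypothetical DDL pair in the same string-literal style as the tests below; the table, view, and column names are invented for illustration:

// Hypothetical schema, for illustration only: the view's select list starts with the
// GROUP BY columns, followed by COUNT(*), followed by the aggregate columns;
// this is the layout startProcessing() expects to find in stmt.m_displayColumns.
final class MatViewDdlSketch {
    static final String SOURCE_DDL =
            "CREATE TABLE orders (" +
            " cust_id INTEGER NOT NULL," +
            " amount FLOAT NOT NULL" +
            ");";
    static final String VIEW_DDL =
            "CREATE VIEW order_totals (cust_id, order_count, total_amount)" +
            " AS SELECT cust_id, COUNT(*), SUM(amount)" +
            " FROM orders GROUP BY cust_id;";
}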

Example 9 with HSQLParseException

Use of org.hsqldb_voltpatches.HSQLInterface.HSQLParseException in project voltdb by VoltDB.

The class TestDDLCompiler, method testCharIsNotAllowed.

public void testCharIsNotAllowed() {
    String ddl1 = "CREATE TABLE warehouse ( " + "w_street_1 char(32) default NULL " + ");";
    HSQLInterface hsql = HSQLInterface.loadHsqldb();
    try {
        hsql.runDDLCommand(ddl1);
    } catch (HSQLParseException e) {
        assertTrue(true);
        return;
    }
    fail();
}
Also used : HSQLInterface(org.hsqldb_voltpatches.HSQLInterface) HSQLParseException(org.hsqldb_voltpatches.HSQLInterface.HSQLParseException)
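
For contrast with the failure path above, here is a sketch of the expected happy path, using only the HSQLInterface calls that appear in these examples. The VARCHAR substitution and the test name are assumptions, not taken from the VoltDB test suite; JUnit-style assertTrue()/fail() helpers are assumed, as in the surrounding tests.

// Sketch only (not from the VoltDB test suite): exercises the same API as
// testCharIsNotAllowed, but with DDL that is expected to parse (VARCHAR instead of CHAR).
// Uses org.hsqldb_voltpatches.HSQLInterface, its nested HSQLParseException,
// and org.hsqldb_voltpatches.VoltXMLElement.
public void testVarcharIsAllowedSketch() {
    String ddl = "CREATE TABLE warehouse ( w_street_1 varchar(32) default NULL );";
    HSQLInterface hsql = HSQLInterface.loadHsqldb();
    try {
        hsql.runDDLCommand(ddl);
        VoltXMLElement xmlSQL = hsql.getXMLCompiledStatement("SELECT w_street_1 FROM warehouse;");
        assertTrue(xmlSQL != null);
    } catch (HSQLParseException e) {
        fail("Expected the DDL and query to compile: " + e.getMessage());
    }
}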

Example 10 with HSQLParseException

Use of org.hsqldb_voltpatches.HSQLInterface.HSQLParseException in project voltdb by VoltDB.

The class TestParsedStatements, method runSQLTest.

void runSQLTest(String stmtName, String stmtSQL) {
    // use HSQLDB to get XML that describes the semantics of the statement
    // this is much easier to parse than SQL and is checked against the catalog
    VoltXMLElement xmlSQL = null;
    try {
        xmlSQL = m_hsql.getXMLCompiledStatement(stmtSQL);
    } catch (HSQLParseException e) {
        e.printStackTrace();
        assertTrue(false);
    }
    // output the xml from hsql to disk for debugging
    BuildDirectoryUtils.writeFile("statement-hsql-xml", stmtName + ".xml", xmlSQL.toString(), true);
    // get a parsed statement from the xml
    AbstractParsedStmt parsedStmt = AbstractParsedStmt.parse(stmtSQL, xmlSQL, null, m_db, null);
    // Analyze the join expressions when a join tree exists,
    // except for "insert" statements that currently do without a joinTree.
    if (parsedStmt.m_joinTree != null) {
        parsedStmt.m_joinTree.analyzeJoinExpressions(parsedStmt.m_noTableSelectionList);
    }
    // output a description of the parsed stmt
    BuildDirectoryUtils.writeFile("statement-hsql-parsed", stmtName + ".txt", parsedStmt.toString(), true);
    assertTrue(parsedStmt.m_noTableSelectionList.isEmpty());
    System.out.println(parsedStmt.toString());
}
Also used : VoltXMLElement(org.hsqldb_voltpatches.VoltXMLElement) HSQLParseException(org.hsqldb_voltpatches.HSQLInterface.HSQLParseException)

Aggregations

HSQLParseException (org.hsqldb_voltpatches.HSQLInterface.HSQLParseException): 13
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 2
URL (java.net.URL): 2
ArrayList (java.util.ArrayList): 2
SimpleColumnContext (org.hsqldb_voltpatches.Expression.SimpleColumnContext): 2
VoltXMLElement (org.hsqldb_voltpatches.VoltXMLElement): 2
VoltCompilerException (org.voltdb.compiler.VoltCompiler.VoltCompilerException): 2
CompiledPlan (org.voltdb.planner.CompiledPlan): 2
PlanningErrorException (org.voltdb.planner.PlanningErrorException): 2
QueryPlanner (org.voltdb.planner.QueryPlanner): 2
TrivialCostModel (org.voltdb.planner.TrivialCostModel): 2
IOException (java.io.IOException): 1
BigDecimal (java.math.BigDecimal): 1
HashSet (java.util.HashSet): 1
HSQLDDLInfo (org.hsqldb_voltpatches.HSQLDDLInfo): 1
HSQLInterface (org.hsqldb_voltpatches.HSQLInterface): 1
VoltXMLDiff (org.hsqldb_voltpatches.VoltXMLElement.VoltXMLDiff): 1
Index (org.hsqldb_voltpatches.index.Index): 1
HsqlArrayList (org.hsqldb_voltpatches.lib.HsqlArrayList): 1
BinaryData (org.hsqldb_voltpatches.types.BinaryData): 1