
Example 6 with ColumnRef

Use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.

The class DDLCompiler, method addIndexToCatalog.

private static void addIndexToCatalog(Database db, Table table, VoltXMLElement node, Map<String, String> indexReplacementMap, HashMap<String, Index> indexMap, HashMap<String, Column> columnMap, VoltCompiler compiler) throws VoltCompilerException {
    assert node.name.equals("index");
    String name = node.attributes.get("name");
    boolean unique = Boolean.parseBoolean(node.attributes.get("unique"));
    boolean assumeUnique = Boolean.parseBoolean(node.attributes.get("assumeunique"));
    AbstractParsedStmt dummy = new ParsedSelectStmt(null, db);
    dummy.setDDLIndexedTable(table);
    StringBuffer msg = new StringBuffer(String.format("Index \"%s\" ", name));
    // "parse" the expression trees for an expression-based index (vs. a simple column value index)
    List<AbstractExpression> exprs = null;
    // "parse" the WHERE expression for partial index if any
    AbstractExpression predicate = null;
    // Some expressions have special validation in indices.  Not all the expression
    // can be indexed. We scan for result type at first here and block those which
    // can't be indexed like boolean, geo ... We gather rest of expression into
    // checkExpressions list.  We will check on them all at once.
    List<AbstractExpression> checkExpressions = new ArrayList<>();
    for (VoltXMLElement subNode : node.children) {
        if (subNode.name.equals("exprs")) {
            exprs = new ArrayList<>();
            for (VoltXMLElement exprNode : subNode.children) {
                AbstractExpression expr = dummy.parseExpressionTree(exprNode);
                expr.resolveForTable(table);
                expr.finalizeValueTypes();
                // string will be populated with an expression's details when
                // its value type is not indexable
                StringBuffer exprMsg = new StringBuffer();
                if (!expr.isValueTypeIndexable(exprMsg)) {
                    // indexing on expression with boolean result is not supported.
                    throw compiler.new VoltCompilerException("Cannot create index \"" + name + "\" because it contains " + exprMsg + ", which is not supported.");
                }
                if ((unique || assumeUnique) && !expr.isValueTypeUniqueIndexable(exprMsg)) {
                    // indexing on expression with boolean result is not supported.
                    throw compiler.new VoltCompilerException("Cannot create unique index \"" + name + "\" because it contains " + exprMsg + ", which is not supported.");
                }
                // rest of the validity guards will be evaluated after collecting all the expressions.
                checkExpressions.add(expr);
                exprs.add(expr);
            }
        } else if (subNode.name.equals("predicate")) {
            assert (subNode.children.size() == 1);
            VoltXMLElement predicateXML = subNode.children.get(0);
            assert (predicateXML != null);
            predicate = buildPartialIndexPredicate(dummy, name, predicateXML, table, compiler);
        }
    }
    // Check all the subexpressions we gathered up.
    if (!AbstractExpression.validateExprsForIndexesAndMVs(checkExpressions, msg)) {
        // The error message will be in the StringBuffer msg.
        throw compiler.new VoltCompilerException(msg.toString());
    }
    String colList = node.attributes.get("columns");
    String[] colNames = colList.split(",");
    Column[] columns = new Column[colNames.length];
    boolean has_nonint_col = false;
    boolean has_geo_col = false;
    String nonint_col_name = null;
    for (int i = 0; i < colNames.length; i++) {
        columns[i] = columnMap.get(colNames[i]);
        if (columns[i] == null) {
            return;
        }
    }
    UnsafeOperatorsForDDL unsafeOps = new UnsafeOperatorsForDDL();
    if (exprs == null) {
        for (int i = 0; i < colNames.length; i++) {
            VoltType colType = VoltType.get((byte) columns[i].getType());
            if (!colType.isIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values are not currently supported as index keys: \"" + colNames[i] + "\"";
                throw compiler.new VoltCompilerException(emsg);
            }
            if ((unique || assumeUnique) && !colType.isUniqueIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values are not currently supported as unique index keys: \"" + colNames[i] + "\"";
                throw compiler.new VoltCompilerException(emsg);
            }
            if (!colType.isBackendIntegerType()) {
                has_nonint_col = true;
                nonint_col_name = colNames[i];
                has_geo_col = colType.equals(VoltType.GEOGRAPHY);
                if (has_geo_col && colNames.length > 1) {
                    String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values must be the only component of an index key: \"" + nonint_col_name + "\"";
                    throw compiler.new VoltCompilerException(emsg);
                }
            }
        }
    } else {
        for (AbstractExpression expression : exprs) {
            VoltType colType = expression.getValueType();
            if (!colType.isIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " valued expressions are not currently supported as index keys.";
                throw compiler.new VoltCompilerException(emsg);
            }
            if ((unique || assumeUnique) && !colType.isUniqueIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " valued expressions are not currently supported as unique index keys.";
                throw compiler.new VoltCompilerException(emsg);
            }
            if (!colType.isBackendIntegerType()) {
                has_nonint_col = true;
                nonint_col_name = "<expression>";
                has_geo_col = colType.equals(VoltType.GEOGRAPHY);
                if (has_geo_col) {
                    if (exprs.size() > 1) {
                        String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values must be the only component of an index key.";
                        throw compiler.new VoltCompilerException(emsg);
                    } else if (!(expression instanceof TupleValueExpression)) {
                        String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " expressions must be simple column expressions.";
                        throw compiler.new VoltCompilerException(emsg);
                    }
                }
            }
            expression.findUnsafeOperatorsForDDL(unsafeOps);
        }
    }
    Index index = table.getIndexes().add(name);
    index.setCountable(false);
    index.setIssafewithnonemptysources(!unsafeOps.isUnsafe());
    // Set the index type.  It will be one of:
    // - Covering cell index (geo index for CONTAINS predicates)
    // - HASH index (set in HSQL because "hash" is in the name of the
    //   constraint or the index)
    // - TREE index, which is the default
    boolean isHashIndex = node.attributes.get("ishashindex").equals("true");
    if (has_geo_col) {
        index.setType(IndexType.COVERING_CELL_INDEX.getValue());
    } else if (isHashIndex) {
        // warn user that hash index will be deprecated
        compiler.addWarn("Hash indexes are deprecated. In a future release, VoltDB will only support tree indexes, even if the index name contains the string \"hash\"");
        // make the index a hash.
        if (has_nonint_col) {
            String emsg = "Index " + name + " in table " + table.getTypeName() + " uses a non-hashable column " + nonint_col_name;
            throw compiler.new VoltCompilerException(emsg);
        }
        index.setType(IndexType.HASH_TABLE.getValue());
    } else {
        index.setType(IndexType.BALANCED_TREE.getValue());
        index.setCountable(true);
    }
    // For an expression index, the ColumnRefs added below do not correspond to the indexed values,
    // but they still represent the columns that will trigger an index update when their values change.
    for (int i = 0; i < columns.length; i++) {
        ColumnRef cref = index.getColumns().add(columns[i].getTypeName());
        cref.setColumn(columns[i]);
        cref.setIndex(i);
    }
    if (exprs != null) {
        try {
            index.setExpressionsjson(convertToJSONArray(exprs));
        } catch (JSONException e) {
            throw compiler.new VoltCompilerException("Unexpected error serializing non-column expressions for index '" + name + "' on type '" + table.getTypeName() + "': " + e.toString());
        }
    }
    index.setUnique(unique);
    if (assumeUnique) {
        index.setUnique(true);
    }
    index.setAssumeunique(assumeUnique);
    if (predicate != null) {
        try {
            index.setPredicatejson(convertToJSONObject(predicate));
        } catch (JSONException e) {
            throw compiler.new VoltCompilerException("Unexpected error serializing predicate for partial index '" + name + "' on type '" + table.getTypeName() + "': " + e.toString());
        }
    }
    // Check whether this index merely duplicates an existing one; only a real difference in
    // type, columns, expressions, or predicate will make two indexes different.
    for (Index existingIndex : table.getIndexes()) {
        // skip thineself
        if (existingIndex == index) {
            continue;
        }
        if (indexesAreDups(existingIndex, index)) {
            // replace any constraints using one index with the other
            //for () TODO
            // get ready for replacements from constraints created later
            indexReplacementMap.put(index.getTypeName(), existingIndex.getTypeName());
            // if the index is a user-named index...
            if (index.getTypeName().startsWith(HSQLInterface.AUTO_GEN_PREFIX) == false) {
                // on dup-detection, add a warning but don't fail
                String emsg = String.format("Dropping index %s on table %s because it duplicates index %s.", index.getTypeName(), table.getTypeName(), existingIndex.getTypeName());
                compiler.addWarn(emsg);
            }
            // drop the index and GTFO
            table.getIndexes().delete(index.getTypeName());
            return;
        }
    }
    String smsg = "Created index: " + name + " on table: " + table.getTypeName() + " of type: " + IndexType.get(index.getType()).name();
    compiler.addInfo(smsg);
    indexMap.put(name, index);
}
Also used : TupleValueExpression(org.voltdb.expressions.TupleValueExpression) ArrayList(java.util.ArrayList) JSONException(org.json_voltpatches.JSONException) Index(org.voltdb.catalog.Index) VoltXMLElement(org.hsqldb_voltpatches.VoltXMLElement) AbstractParsedStmt(org.voltdb.planner.AbstractParsedStmt) Constraint(org.voltdb.catalog.Constraint) UnsafeOperatorsForDDL(org.voltdb.expressions.AbstractExpression.UnsafeOperatorsForDDL) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) VoltType(org.voltdb.VoltType) ParsedSelectStmt(org.voltdb.planner.ParsedSelectStmt) ColumnRef(org.voltdb.catalog.ColumnRef) VoltCompilerException(org.voltdb.compiler.VoltCompiler.VoltCompilerException)
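
For orientation, a minimal standalone sketch of the index-type decision at the end of addIndexToCatalog follows. The class, enum, and parameter names are illustrative assumptions rather than VoltDB types; only the order of the geography, hash, and tree branches is taken from the method above.

public class IndexTypeChoiceSketch {

    enum SketchIndexType { COVERING_CELL_INDEX, HASH_TABLE, BALANCED_TREE }

    // Condenses the branch at the end of addIndexToCatalog: geography keys get the
    // covering-cell index, "hash" indexes require integer-typed keys, and everything
    // else becomes the default countable balanced tree.
    static SketchIndexType chooseType(boolean hasGeoColumn,
                                      boolean requestedHash,
                                      boolean hasNonIntegerColumn) {
        if (hasGeoColumn) {
            return SketchIndexType.COVERING_CELL_INDEX;
        }
        if (requestedHash) {
            if (hasNonIntegerColumn) {
                // The real method throws a VoltCompilerException here instead of falling back.
                throw new IllegalArgumentException("hash index requested over a non-hashable column");
            }
            return SketchIndexType.HASH_TABLE;
        }
        return SketchIndexType.BALANCED_TREE;
    }

    public static void main(String[] args) {
        System.out.println(chooseType(true, false, true));   // COVERING_CELL_INDEX
        System.out.println(chooseType(false, true, false));  // HASH_TABLE
        System.out.println(chooseType(false, false, true));  // BALANCED_TREE
    }
}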

Example 7 with ColumnRef

Use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.

The class DDLCompiler, method indexesAreDups.

/**
     * Return true if the two indexes are identical with a different name.
     */
private static boolean indexesAreDups(Index idx1, Index idx2) {
    // same attributes?
    if (idx1.getType() != idx2.getType()) {
        return false;
    }
    if (idx1.getCountable() != idx2.getCountable()) {
        return false;
    }
    if (idx1.getUnique() != idx2.getUnique()) {
        return false;
    }
    if (idx1.getAssumeunique() != idx2.getAssumeunique()) {
        return false;
    }
    // same column count?
    if (idx1.getColumns().size() != idx2.getColumns().size()) {
        return false;
    }
    if (!(idx1.getExpressionsjson().equals(idx2.getExpressionsjson()))) {
        return false;
    }
    // Simple column indexes have identical empty expression strings so need to be distinguished other ways.
    // More complex expression indexes that have the same expression strings always have the same set of (base)
    // columns referenced in the same order, but we fall through and check them, anyway.
    // sort in index order the columns of idx1, each identified by its index in the base table
    int[] idx1baseTableOrder = new int[idx1.getColumns().size()];
    for (ColumnRef cref : idx1.getColumns()) {
        int index = cref.getIndex();
        int baseTableIndex = cref.getColumn().getIndex();
        idx1baseTableOrder[index] = baseTableIndex;
    }
    // sort in index order the columns of idx2, each identified by its index in the base table
    int[] idx2baseTableOrder = new int[idx2.getColumns().size()];
    for (ColumnRef cref : idx2.getColumns()) {
        int index = cref.getIndex();
        int baseTableIndex = cref.getColumn().getIndex();
        idx2baseTableOrder[index] = baseTableIndex;
    }
    // Duplicate indexes have identical columns in identical order.
    if (!Arrays.equals(idx1baseTableOrder, idx2baseTableOrder)) {
        return false;
    }
    // Check the predicates
    if (idx1.getPredicatejson().length() > 0) {
        return idx1.getPredicatejson().equals(idx2.getPredicatejson());
    }
    if (idx2.getPredicatejson().length() > 0) {
        return idx2.getPredicatejson().equals(idx1.getPredicatejson());
    }
    return true;
}
Also used : ColumnRef(org.voltdb.catalog.ColumnRef) Constraint(org.voltdb.catalog.Constraint)
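
To make the comparison concrete, here is a standalone sketch of the duplicate-index test with a plain data class standing in for org.voltdb.catalog.Index. The field names are assumptions for illustration; the sequence of checks follows indexesAreDups above.

import java.util.Arrays;

public class IndexDupSketch {

    // A plain stand-in for org.voltdb.catalog.Index; field names are illustrative.
    static final class IndexSketch {
        final int type;
        final boolean countable, unique, assumeUnique;
        final String expressionsJson, predicateJson;
        final int[] baseTableOrder; // base-table position of each key column, in key order

        IndexSketch(int type, boolean countable, boolean unique, boolean assumeUnique,
                    String expressionsJson, String predicateJson, int[] baseTableOrder) {
            this.type = type;
            this.countable = countable;
            this.unique = unique;
            this.assumeUnique = assumeUnique;
            this.expressionsJson = expressionsJson;
            this.predicateJson = predicateJson;
            this.baseTableOrder = baseTableOrder;
        }
    }

    // Mirrors the sequence of checks in indexesAreDups.
    static boolean areDups(IndexSketch a, IndexSketch b) {
        if (a.type != b.type) return false;
        if (a.countable != b.countable) return false;
        if (a.unique != b.unique) return false;
        if (a.assumeUnique != b.assumeUnique) return false;
        if (!a.expressionsJson.equals(b.expressionsJson)) return false;
        // Same base-table columns in the same key order (also catches different column counts).
        if (!Arrays.equals(a.baseTableOrder, b.baseTableOrder)) return false;
        // Partial-index predicates must agree as well.
        return a.predicateJson.equals(b.predicateJson);
    }

    public static void main(String[] args) {
        IndexSketch t1 = new IndexSketch(1, true, false, false, "", "", new int[]{2, 0});
        IndexSketch t2 = new IndexSketch(1, true, false, false, "", "", new int[]{2, 0});
        IndexSketch t3 = new IndexSketch(1, true, false, false, "", "", new int[]{0, 2});
        System.out.println(areDups(t1, t2)); // true  -- same columns, same key order
        System.out.println(areDups(t1, t3)); // false -- same columns, different key order
    }
}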

Example 8 with ColumnRef

Use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.

The class MaterializedViewProcessor, method findBestMatchIndexForMatviewMinOrMax.

// if the materialized view has MIN / MAX, try to find an index defined on the source table
// covering all group by cols / exprs to avoid expensive tablescan.
// For now, the only acceptable index is defined exactly on the group by columns IN ORDER.
// This allows the same key to be used to do lookups on the grouped table index and the
// base table index.
// TODO: More flexible (but usually less optimal*) indexes may be allowed here and supported
// in the EE in the future including:
//   -- *indexes on the group keys listed out of order
//   -- *indexes on the group keys as a prefix before other indexed values.
//   -- (ENG-6511) indexes on the group keys PLUS the MIN/MAX argument value (to eliminate post-filtering)
// This function is mostly re-written for the fix of ENG-6511. --yzhang
private static Index findBestMatchIndexForMatviewMinOrMax(MaterializedViewInfo matviewinfo, Table srcTable, List<AbstractExpression> groupbyExprs, AbstractExpression minMaxAggExpr) {
    CatalogMap<Index> allIndexes = srcTable.getIndexes();
    StmtTableScan tableScan = new StmtTargetTableScan(srcTable);
    // Candidate index. If we can find an index covering both group-by columns and aggExpr (optimal) then we will
    // return immediately.
    // If the index found covers only group-by columns (sub-optimal), we will first cache it here.
    Index candidate = null;
    for (Index index : allIndexes) {
        // indexOptimalForMinMax == true if the index covered both the group-by columns and the min/max aggExpr.
        boolean indexOptimalForMinMax = false;
        // If minMaxAggExpr is not null, the diff can be zero or one.
        // Otherwise, for a usable index, its number of columns must agree with that of the group-by columns.
        final int diffAllowance = minMaxAggExpr == null ? 0 : 1;
        // Get all indexed exprs if there is any.
        String expressionjson = index.getExpressionsjson();
        List<AbstractExpression> indexedExprs = null;
        if (!expressionjson.isEmpty()) {
            try {
                indexedExprs = AbstractExpression.fromJSONArrayString(expressionjson, tableScan);
            } catch (JSONException e) {
                e.printStackTrace();
                assert (false);
                return null;
            }
        }
        // Get source table columns.
        List<Column> srcColumnArray = CatalogUtil.getSortedCatalogItems(srcTable.getColumns(), "index");
        MatViewIndexMatchingGroupby matchingCase = null;
        if (groupbyExprs == null) {
            // This means group-by columns are all simple columns.
            // It also means we can only access the group-by columns by colref.
            List<ColumnRef> groupbyColRefs = CatalogUtil.getSortedCatalogItems(matviewinfo.getGroupbycols(), "index");
            if (indexedExprs == null) {
                matchingCase = MatViewIndexMatchingGroupby.GB_COL_IDX_COL;
                // All the columns in the index are also simple columns, EASY! colref vs. colref
                List<ColumnRef> indexedColRefs = CatalogUtil.getSortedCatalogItems(index.getColumns(), "index");
                // A usable index has indexedColRefs.size() == groupbyColRefs.size() (covers the group-by columns) or
                // indexedColRefs.size() == groupbyColRefs.size() + 1 (optimal, diffAllowance == 1)
                if (isInvalidIndexCandidate(indexedColRefs.size(), groupbyColRefs.size(), diffAllowance)) {
                    continue;
                }
                if (!isGroupbyMatchingIndex(matchingCase, groupbyColRefs, null, indexedColRefs, null, null)) {
                    continue;
                }
                if (isValidIndexCandidateForMinMax(indexedColRefs.size(), groupbyColRefs.size(), diffAllowance)) {
                    if (!isIndexOptimalForMinMax(matchingCase, minMaxAggExpr, indexedColRefs, null, srcColumnArray)) {
                        continue;
                    }
                    indexOptimalForMinMax = true;
                }
            } else {
                matchingCase = MatViewIndexMatchingGroupby.GB_COL_IDX_EXP;
                // Group-by columns are simple colrefs here, but the index is defined on expressions;
                // for the index columns, convert each TVE to its base column before comparing.
                if (isInvalidIndexCandidate(indexedExprs.size(), groupbyColRefs.size(), diffAllowance)) {
                    continue;
                }
                if (!isGroupbyMatchingIndex(matchingCase, groupbyColRefs, null, null, indexedExprs, srcColumnArray)) {
                    continue;
                }
                if (isValidIndexCandidateForMinMax(indexedExprs.size(), groupbyColRefs.size(), diffAllowance)) {
                    if (!isIndexOptimalForMinMax(matchingCase, minMaxAggExpr, null, indexedExprs, null)) {
                        continue;
                    }
                    indexOptimalForMinMax = true;
                }
            }
        } else {
            matchingCase = MatViewIndexMatchingGroupby.GB_EXP_IDX_EXP;
            // Group-by columns include complex expressions, so the index must also be
            // expression-based: indexedExprs must not be null in this case. (yeah!)
            if (indexedExprs == null) {
                continue;
            }
            if (isInvalidIndexCandidate(indexedExprs.size(), groupbyExprs.size(), diffAllowance)) {
                continue;
            }
            if (!isGroupbyMatchingIndex(matchingCase, null, groupbyExprs, null, indexedExprs, null)) {
                continue;
            }
            if (isValidIndexCandidateForMinMax(indexedExprs.size(), groupbyExprs.size(), diffAllowance)) {
                if (!isIndexOptimalForMinMax(matchingCase, minMaxAggExpr, null, indexedExprs, null)) {
                    continue;
                }
                indexOptimalForMinMax = true;
            }
        }
        // NOW index at least covered all group-by columns (sub-optimal candidate)
        if (!index.getPredicatejson().isEmpty()) {
            // Additional check for partial indexes to make sure matview WHERE clause
            // covers the partial index predicate
            List<AbstractExpression> coveringExprs = new ArrayList<>();
            List<AbstractExpression> exactMatchCoveringExprs = new ArrayList<>();
            try {
                String encodedPredicate = matviewinfo.getPredicate();
                if (!encodedPredicate.isEmpty()) {
                    String predicate = Encoder.hexDecodeToString(encodedPredicate);
                    AbstractExpression matViewPredicate = AbstractExpression.fromJSONString(predicate, tableScan);
                    coveringExprs.addAll(ExpressionUtil.uncombineAny(matViewPredicate));
                }
            } catch (JSONException e) {
                e.printStackTrace();
                assert (false);
                return null;
            }
            String predicatejson = index.getPredicatejson();
            if (!predicatejson.isEmpty() && !SubPlanAssembler.isPartialIndexPredicateCovered(tableScan, coveringExprs, predicatejson, exactMatchCoveringExprs)) {
                // The partial index predicate is not covered by the materialized view's
                // WHERE clause -- give up on this index
                continue;
            }
        }
        // If the index covered both the group-by columns and the MIN/MAX aggregate argument,
        // it is already the best index we can get, return immediately.
        if (indexOptimalForMinMax) {
            return index;
        }
        // otherwise wait to see if we can find something better!
        candidate = index;
    }
    return candidate;
}
Also used : ArrayList(java.util.ArrayList) JSONException(org.json_voltpatches.JSONException) Index(org.voltdb.catalog.Index) Constraint(org.voltdb.catalog.Constraint) StmtTableScan(org.voltdb.planner.parseinfo.StmtTableScan) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) StmtTargetTableScan(org.voltdb.planner.parseinfo.StmtTargetTableScan) ColumnRef(org.voltdb.catalog.ColumnRef)
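
The candidate-caching pattern in the loop above can be sketched in isolation: an index that also covers the MIN/MAX argument is returned immediately, while an index that merely covers the group-by columns is remembered as a fallback. The IndexFit enum below is an assumed simplification of the per-index coverage checks, not a VoltDB type.

import java.util.List;

public class MinMaxIndexCandidateSketch {

    // Assumed simplification of the coverage checks performed per index above.
    enum IndexFit { NO_MATCH, COVERS_GROUP_BY, COVERS_GROUP_BY_AND_AGG }

    // Returns the position of the chosen index in fits, or -1 when none qualifies.
    static int chooseIndex(List<IndexFit> fits) {
        int candidate = -1;
        for (int i = 0; i < fits.size(); i++) {
            IndexFit fit = fits.get(i);
            if (fit == IndexFit.NO_MATCH) {
                continue; // does not even cover the group-by columns
            }
            if (fit == IndexFit.COVERS_GROUP_BY_AND_AGG) {
                return i; // optimal: also covers the MIN/MAX argument, stop searching
            }
            candidate = i; // sub-optimal: remember it, but keep looking for something better
        }
        return candidate;
    }

    public static void main(String[] args) {
        System.out.println(chooseIndex(List.of(
                IndexFit.NO_MATCH, IndexFit.COVERS_GROUP_BY, IndexFit.COVERS_GROUP_BY_AND_AGG))); // 2
        System.out.println(chooseIndex(List.of(
                IndexFit.NO_MATCH, IndexFit.COVERS_GROUP_BY))); // 1
    }
}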

Example 9 with ColumnRef

Use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.

The class MaterializedViewProcessor, method setGroupedTablePartitionColumn.

private void setGroupedTablePartitionColumn(MaterializedViewInfo mvi, Column partitionColumn) throws VoltCompilerException {
    // A view of a replicated table is replicated.
    // A view of a partitioned table is partitioned -- regardless of whether it has a partition key
    // -- it certainly isn't replicated!
    // If the partitioning column is grouped, its counterpart is the partitioning column of the view table.
    // Otherwise, the view table just doesn't have a partitioning column
    // -- it is seemingly randomly distributed,
    // and its grouped columns are only locally unique but not globally unique.
    Table destTable = mvi.getDest();
    // Get the grouped columns in "index" order.
    // This order corresponds to the iteration order of the MaterializedViewInfo's group by columns.
    List<Column> destColumnArray = CatalogUtil.getSortedCatalogItems(destTable.getColumns(), "index");
    // Note getTypeName gets the column name -- go figure.
    String partitionColName = partitionColumn.getTypeName();
    if (mvi.getGroupbycols().size() > 0) {
        int index = 0;
        for (ColumnRef cref : CatalogUtil.getSortedCatalogItems(mvi.getGroupbycols(), "index")) {
            Column srcCol = cref.getColumn();
            if (srcCol.getName().equals(partitionColName)) {
                Column destCol = destColumnArray.get(index);
                destTable.setPartitioncolumn(destCol);
                return;
            }
            ++index;
        }
    } else {
        String complexGroupbyJson = mvi.getGroupbyexpressionsjson();
        if (complexGroupbyJson.length() > 0) {
            int partitionColIndex = partitionColumn.getIndex();
            List<AbstractExpression> mvComplexGroupbyCols = null;
            try {
                mvComplexGroupbyCols = AbstractExpression.fromJSONArrayString(complexGroupbyJson, null);
            } catch (JSONException e) {
                e.printStackTrace();
            }
            int index = 0;
            for (AbstractExpression expr : mvComplexGroupbyCols) {
                if (expr instanceof TupleValueExpression) {
                    TupleValueExpression tve = (TupleValueExpression) expr;
                    if (tve.getColumnIndex() == partitionColIndex) {
                        Column destCol = destColumnArray.get(index);
                        destTable.setPartitioncolumn(destCol);
                        return;
                    }
                }
                ++index;
            }
        }
    }
}
Also used : TupleValueExpression(org.voltdb.expressions.TupleValueExpression) Table(org.voltdb.catalog.Table) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) JSONException(org.json_voltpatches.JSONException) ColumnRef(org.voltdb.catalog.ColumnRef) Constraint(org.voltdb.catalog.Constraint)
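
A small standalone sketch of the position mapping used above, under the assumption (as in the method) that the i-th group-by column of the view corresponds to the i-th column of the view's destination table. Plain String lists stand in for catalog Column and ColumnRef objects.

import java.util.List;

public class ViewPartitionColumnSketch {

    // Returns the destination-table column name to partition the view on,
    // or null when the source partition column is not among the group-by columns.
    static String viewPartitionColumn(List<String> groupByColumns,
                                      List<String> destColumns,
                                      String sourcePartitionColumn) {
        for (int i = 0; i < groupByColumns.size(); i++) {
            if (groupByColumns.get(i).equals(sourcePartitionColumn)) {
                // The i-th group-by column maps to the i-th column of the view table.
                return destColumns.get(i);
            }
        }
        return null;
    }

    public static void main(String[] args) {
        List<String> groupBy = List.of("REGION", "STORE_ID");
        List<String> dest = List.of("REGION", "STORE_ID", "ORDER_COUNT");
        System.out.println(viewPartitionColumn(groupBy, dest, "STORE_ID"));    // STORE_ID
        System.out.println(viewPartitionColumn(groupBy, dest, "CUSTOMER_ID")); // null
    }
}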

Example 10 with ColumnRef

Use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.

The class PlanAssembler, method getNextInsertPlan.

/**
     * Get the next (only) plan for a SQL insertion. Inserts are pretty simple
     * and this will only generate a single plan.
     *
     * @return The next (only) plan for a given insert statement, then null.
     */
private CompiledPlan getNextInsertPlan() {
    // do it the right way once, then return null after that
    if (m_bestAndOnlyPlanWasGenerated) {
        return null;
    }
    m_bestAndOnlyPlanWasGenerated = true;
    // figure out which table we're inserting into
    assert (m_parsedInsert.m_tableList.size() == 1);
    Table targetTable = m_parsedInsert.m_tableList.get(0);
    StmtSubqueryScan subquery = m_parsedInsert.getSubqueryScan();
    CompiledPlan retval = null;
    String isContentDeterministic = null;
    if (subquery != null) {
        isContentDeterministic = subquery.calculateContentDeterminismMessage();
        if (subquery.getBestCostPlan() == null) {
            // The subquery could not be planned; the failure was detected
            // in getBestCostPlan, above.
            throw new PlanningErrorException("INSERT INTO ... SELECT subquery could not be planned: " + m_recentErrorMsg);
        }
        boolean targetIsExportTable = tableListIncludesExportOnly(m_parsedInsert.m_tableList);
        InsertSubPlanAssembler subPlanAssembler = new InsertSubPlanAssembler(m_catalogDb, m_parsedInsert, m_partitioning, targetIsExportTable);
        AbstractPlanNode subplan = subPlanAssembler.nextPlan();
        if (subplan == null) {
            throw new PlanningErrorException(subPlanAssembler.m_recentErrorMsg);
        }
        assert (m_partitioning.isJoinValid());
        //  Use the subquery's plan as the basis for the insert plan.
        retval = subquery.getBestCostPlan();
    } else {
        retval = new CompiledPlan();
    }
    retval.setReadOnly(false);
    // Note: content determinism has already been computed, for the INSERT ... SELECT ... case,
    // by analyzing the subquery.
    if (m_parsedInsert.m_isUpsert) {
        boolean hasPrimaryKey = false;
        for (Constraint constraint : targetTable.getConstraints()) {
            if (constraint.getType() != ConstraintType.PRIMARY_KEY.getValue()) {
                continue;
            }
            hasPrimaryKey = true;
            boolean targetsPrimaryKey = false;
            for (ColumnRef colRef : constraint.getIndex().getColumns()) {
                int primary = colRef.getColumn().getIndex();
                for (Column targetCol : m_parsedInsert.m_columns.keySet()) {
                    if (targetCol.getIndex() == primary) {
                        targetsPrimaryKey = true;
                        break;
                    }
                }
                if (!targetsPrimaryKey) {
                    throw new PlanningErrorException("UPSERT on table \"" + targetTable.getTypeName() + "\" must specify a value for primary key \"" + colRef.getColumn().getTypeName() + "\".");
                }
            }
        }
        if (!hasPrimaryKey) {
            throw new PlanningErrorException("UPSERT is not allowed on table \"" + targetTable.getTypeName() + "\" that has no primary key.");
        }
    }
    CatalogMap<Column> targetTableColumns = targetTable.getColumns();
    for (Column col : targetTableColumns) {
        boolean needsValue = (!m_parsedInsert.m_isUpsert) && (col.getNullable() == false) && (col.getDefaulttype() == 0);
        if (needsValue && !m_parsedInsert.m_columns.containsKey(col)) {
            // This check could be done during parsing?
            throw new PlanningErrorException("Column " + col.getName() + " has no default and is not nullable.");
        }
        // If this column is the table's partitioning column, record its value expression as a
        // hint that this statement can be executed SP.
        if (col.equals(m_partitioning.getPartitionColForDML()) && subquery == null) {
            // When AdHoc insert-into-select is supported, we'll need to be able to infer
            // partitioning of the sub-select
            AbstractExpression expr = m_parsedInsert.getExpressionForPartitioning(col);
            String fullColumnName = targetTable.getTypeName() + "." + col.getTypeName();
            m_partitioning.addPartitioningExpression(fullColumnName, expr, expr.getValueType());
        }
    }
    NodeSchema matSchema = null;
    if (subquery == null) {
        matSchema = new NodeSchema();
    }
    int[] fieldMap = new int[m_parsedInsert.m_columns.size()];
    int i = 0;
    // Iterate over the statement's target columns in the order the user listed them:
    //   - Build the field map, which tells the executor where each produced value lands in the row.
    //   - For VALUES(...) insert statements, build the materialize node's schema
    for (Map.Entry<Column, AbstractExpression> e : m_parsedInsert.m_columns.entrySet()) {
        Column col = e.getKey();
        fieldMap[i] = col.getIndex();
        if (matSchema != null) {
            AbstractExpression valExpr = e.getValue();
            valExpr.setInBytes(col.getInbytes());
            // Patch over any mismatched expressions with an explicit cast.
            // Most impossible-to-cast type combinations should have already been caught by the
            // parser, but there are also runtime checks in the casting code
            // -- such as for out of range values.
            valExpr = castExprIfNeeded(valExpr, col);
            matSchema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, col.getTypeName(), col.getTypeName(), valExpr);
        }
        i++;
    }
    // the root of the insert plan may be an InsertPlanNode, or
    // it may be a scan plan node.  We may do an inline InsertPlanNode
    // as well.
    InsertPlanNode insertNode = new InsertPlanNode();
    insertNode.setTargetTableName(targetTable.getTypeName());
    if (subquery != null) {
        insertNode.setSourceIsPartitioned(!subquery.getIsReplicated());
    }
    // The field map tells the insert node
    // where to put values produced by child into the row to be inserted.
    insertNode.setFieldMap(fieldMap);
    AbstractPlanNode root = insertNode;
    if (matSchema != null) {
        MaterializePlanNode matNode = new MaterializePlanNode(matSchema);
        // connect the insert and the materialize nodes together
        insertNode.addAndLinkChild(matNode);
        retval.statementGuaranteesDeterminism(false, true, isContentDeterministic);
    } else {
        ScanPlanNodeWithInlineInsert planNode = (retval.rootPlanGraph instanceof ScanPlanNodeWithInlineInsert) ? ((ScanPlanNodeWithInlineInsert) retval.rootPlanGraph) : null;
        // Inline upsert might be possible, but not now.
        if (planNode != null && (!m_parsedInsert.m_isUpsert) && (!planNode.hasInlineAggregateNode())) {
            planNode.addInlinePlanNode(insertNode);
            root = planNode.getAbstractNode();
        } else {
            // Otherwise just make it out-of-line.
            insertNode.addAndLinkChild(retval.rootPlanGraph);
        }
    }
    if (m_partitioning.wasSpecifiedAsSingle() || m_partitioning.isInferredSingle()) {
        insertNode.setMultiPartition(false);
        retval.rootPlanGraph = root;
        return retval;
    }
    insertNode.setMultiPartition(true);
    // Add a compensating sum of modified tuple counts or a limit 1
    // AND a send on top of a union-like receive node.
    boolean isReplicated = targetTable.getIsreplicated();
    retval.rootPlanGraph = addCoordinatorToDMLNode(root, isReplicated);
    return retval;
}
Also used : StmtSubqueryScan(org.voltdb.planner.parseinfo.StmtSubqueryScan) AbstractPlanNode(org.voltdb.plannodes.AbstractPlanNode) Table(org.voltdb.catalog.Table) Constraint(org.voltdb.catalog.Constraint) InsertPlanNode(org.voltdb.plannodes.InsertPlanNode) MaterializePlanNode(org.voltdb.plannodes.MaterializePlanNode) Constraint(org.voltdb.catalog.Constraint) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) SchemaColumn(org.voltdb.plannodes.SchemaColumn) ColumnRef(org.voltdb.catalog.ColumnRef) Map(java.util.Map) CatalogMap(org.voltdb.catalog.CatalogMap) HashMap(java.util.HashMap) NodeSchema(org.voltdb.plannodes.NodeSchema)
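
The field map built above can be illustrated on its own. The sketch below assumes a LinkedHashMap from insert-column name to target-table position, standing in for m_parsedInsert.m_columns and its statement-declared column order; the column names and positions are made up.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

public class InsertFieldMapSketch {

    // statementColumns maps each insert column (in statement order, hence LinkedHashMap)
    // to its position in the target table's row.
    static int[] buildFieldMap(Map<String, Integer> statementColumns) {
        int[] fieldMap = new int[statementColumns.size()];
        int i = 0;
        for (int targetPosition : statementColumns.values()) {
            // The i-th value produced by the child plan lands at row position fieldMap[i].
            fieldMap[i++] = targetPosition;
        }
        return fieldMap;
    }

    public static void main(String[] args) {
        // INSERT INTO t (C, A) VALUES (...), for a table declared as (A, B, C).
        Map<String, Integer> cols = new LinkedHashMap<>();
        cols.put("C", 2);
        cols.put("A", 0);
        System.out.println(Arrays.toString(buildFieldMap(cols))); // [2, 0]
    }
}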

Aggregations

ColumnRef (org.voltdb.catalog.ColumnRef): 29
AbstractExpression (org.voltdb.expressions.AbstractExpression): 19
Column (org.voltdb.catalog.Column): 18
Constraint (org.voltdb.catalog.Constraint): 16
ArrayList (java.util.ArrayList): 14
JSONException (org.json_voltpatches.JSONException): 14
Index (org.voltdb.catalog.Index): 9
Table (org.voltdb.catalog.Table): 9
TupleValueExpression (org.voltdb.expressions.TupleValueExpression): 9
HashSet (java.util.HashSet): 5
StmtTableScan (org.voltdb.planner.parseinfo.StmtTableScan): 5
StmtTargetTableScan (org.voltdb.planner.parseinfo.StmtTargetTableScan): 4
SchemaColumn (org.voltdb.plannodes.SchemaColumn): 4
VoltType (org.voltdb.VoltType): 3
VoltCompilerException (org.voltdb.compiler.VoltCompiler.VoltCompilerException): 3
ConstantValueExpression (org.voltdb.expressions.ConstantValueExpression): 3
HashMap (java.util.HashMap): 2
Set (java.util.Set): 2
VoltXMLElement (org.hsqldb_voltpatches.VoltXMLElement): 2
Procedure (org.voltdb.catalog.Procedure): 2