
Example 1 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

From the class ReportMaker, method generateSchemaRow.

static String generateSchemaRow(Table table, boolean isExportTable) {
    StringBuilder sb = new StringBuilder();
    sb.append("<tr class='primaryrow'>");
    // column 1: table name
    String anchor = table.getTypeName().toLowerCase();
    sb.append("<td style='white-space: nowrap;'><i id='s-" + anchor + "--icon' class='icon-chevron-right'></i> <a href='#' id='s-");
    sb.append(anchor).append("' class='togglex'>");
    sb.append(table.getTypeName());
    sb.append("</a></td>");
    // column 2: type
    sb.append("<td>");
    if (table.getMaterializer() != null) {
        tag(sb, "info", "Materialized View");
    } else {
        if (isExportTable) {
            tag(sb, "inverse", "Export Streams");
        } else {
            tag(sb, null, "Table");
        }
    }
    sb.append("</td>");
    // column 3: partitioning
    sb.append("<td style='whitespace: nowrap;'>");
    if (table.getIsreplicated()) {
        tag(sb, "warning", "Replicated");
    } else {
        tag(sb, "success", "Partitioned");
        Column partitionCol = table.getPartitioncolumn();
        if (partitionCol != null) {
            sb.append("<small> on " + partitionCol.getName() + "</small>");
        } else {
            Table matSrc = table.getMaterializer();
            if (matSrc != null) {
                sb.append("<small> with " + matSrc.getTypeName() + "</small>");
            }
        }
    }
    sb.append("</td>");
    // column 4: column count
    sb.append("<td>");
    sb.append(table.getColumns().size());
    sb.append("</td>");
    // column 5: index count
    sb.append("<td>");
    sb.append(table.getIndexes().size());
    // computing unused indexes
    int unusedIndexes = 0;
    for (Index index : table.getIndexes()) {
        IndexAnnotation indexAnnotation = (IndexAnnotation) index.getAnnotation();
        if (indexAnnotation == null) {
            unusedIndexes++;
        }
    }
    if (unusedIndexes != 0) {
        sb.append(" (" + unusedIndexes + " unused)");
    }
    sb.append("</td>");
    // column 6: has pkey
    sb.append("<td>");
    boolean found = false;
    for (Constraint constraint : table.getConstraints()) {
        if (ConstraintType.get(constraint.getType()) == ConstraintType.PRIMARY_KEY) {
            found = true;
            break;
        }
    }
    if (found) {
        tag(sb, "info", "Has-PKey");
    } else {
        tag(sb, null, "No-PKey");
    }
    sb.append("</td>");
    // column 7: has tuple limit
    sb.append("<td>");
    if (table.getTuplelimit() != Integer.MAX_VALUE) {
        tag(sb, "info", String.valueOf(table.getTuplelimit()));
        if (CatalogUtil.getLimitPartitionRowsDeleteStmt(table) != null) {
            sb.append("<small>enforced by DELETE statement</small>");
        }
    } else {
        tag(sb, null, "No-limit");
    }
    sb.append("</td>");
    sb.append("</tr>\n");
    // BUILD THE DROPDOWN FOR THE DDL / INDEXES DETAIL
    sb.append("<tr class='tablesorter-childRow'><td class='invert' colspan='7' id='s-" + table.getTypeName().toLowerCase() + "--dropdown'>\n");
    TableAnnotation annotation = (TableAnnotation) table.getAnnotation();
    if (annotation != null) {
        // output the DDL
        if (annotation.ddl == null) {
            sb.append("<p>MISSING DDL</p>\n");
        } else {
            String ddl = escapeHtml4(annotation.ddl);
            sb.append("<p><pre>" + ddl + "</pre></p>\n");
        }
        // make sure procs appear in only one category
        annotation.proceduresThatReadThis.removeAll(annotation.proceduresThatUpdateThis);
        if (annotation.proceduresThatReadThis.size() > 0) {
            sb.append("<p>Read-only by procedures: ");
            List<String> procs = new ArrayList<String>();
            for (Procedure proc : annotation.proceduresThatReadThis) {
                procs.add("<a href='#p-" + proc.getTypeName() + "'>" + proc.getTypeName() + "</a>");
            }
            sb.append(StringUtils.join(procs, ", "));
            sb.append("</p>");
        }
        if (annotation.proceduresThatUpdateThis.size() > 0) {
            sb.append("<p>Read/Write by procedures: ");
            List<String> procs = new ArrayList<String>();
            for (Procedure proc : annotation.proceduresThatUpdateThis) {
                procs.add("<a href='#p-" + proc.getTypeName() + "'>" + proc.getTypeName() + "</a>");
            }
            sb.append(StringUtils.join(procs, ", "));
            sb.append("</p>");
        }
    }
    // LIMIT PARTITION ROW statement may also use the index in this table, prepare the information for report
    if (!table.getTuplelimitdeletestmt().isEmpty()) {
        assert (table.getTuplelimitdeletestmt().size() == 1);
        Statement stmt = table.getTuplelimitdeletestmt().iterator().next();
        for (String tableDotIndexPair : stmt.getIndexesused().split(",")) {
            if (tableDotIndexPair.length() == 0) {
                continue;
            }
            String[] parts = tableDotIndexPair.split("\\.", 2);
            assert (parts.length == 2);
            if (parts.length != 2) {
                continue;
            }
            String tableName = parts[0];
            String indexName = parts[1];
            if (!table.getTypeName().equals(tableName)) {
                continue;
            }
            Index i = table.getIndexes().get(indexName);
            assert (i != null);
            IndexAnnotation ia = (IndexAnnotation) i.getAnnotation();
            if (ia == null) {
                ia = new IndexAnnotation();
                i.setAnnotation(ia);
            }
            ia.statementsThatUseThis.add(stmt);
        }
    }
    if (table.getIndexes().size() > 0) {
        sb.append(generateIndexesTable(table));
    } else {
        sb.append("<p>No indexes defined on table.</p>\n");
    }
    // Generate explainview report.
    if (table.getMaterializer() != null) {
        sb.append(generateExplainViewTable(table));
    }
    sb.append("</td></tr>\n");
    return sb.toString();
}
Also used : Table(org.voltdb.catalog.Table) Constraint(org.voltdb.catalog.Constraint) Statement(org.voltdb.catalog.Statement) ArrayList(java.util.ArrayList) Index(org.voltdb.catalog.Index) Column(org.voltdb.catalog.Column) Procedure(org.voltdb.catalog.Procedure)
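
The primary-key detection in column 6 above is an idiom that recurs throughout these examples. A minimal standalone sketch of it, assuming the VoltDB catalog API shown above (the class and helper names here are illustrative, not part of ReportMaker), could look like this:

import org.voltdb.catalog.Constraint;
import org.voltdb.catalog.Table;
import org.voltdb.types.ConstraintType;

// Sketch: detect whether a catalog Table declares a PRIMARY KEY constraint.
// Class and helper names are illustrative only.
final class PkeyCheck {
    static boolean hasPrimaryKey(Table table) {
        for (Constraint constraint : table.getConstraints()) {
            // Constraint.getType() stores the ConstraintType as an int.
            if (ConstraintType.get(constraint.getType()) == ConstraintType.PRIMARY_KEY) {
                return true;
            }
        }
        return false;
    }
}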

Example 2 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

From the class LoadSinglepartitionTable, method run.

/**
     * These parameters, with the exception of ctx, map to user provided values.
     *
     * @param ctx Internal API provided to all system procedures.
     * @param partitionParam Partitioning parameter used to match invocation to partition.
     * @param tableName Name of persistent, partitioned table receiving data.
     * @param upsertMode True if using upsert instead of insert. If using insert, this proc
     *              will fail if any uniqueness constraints are violated.
     * @param table A VoltTable with schema matching the target table containing data to load.
     *              It's assumed that each row in this table partitions to the same partition
     *              as the other rows, and to the same partition as the partition parameter.
     * @return The number of rows modified. This will be inserts in insert mode, but in upsert
     *              mode, this will be the sum of inserts and updates.
     * @throws VoltAbortException on any failure, but the most common failures are non-matching
     *              partitioning or unique constraint violations.
     */
public long run(SystemProcedureExecutionContext ctx, byte[] partitionParam, String tableName, byte upsertMode, VoltTable table) throws VoltAbortException {
    // if tableName is replicated, fail.
    // otherwise, create a VoltTable for each partition and
    // split up the incoming table .. then send those partial
    // tables to the appropriate sites.
    // Get the metadata object for the table in question from the global metadata/config
    // store, the Catalog.
    Table catTable = ctx.getDatabase().getTables().getIgnoreCase(tableName);
    if (catTable == null) {
        throw new VoltAbortException("Table not present in catalog.");
    }
    // if tableName is replicated, fail.
    if (catTable.getIsreplicated()) {
        throw new VoltAbortException(String.format("LoadSinglepartitionTable incompatible with replicated table %s.", tableName));
    }
    // convert from 8bit signed integer (byte) to boolean
    boolean isUpsert = (upsertMode != 0);
    // upsert requires a primary key on the table to work
    if (isUpsert) {
        boolean hasPkey = false;
        for (Constraint c : catTable.getConstraints()) {
            if (c.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
                hasPkey = true;
                break;
            }
        }
        if (!hasPkey) {
            throw new VoltAbortException(String.format("The --update argument cannot be used for LoadingSinglePartionTable because the table %s does not have a primary key. " + "Either remove the --update argument or add a primary key to the table.", tableName));
        }
    }
    // action should be either "insert" or "upsert"
    final String action = (isUpsert ? "upsert" : "insert");
    // fix any case problems
    tableName = catTable.getTypeName();
    // check that the schema of the input matches
    int columnCount = table.getColumnCount();
    //////////////////////////////////////////////////////////////////////
    // Find the insert/upsert statement for this table
    // This is actually the big trick this procedure does.
    // It borrows the insert plan from the auto-generated insert procedure
    // named "TABLENAME.insert" or "TABLENAME.upsert".
    // We don't like it when users do this stuff, but it is safe in this
    // case.
    //
    // Related code to read is org.voltdb.DefaultProcedureManager, which
    // manages all of the default (CRUD) procedures created lazily for
    // each table in the database, including the plans used here.
    //
    String crudProcName = String.format("%s.%s", tableName, action);
    Procedure p = ctx.ensureDefaultProcLoaded(crudProcName);
    if (p == null) {
        throw new VoltAbortException(String.format("Unable to locate auto-generated CRUD %s statement for table %s", action, tableName));
    }
    // statements of all single-statement procs are named "sql"
    Statement catStmt = p.getStatements().get(VoltDB.ANON_STMT_NAME);
    if (catStmt == null) {
        throw new VoltAbortException(String.format("Unable to find SQL statement for found table %s: BAD", tableName));
    }
    // Create a SQLStmt instance on the fly
    // This is unusual to do, as SQLStmts are typically required to be final instance variables.
    // This only works because the SQL text and plan are identical to those of the borrowed procedure.
    SQLStmt stmt = new SQLStmt(catStmt.getSqltext());
    m_runner.initSQLStmt(stmt, catStmt);
    long queued = 0;
    long executed = 0;
    // make sure at the start of the table
    table.resetRowPosition();
    // iterate over the rows queueing a sql statement for each row to insert
    for (int i = 0; table.advanceRow(); ++i) {
        Object[] params = new Object[columnCount];
        // get the parameters from the volt table
        for (int col = 0; col < columnCount; ++col) {
            params[col] = table.get(col, table.getColumnType(col));
        }
        // queue an insert and count it
        voltQueueSQL(stmt, params);
        ++queued;
        // 100 is an arbitrary number
        if ((i % 100) == 0) {
            executed += executeSQL();
        }
    }
    // execute any leftover batched statements
    if (queued > executed) {
        executed += executeSQL();
    }
    return executed;
}
Also used : SQLStmt(org.voltdb.SQLStmt) Table(org.voltdb.catalog.Table) VoltTable(org.voltdb.VoltTable) Constraint(org.voltdb.catalog.Constraint) Statement(org.voltdb.catalog.Statement) VoltSystemProcedure(org.voltdb.VoltSystemProcedure) Procedure(org.voltdb.catalog.Procedure)
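
Two preconditions in run() are worth isolating: upsert requires a primary key, and the borrowed plan is looked up under the default procedure name "TABLENAME.insert" or "TABLENAME.upsert". A hedged sketch of just that gatekeeping logic, assuming the catalog API used above (the class, method, and exception choices here are ours; the real procedure throws VoltAbortException):

import org.voltdb.catalog.Constraint;
import org.voltdb.catalog.Table;
import org.voltdb.types.ConstraintType;

// Sketch: resolve the default CRUD procedure name to borrow a plan from,
// enforcing the "upsert needs a primary key" rule. Names are illustrative.
final class CrudPlanLookup {
    static String crudProcName(Table catTable, boolean isUpsert) {
        if (isUpsert && !hasPrimaryKey(catTable)) {
            // The system procedure raises VoltAbortException here instead.
            throw new IllegalArgumentException(
                    "upsert requires a primary key on table " + catTable.getTypeName());
        }
        // Use the catalog's canonical table name to avoid case mismatches.
        return catTable.getTypeName() + "." + (isUpsert ? "upsert" : "insert");
    }

    private static boolean hasPrimaryKey(Table catTable) {
        for (Constraint c : catTable.getConstraints()) {
            if (c.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
                return true;
            }
        }
        return false;
    }
}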

Example 3 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

From the class CatalogSchemaTools, method toSchema.

/**
     * Convert a Table catalog object into the proper SQL DDL, including all indexes,
     * constraints, and foreign key references.
     * Also returns just the CREATE TABLE statement, since, like all good methods,
     * it should have two purposes....
     * It would be nice to have a separate method to just generate the CREATE TABLE,
     * but we use that pass to also figure out what separate constraint and index
     * SQL DDL needs to be generated, so instead, we opt to build the CREATE TABLE DDL
     * separately as we go here, and then fill it in to the StringBuilder being used
     * to construct the full canonical DDL at the appropriate time.
     * @param sb - the schema being built
     * @param catalog_tbl - object to be analyzed
     * @param viewQuery - the Query if this Table is a View
     * @param isExportOnly Is this an export table.
     * @param streamPartitionColumn stream partition column
     * @param streamTarget - the export target name for this stream; the default connector name means no explicit target
     * @return SQL Schema text representing the CREATE TABLE statement to generate the table
     */
public static String toSchema(StringBuilder sb, Table catalog_tbl, String viewQuery, boolean isExportOnly, String streamPartitionColumn, String streamTarget) {
    assert (!catalog_tbl.getColumns().isEmpty());
    boolean tableIsView = (viewQuery != null);
    // We need the intermediate results of building the table schema string so that
    // we can return the full CREATE TABLE statement, so accumulate it separately
    final StringBuilder table_sb = new StringBuilder();
    final Set<Index> skip_indexes = new HashSet<>();
    final Set<Constraint> skip_constraints = new HashSet<>();
    if (tableIsView) {
        table_sb.append("CREATE VIEW ").append(catalog_tbl.getTypeName()).append(" (");
    } else {
        if (isExportOnly) {
            table_sb.append("CREATE STREAM ").append(catalog_tbl.getTypeName());
            if (streamPartitionColumn != null && viewQuery == null) {
                table_sb.append(" PARTITION ON COLUMN ").append(streamPartitionColumn);
            }
            //Default target means no target.
            if (streamTarget != null && !streamTarget.equalsIgnoreCase(Constants.DEFAULT_EXPORT_CONNECTOR_NAME)) {
                table_sb.append(" EXPORT TO TARGET ").append(streamTarget);
            }
        } else {
            table_sb.append("CREATE TABLE ").append(catalog_tbl.getTypeName());
        }
        table_sb.append(" (");
    }
    // Columns
    String add = "\n";
    for (Column catalog_col : CatalogUtil.getSortedCatalogItems(catalog_tbl.getColumns(), "index")) {
        VoltType col_type = VoltType.get((byte) catalog_col.getType());
        if (tableIsView) {
            table_sb.append(add).append(spacer).append(catalog_col.getTypeName());
            add = ",\n";
            continue;
        }
        table_sb.append(add).append(spacer).append(catalog_col.getTypeName()).append(" ").append(col_type.toSQLString()).append(col_type.isVariableLength() && catalog_col.getSize() > 0 ? "(" + catalog_col.getSize() + (catalog_col.getInbytes() ? " BYTES" : "") + ")" : "");
        // Default value
        String defaultvalue = catalog_col.getDefaultvalue();
        //VoltType defaulttype = VoltType.get((byte)catalog_col.getDefaulttype());
        boolean nullable = catalog_col.getNullable();
        // TODO: Shouldn't have to check whether the string contains "null"
        if (defaultvalue == null) {
        } else if (defaultvalue.toLowerCase().equals("null") && nullable) {
            defaultvalue = null;
        } else {
            if (col_type == VoltType.TIMESTAMP) {
                if (defaultvalue.startsWith("CURRENT_TIMESTAMP")) {
                    defaultvalue = "CURRENT_TIMESTAMP";
                } else {
                    assert (defaultvalue.matches("[0-9]+"));
                    long epoch = Long.parseLong(defaultvalue);
                    Date d = new Date(epoch / 1000);
                    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
                    defaultvalue = "\'" + sdf.format(d) + "." + StringUtils.leftPad(String.valueOf(epoch % 1000000), 6, "0") + "\'";
                }
            } else {
                // XXX: if (defaulttype != VoltType.VOLTFUNCTION) {
                // TODO: Escape strings properly
                defaultvalue = defaultvalue.replace("\'", "\'\'");
                defaultvalue = "'" + defaultvalue + "'";
            }
        }
        if (defaultvalue == null) {
            table_sb.append((!nullable ? " NOT NULL" : ""));
        } else {
            table_sb.append(" DEFAULT ").append(defaultvalue != null ? defaultvalue : "NULL").append(!nullable ? " NOT NULL" : "");
        }
        // Single-column constraints
        for (ConstraintRef catalog_const_ref : catalog_col.getConstraints()) {
            Constraint catalog_const = catalog_const_ref.getConstraint();
            ConstraintType const_type = ConstraintType.get(catalog_const.getType());
            // Check if there is another column in our table with the same constraint
            // If there is, then we need to add it to the end of the table definition
            boolean found = false;
            for (Column catalog_other_col : catalog_tbl.getColumns()) {
                if (catalog_other_col.equals(catalog_col))
                    continue;
                if (catalog_other_col.getConstraints().getIgnoreCase(catalog_const.getTypeName()) != null) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                switch(const_type) {
                    case FOREIGN_KEY:
                        {
                            Table catalog_fkey_tbl = catalog_const.getForeignkeytable();
                            Column catalog_fkey_col = null;
                            for (ColumnRef ref : catalog_const.getForeignkeycols()) {
                                catalog_fkey_col = ref.getColumn();
                                // Nasty hack to get first item
                                break;
                            }
                            assert (catalog_fkey_col != null);
                            table_sb.append(" REFERENCES ").append(catalog_fkey_tbl.getTypeName()).append(" (").append(catalog_fkey_col.getTypeName()).append(")");
                            skip_constraints.add(catalog_const);
                            break;
                        }
                    default:
                }
            }
        }
        add = ",\n";
    }
    // Constraints
    for (Constraint catalog_const : catalog_tbl.getConstraints()) {
        if (skip_constraints.contains(catalog_const))
            continue;
        ConstraintType const_type = ConstraintType.get(catalog_const.getType());
        // Primary Keys / Unique Constraints
        if (const_type == ConstraintType.PRIMARY_KEY || const_type == ConstraintType.UNIQUE) {
            Index catalog_idx = catalog_const.getIndex();
            if (!tableIsView) {
                // Get the ConstraintType.
                table_sb.append(add).append(spacer);
                if (!catalog_const.getTypeName().startsWith(HSQLInterface.AUTO_GEN_PREFIX)) {
                    table_sb.append("CONSTRAINT ").append(catalog_const.getTypeName()).append(" ");
                }
                if (const_type == ConstraintType.PRIMARY_KEY || const_type == ConstraintType.UNIQUE) {
                    if (const_type == ConstraintType.PRIMARY_KEY) {
                        table_sb.append("PRIMARY KEY (");
                    } else {
                        if (catalog_idx.getAssumeunique()) {
                            table_sb.append("ASSUMEUNIQUE (");
                        } else {
                            table_sb.append("UNIQUE (");
                        }
                    }
                    String col_add = "";
                    if (catalog_idx.getExpressionsjson() != null && !catalog_idx.getExpressionsjson().equals("")) {
                        String exprStrings = "";
                        StmtTargetTableScan tableScan = new StmtTargetTableScan(catalog_tbl);
                        try {
                            List<AbstractExpression> expressions = AbstractExpression.fromJSONArrayString(catalog_idx.getExpressionsjson(), tableScan);
                            String sep = "";
                            for (AbstractExpression expr : expressions) {
                                exprStrings += sep + expr.explain(catalog_tbl.getTypeName());
                                sep = ",";
                            }
                        } catch (JSONException e) {
                        }
                        table_sb.append(col_add).append(exprStrings);
                    } else {
                        for (ColumnRef catalog_colref : CatalogUtil.getSortedCatalogItems(catalog_idx.getColumns(), "index")) {
                            table_sb.append(col_add).append(catalog_colref.getColumn().getTypeName());
                            col_add = ", ";
                        }
                    // FOR
                    }
                    table_sb.append(")");
                }
            }
            if (catalog_idx.getTypeName().startsWith(HSQLInterface.AUTO_GEN_PREFIX) || catalog_idx.getTypeName().startsWith(HSQLInterface.AUTO_GEN_MATVIEW)) {
                skip_indexes.add(catalog_idx);
            }
        // Foreign Key
        } else if (const_type == ConstraintType.FOREIGN_KEY) {
            Table catalog_fkey_tbl = catalog_const.getForeignkeytable();
            String col_add = "";
            String our_columns = "";
            String fkey_columns = "";
            for (ColumnRef catalog_colref : catalog_const.getForeignkeycols()) {
                // The name of the ColumnRef is the column in our base table
                Column our_column = catalog_tbl.getColumns().getIgnoreCase(catalog_colref.getTypeName());
                assert (our_column != null);
                our_columns += col_add + our_column.getTypeName();
                Column fkey_column = catalog_colref.getColumn();
                assert (fkey_column != null);
                fkey_columns += col_add + fkey_column.getTypeName();
                col_add = ", ";
            }
            table_sb.append(add).append(spacer + "CONSTRAINT ").append(catalog_const.getTypeName()).append(" FOREIGN KEY (").append(our_columns).append(") REFERENCES ").append(catalog_fkey_tbl.getTypeName()).append(" (").append(fkey_columns).append(")");
        }
        skip_constraints.add(catalog_const);
    }
    if (catalog_tbl.getTuplelimit() != Integer.MAX_VALUE) {
        table_sb.append(add).append(spacer + "LIMIT PARTITION ROWS ").append(String.valueOf(catalog_tbl.getTuplelimit()));
        String deleteStmt = CatalogUtil.getLimitPartitionRowsDeleteStmt(catalog_tbl);
        if (deleteStmt != null) {
            if (deleteStmt.endsWith(";")) {
                // StatementCompiler appends the semicolon, we don't want it here.
                deleteStmt = deleteStmt.substring(0, deleteStmt.length() - 1);
            }
            table_sb.append("\n" + spacer + spacer + "EXECUTE (").append(deleteStmt).append(")");
        }
    }
    if (viewQuery != null) {
        table_sb.append("\n) AS \n");
        table_sb.append(spacer).append(viewQuery).append(";\n");
    } else {
        table_sb.append("\n);\n");
    }
    // We've built the full CREATE TABLE statement for this table,
    // Append the generated table schema to the canonical DDL StringBuilder
    sb.append(table_sb.toString());
    // Partition Table for regular tables (non-streams)
    if (catalog_tbl.getPartitioncolumn() != null && viewQuery == null && !isExportOnly) {
        sb.append("PARTITION TABLE ").append(catalog_tbl.getTypeName()).append(" ON COLUMN ").append(catalog_tbl.getPartitioncolumn().getTypeName()).append(";\n");
    }
    // All other Indexes
    for (Index catalog_idx : catalog_tbl.getIndexes()) {
        if (skip_indexes.contains(catalog_idx))
            continue;
        if (catalog_idx.getUnique()) {
            if (catalog_idx.getAssumeunique()) {
                sb.append("CREATE ASSUMEUNIQUE INDEX ");
            } else {
                sb.append("CREATE UNIQUE INDEX ");
            }
        } else {
            sb.append("CREATE INDEX ");
        }
        sb.append(catalog_idx.getTypeName()).append(" ON ").append(catalog_tbl.getTypeName()).append(" (");
        add = "";
        String jsonstring = catalog_idx.getExpressionsjson();
        if (jsonstring.isEmpty()) {
            for (ColumnRef catalog_colref : CatalogUtil.getSortedCatalogItems(catalog_idx.getColumns(), "index")) {
                sb.append(add).append(catalog_colref.getColumn().getTypeName());
                add = ", ";
            }
        } else {
            List<AbstractExpression> indexedExprs = null;
            try {
                indexedExprs = AbstractExpression.fromJSONArrayString(jsonstring, new StmtTargetTableScan(catalog_tbl));
            } catch (JSONException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            if (indexedExprs != null) {
                for (AbstractExpression expr : indexedExprs) {
                    sb.append(add).append(expr.explain(catalog_tbl.getTypeName()));
                    add = ", ";
                }
            }
        }
        sb.append(")");
        String jsonPredicate = catalog_idx.getPredicatejson();
        if (!jsonPredicate.isEmpty()) {
            try {
                AbstractExpression predicate = AbstractExpression.fromJSONString(jsonPredicate, new StmtTargetTableScan(catalog_tbl));
                sb.append(" WHERE ").append(predicate.explain(catalog_tbl.getTypeName()));
            } catch (JSONException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
        sb.append(";\n");
    }
    if (catalog_tbl.getIsdred()) {
        sb.append("DR TABLE ").append(catalog_tbl.getTypeName()).append(";\n");
    }
    sb.append("\n");
    // statement to whoever might be interested (DDLCompiler, I'm looking in your direction)
    return table_sb.toString();
}
Also used : Table(org.voltdb.catalog.Table) Constraint(org.voltdb.catalog.Constraint) JSONException(org.json_voltpatches.JSONException) Index(org.voltdb.catalog.Index) Date(java.util.Date) AbstractExpression(org.voltdb.expressions.AbstractExpression) Column(org.voltdb.catalog.Column) VoltType(org.voltdb.VoltType) StmtTargetTableScan(org.voltdb.planner.parseinfo.StmtTargetTableScan) ConstraintType(org.voltdb.types.ConstraintType) ColumnRef(org.voltdb.catalog.ColumnRef) SimpleDateFormat(java.text.SimpleDateFormat) HashSet(java.util.HashSet) ConstraintRef(org.voltdb.catalog.ConstraintRef)
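
The PRIMARY KEY / UNIQUE / ASSUMEUNIQUE branch in the constraint loop above reduces to a small rendering decision. A toy version over plain column-name strings (no catalog objects; all names below are ours) shows the shape of the emitted DDL fragment:

import java.util.List;

// Toy rendering of the key-constraint branch: CONSTRAINT name PRIMARY KEY (...),
// UNIQUE (...), or ASSUMEUNIQUE (...). Purely illustrative, no catalog types.
final class ConstraintDdlSketch {
    static String renderKeyConstraint(String constraintName, boolean isPrimaryKey,
                                      boolean assumeUnique, List<String> columns) {
        StringBuilder sb = new StringBuilder();
        if (constraintName != null) {
            sb.append("CONSTRAINT ").append(constraintName).append(" ");
        }
        if (isPrimaryKey) {
            sb.append("PRIMARY KEY (");
        } else {
            sb.append(assumeUnique ? "ASSUMEUNIQUE (" : "UNIQUE (");
        }
        return sb.append(String.join(", ", columns)).append(")").toString();
    }

    public static void main(String[] args) {
        // Prints: CONSTRAINT pk_votes PRIMARY KEY (phone_number, contestant_number)
        System.out.println(renderKeyConstraint("pk_votes", true, false,
                List.of("phone_number", "contestant_number")));
    }
}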

Example 4 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

From the class DefaultProcedureManager, method generateCrudPKeyWhereClause.

/**
     * Helper to generate a "WHERE pkey_col1 = ? AND pkey_col2 = ? ..." clause.
     * @param partitioncolumn partitioning column for the table
     * @param pkey primary key constraint from the catalog
     * @param sb StringBuilder accumulating the SQL statement
     * @return offset of the partition column within the primary key index, or -1 if it is not part of the key
     */
private static int generateCrudPKeyWhereClause(Column partitioncolumn, Constraint pkey, StringBuilder sb) {
    // Sort the catalog index columns by index column order.
    ArrayList<ColumnRef> indexColumns = new ArrayList<ColumnRef>(pkey.getIndex().getColumns().size());
    for (ColumnRef c : pkey.getIndex().getColumns()) {
        indexColumns.add(c);
    }
    Collections.sort(indexColumns, new ColumnRefComparator());
    boolean first = true;
    int partitionOffset = -1;
    sb.append(" WHERE ");
    for (ColumnRef pkc : indexColumns) {
        if (!first)
            sb.append(" AND ");
        first = false;
        sb.append("(" + pkc.getColumn().getName() + " = ?" + ")");
        if (pkc.getColumn() == partitioncolumn) {
            partitionOffset = pkc.getIndex();
        }
    }
    return partitionOffset;
}
Also used : ArrayList(java.util.ArrayList) ColumnRef(org.voltdb.catalog.ColumnRef) Constraint(org.voltdb.catalog.Constraint)
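
The clause this helper emits has the shape " WHERE (col1 = ?) AND (col2 = ?)", and the return value is the pkey offset of the partition column. A plain-string sketch of the same logic, with no catalog dependencies (class and method names are ours):

import java.util.List;

// Sketch of the WHERE-clause shape and partition-offset bookkeeping above.
final class PkeyWhereClauseSketch {
    static int appendWhereClause(List<String> pkeyColumns, String partitionColumn,
                                 StringBuilder sb) {
        int partitionOffset = -1;
        sb.append(" WHERE ");
        for (int i = 0; i < pkeyColumns.size(); i++) {
            if (i > 0) {
                sb.append(" AND ");
            }
            sb.append("(").append(pkeyColumns.get(i)).append(" = ?)");
            if (pkeyColumns.get(i).equals(partitionColumn)) {
                partitionOffset = i;  // offset of the partition column within the pkey
            }
        }
        return partitionOffset;
    }

    public static void main(String[] args) {
        StringBuilder sb = new StringBuilder("DELETE FROM VOTES");
        int offset = appendWhereClause(List.of("PHONE_NUMBER", "STATE"), "PHONE_NUMBER", sb);
        // Prints: DELETE FROM VOTES WHERE (PHONE_NUMBER = ?) AND (STATE = ?)  [partition offset 0]
        System.out.println(sb + "  [partition offset " + offset + "]");
    }
}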

Example 5 with Constraint

Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.

From the class DefaultProcedureManager, method build.

private void build() {
    for (Table table : m_db.getTables()) {
        String prefix = table.getTypeName() + '.';
        if (CatalogUtil.isTableExportOnly(m_db, table)) {
            Column partitioncolumn = table.getPartitioncolumn();
            if (partitioncolumn != null) {
                int partitionIndex = partitioncolumn.getIndex();
                addShimProcedure(prefix + "insert", table, null, true, partitionIndex, partitioncolumn, false);
            } else {
                addShimProcedure(prefix + "insert", table, null, true, -1, null, false);
            }
            continue;
        }
        // skip views XXX why no get by pkey?
        if (table.getMaterializer() != null) {
            continue;
        }
        // select/delete/update crud requires pkey. Pkeys are stored as constraints.
        final CatalogMap<Constraint> constraints = table.getConstraints();
        final Iterator<Constraint> it = constraints.iterator();
        Constraint pkey = null;
        while (it.hasNext()) {
            Constraint constraint = it.next();
            if (constraint.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
                pkey = constraint;
                break;
            }
        }
        if (table.getIsreplicated()) {
            // Creating multi-partition insert procedures for replicated table
            addShimProcedure(prefix + "insert", table, null, true, -1, null, false);
            // Creating multi-partition delete/update/upsert procedures for replicated table with pkey
            if (pkey != null) {
                addShimProcedure(prefix + "delete", table, pkey, false, -1, null, false);
                addShimProcedure(prefix + "update", table, pkey, true, -1, null, false);
                addShimProcedure(prefix + "upsert", table, null, true, -1, null, false);
            }
            continue;
        }
        // get the partition column
        final Column partitioncolumn = table.getPartitioncolumn();
        // this check is an accommodation for some tests that don't flesh out a catalog
        if (partitioncolumn == null) {
            continue;
        }
        final int partitionIndex = partitioncolumn.getIndex();
        // all partitioned tables get insert crud procs
        addShimProcedure(prefix + "insert", table, null, true, partitionIndex, partitioncolumn, false);
        // Skip creation of CRUD select/delete/update for partitioned table if no primary key is declared.
        if (pkey == null) {
            continue;
        }
        // Primary key must include the partition column for the table
        // for select/delete/update
        int pkeyPartitionIndex = -1;
        CatalogMap<ColumnRef> pkeycols = pkey.getIndex().getColumns();
        Iterator<ColumnRef> pkeycolsit = pkeycols.iterator();
        while (pkeycolsit.hasNext()) {
            ColumnRef colref = pkeycolsit.next();
            if (colref.getColumn().equals(partitioncolumn)) {
                pkeyPartitionIndex = colref.getIndex();
                break;
            }
        }
        // if primary key does not include the partitioning column.
        if (pkeyPartitionIndex < 0) {
            continue;
        }
        int columnCount = table.getColumns().size();
        // select, delete, update and upsert here (insert generated above)
        // the next three prefix their params with the pkey, so they partition on the index of
        // the partition column within the pkey
        addShimProcedure(prefix + "select", table, pkey, false, pkeyPartitionIndex, partitioncolumn, true);
        addShimProcedure(prefix + "delete", table, pkey, false, pkeyPartitionIndex, partitioncolumn, false);
        // update partitions on the pkey column after the regular column
        addShimProcedure(prefix + "update", table, pkey, true, columnCount + pkeyPartitionIndex, partitioncolumn, false);
        // upsert partitions like a regular insert
        addShimProcedure(prefix + "upsert", table, null, true, partitionIndex, partitioncolumn, false);
    }
}
Also used : Table(org.voltdb.catalog.Table) Column(org.voltdb.catalog.Column) Constraint(org.voltdb.catalog.Constraint) ColumnRef(org.voltdb.catalog.ColumnRef)
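
The gate that build() applies before generating select/delete/update procedures (the partition column must appear in the primary-key index) is compact enough to pull out on its own. A sketch using the catalog types from the example (the helper and class names are ours, not part of DefaultProcedureManager):

import org.voltdb.catalog.Column;
import org.voltdb.catalog.ColumnRef;
import org.voltdb.catalog.Constraint;

// Sketch: offset of the partition column within the pkey index, or -1 if absent.
final class PartitionKeyCheck {
    static int pkeyPartitionOffset(Constraint pkey, Column partitionColumn) {
        for (ColumnRef colref : pkey.getIndex().getColumns()) {
            if (colref.getColumn().equals(partitionColumn)) {
                return colref.getIndex();
            }
        }
        return -1;
    }
}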

Aggregations

Constraint (org.voltdb.catalog.Constraint)17 Table (org.voltdb.catalog.Table)13 Column (org.voltdb.catalog.Column)12 ColumnRef (org.voltdb.catalog.ColumnRef)10 Index (org.voltdb.catalog.Index)9 ArrayList (java.util.ArrayList)5 AbstractExpression (org.voltdb.expressions.AbstractExpression)5 HashSet (java.util.HashSet)4 JSONException (org.json_voltpatches.JSONException)4 Procedure (org.voltdb.catalog.Procedure)4 Statement (org.voltdb.catalog.Statement)4 VoltCompilerException (org.voltdb.compiler.VoltCompiler.VoltCompilerException)4 ConstraintType (org.voltdb.types.ConstraintType)4 HashMap (java.util.HashMap)3 VoltXMLElement (org.hsqldb_voltpatches.VoltXMLElement)3 VoltType (org.voltdb.VoltType)3 TupleValueExpression (org.voltdb.expressions.TupleValueExpression)3 SQLStmt (org.voltdb.SQLStmt)2 VoltSystemProcedure (org.voltdb.VoltSystemProcedure)2 VoltTable (org.voltdb.VoltTable)2