Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.
Class DDLCompiler, method addIndexToCatalog:
private static void addIndexToCatalog(Database db, Table table, VoltXMLElement node, Map<String, String> indexReplacementMap, HashMap<String, Index> indexMap, HashMap<String, Column> columnMap, VoltCompiler compiler) throws VoltCompilerException {
assert node.name.equals("index");
String name = node.attributes.get("name");
boolean unique = Boolean.parseBoolean(node.attributes.get("unique"));
boolean assumeUnique = Boolean.parseBoolean(node.attributes.get("assumeunique"));
AbstractParsedStmt dummy = new ParsedSelectStmt(null, db);
dummy.setDDLIndexedTable(table);
StringBuffer msg = new StringBuffer(String.format("Index \"%s\" ", name));
// "parse" the expression trees for an expression-based index (vs. a simple column value index)
List<AbstractExpression> exprs = null;
// "parse" the WHERE expression for partial index if any
AbstractExpression predicate = null;
// Some expressions receive special validation for indexes. Not every expression
// can be indexed: we scan each expression's result type first and block those that
// can't be indexed, such as boolean and geo types. The rest of the expressions are
// gathered into the checkExpressions list and validated together afterward.
List<AbstractExpression> checkExpressions = new ArrayList<>();
for (VoltXMLElement subNode : node.children) {
if (subNode.name.equals("exprs")) {
exprs = new ArrayList<>();
for (VoltXMLElement exprNode : subNode.children) {
AbstractExpression expr = dummy.parseExpressionTree(exprNode);
expr.resolveForTable(table);
expr.finalizeValueTypes();
// exprMsg will be populated with the expression's details when
// its value type is not indexable
StringBuffer exprMsg = new StringBuffer();
if (!expr.isValueTypeIndexable(exprMsg)) {
// indexing on an expression whose result type is not indexable (e.g. boolean) is not supported.
throw compiler.new VoltCompilerException("Cannot create index \"" + name + "\" because it contains " + exprMsg + ", which is not supported.");
}
if ((unique || assumeUnique) && !expr.isValueTypeUniqueIndexable(exprMsg)) {
// a unique index on an expression with this result type is not supported.
throw compiler.new VoltCompilerException("Cannot create unique index \"" + name + "\" because it contains " + exprMsg + ", which is not supported.");
}
// rest of the validity guards will be evaluated after collecting all the expressions.
checkExpressions.add(expr);
exprs.add(expr);
}
} else if (subNode.name.equals("predicate")) {
assert (subNode.children.size() == 1);
VoltXMLElement predicateXML = subNode.children.get(0);
assert (predicateXML != null);
predicate = buildPartialIndexPredicate(dummy, name, predicateXML, table, compiler);
}
}
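// For orientation (illustrative, simplified -- the exact VoltXML attribute set comes
// from HSQL): a statement like
//   CREATE UNIQUE INDEX idx ON t (abs(a)) WHERE a IS NOT NULL;
// arrives here roughly as
//   <index name="IDX" unique="true" columns="...">
//     <exprs><function ... /></exprs>
//     <predicate><operation ... /></predicate>
//   </index>
// so both exprs and predicate are populated by the loop above.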
// Check all the subexpressions we gathered up.
if (!AbstractExpression.validateExprsForIndexesAndMVs(checkExpressions, msg)) {
// The error message will be in the StringBuffer msg.
throw compiler.new VoltCompilerException(msg.toString());
}
String colList = node.attributes.get("columns");
String[] colNames = colList.split(",");
Column[] columns = new Column[colNames.length];
boolean has_nonint_col = false;
boolean has_geo_col = false;
String nonint_col_name = null;
for (int i = 0; i < colNames.length; i++) {
columns[i] = columnMap.get(colNames[i]);
if (columns[i] == null) {
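// A column named by the index is missing from the table's column map;
// give up without adding the index to the catalog.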
return;
}
}
UnsafeOperatorsForDDL unsafeOps = new UnsafeOperatorsForDDL();
if (exprs == null) {
for (int i = 0; i < colNames.length; i++) {
VoltType colType = VoltType.get((byte) columns[i].getType());
if (!colType.isIndexable()) {
String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values are not currently supported as index keys: \"" + colNames[i] + "\"";
throw compiler.new VoltCompilerException(emsg);
}
if ((unique || assumeUnique) && !colType.isUniqueIndexable()) {
String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values are not currently supported as unique index keys: \"" + colNames[i] + "\"";
throw compiler.new VoltCompilerException(emsg);
}
if (!colType.isBackendIntegerType()) {
has_nonint_col = true;
nonint_col_name = colNames[i];
has_geo_col = colType.equals(VoltType.GEOGRAPHY);
if (has_geo_col && colNames.length > 1) {
String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values must be the only component of an index key: \"" + nonint_col_name + "\"";
throw compiler.new VoltCompilerException(emsg);
}
}
}
} else {
for (AbstractExpression expression : exprs) {
VoltType colType = expression.getValueType();
if (!colType.isIndexable()) {
String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " valued expressions are not currently supported as index keys.";
throw compiler.new VoltCompilerException(emsg);
}
if ((unique || assumeUnique) && !colType.isUniqueIndexable()) {
String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " valued expressions are not currently supported as unique index keys.";
throw compiler.new VoltCompilerException(emsg);
}
if (!colType.isBackendIntegerType()) {
has_nonint_col = true;
nonint_col_name = "<expression>";
has_geo_col = colType.equals(VoltType.GEOGRAPHY);
if (has_geo_col) {
if (exprs.size() > 1) {
String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values must be the only component of an index key.";
throw compiler.new VoltCompilerException(emsg);
} else if (!(expression instanceof TupleValueExpression)) {
String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " expressions must be simple column expressions.";
throw compiler.new VoltCompilerException(emsg);
}
}
}
expression.findUnsafeOperatorsForDDL(unsafeOps);
}
}
Index index = table.getIndexes().add(name);
index.setCountable(false);
index.setIssafewithnonemptysources(!unsafeOps.isUnsafe());
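// Record whether the index expressions use any operators flagged as unsafe for
// non-empty source tables (gathered into unsafeOps above).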
// Set the index type. It will be one of:
// - Covering cell index (geo index for CONTAINS predicates)
// - HASH index (set in HSQL because "hash" is in the name of the
// constraint or the index)
// - TREE index, which is the default
boolean isHashIndex = node.attributes.get("ishashindex").equals("true");
if (has_geo_col) {
index.setType(IndexType.COVERING_CELL_INDEX.getValue());
} else if (isHashIndex) {
// warn user that hash index will be deprecated
compiler.addWarn("Hash indexes are deprecated. In a future release, VoltDB will only support tree indexes, even if the index name contains the string \"hash\"");
// make the index a hash.
if (has_nonint_col) {
String emsg = "Index " + name + " in table " + table.getTypeName() + " uses a non-hashable column " + nonint_col_name;
throw compiler.new VoltCompilerException(emsg);
}
index.setType(IndexType.HASH_TABLE.getValue());
} else {
index.setType(IndexType.BALANCED_TREE.getValue());
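// Only tree indexes are marked countable (able to answer ordered range-count
// queries); the flag stays false on the other branches.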
index.setCountable(true);
}
// For an expression index, the columns listed here do not hold the index's key values,
// but they still represent the columns that will trigger an index update when their values change.
for (int i = 0; i < columns.length; i++) {
ColumnRef cref = index.getColumns().add(columns[i].getTypeName());
cref.setColumn(columns[i]);
cref.setIndex(i);
}
if (exprs != null) {
try {
index.setExpressionsjson(convertToJSONArray(exprs));
} catch (JSONException e) {
throw compiler.new VoltCompilerException("Unexpected error serializing non-column expressions for index '" + name + "' on type '" + table.getTypeName() + "': " + e.toString());
}
}
index.setUnique(unique);
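// ASSUMEUNIQUE asserts global uniqueness that is only enforced per partition,
// so it implies the unique flag as well.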
if (assumeUnique) {
index.setUnique(true);
}
index.setAssumeunique(assumeUnique);
if (predicate != null) {
try {
index.setPredicatejson(convertToJSONObject(predicate));
} catch (JSONException e) {
throw compiler.new VoltCompilerException("Unexpected error serializing predicate for partial index '" + name + "' on type '" + table.getTypeName() + "': " + e.toString());
}
}
// Check whether this new index duplicates an existing one. This must happen after the
// expressions and predicate JSON are set, since differences there
// will make two indexes different
for (Index existingIndex : table.getIndexes()) {
// skip thineself
if (existingIndex == index) {
continue;
}
if (indexesAreDups(existingIndex, index)) {
// replace any constraints using one index with the other
//for () TODO
// get ready for replacements from constraints created later
indexReplacementMap.put(index.getTypeName(), existingIndex.getTypeName());
// if the index is a user-named index...
if (index.getTypeName().startsWith(HSQLInterface.AUTO_GEN_PREFIX) == false) {
// on dup-detection, add a warning but don't fail
String emsg = String.format("Dropping index %s on table %s because it duplicates index %s.", index.getTypeName(), table.getTypeName(), existingIndex.getTypeName());
compiler.addWarn(emsg);
}
// drop the index and GTFO
table.getIndexes().delete(index.getTypeName());
return;
}
}
String smsg = "Created index: " + name + " on table: " + table.getTypeName() + " of type: " + IndexType.get(index.getType()).name();
compiler.addInfo(smsg);
indexMap.put(name, index);
}
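For orientation, a minimal sketch (hypothetical table and index names, not from the VoltDB source) of DDL that exercises the branches above:

// CREATE TABLE points (id INTEGER NOT NULL, region GEOGRAPHY, score FLOAT);
// CREATE INDEX idx_score ON points (score);
//     -- accepted: defaults to BALANCED_TREE and is marked countable
// CREATE INDEX idx_region ON points (region);
//     -- accepted: a lone GEOGRAPHY column yields a COVERING_CELL_INDEX
// CREATE INDEX idx_geo_bad ON points (region, score);
//     -- rejected: GEOGRAPHY must be the only component of an index key
// CREATE INDEX idx_id_hash ON points (id);
//     -- HASH_TABLE with a deprecation warning; every key column must be a
//     -- backend integer type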
Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.
Class DDLCompiler, method addTableToCatalog:
private void addTableToCatalog(Database db, VoltXMLElement node, boolean isXDCR) throws VoltCompilerException {
assert node.name.equals("table");
// Construct table-specific maps
HashMap<String, Column> columnMap = new HashMap<>();
HashMap<String, Index> indexMap = new HashMap<>();
final String name = node.attributes.get("name");
// create a table node in the catalog
final Table table = db.getTables().add(name);
// set max value before return for view table
table.setTuplelimit(Integer.MAX_VALUE);
// add the original DDL to the table (or null if it's not there)
TableAnnotation annotation = new TableAnnotation();
table.setAnnotation(annotation);
// handle the case where this is a materialized view
final String query = node.attributes.get("query");
if (query != null) {
assert (query.length() > 0);
m_matViewMap.put(table, query);
}
final boolean isStream = (node.attributes.get("stream") != null);
final String streamTarget = node.attributes.get("export");
final String streamPartitionColumn = node.attributes.get("partitioncolumn");
// all tables start replicated
// if a partition is found in the project file later,
// then this is reversed
table.setIsreplicated(true);
// map of index replacements for later constraint fixup
final Map<String, String> indexReplacementMap = new TreeMap<>();
// Need the columnTypes sorted by column index.
SortedMap<Integer, VoltType> columnTypes = new TreeMap<>();
for (VoltXMLElement subNode : node.children) {
if (subNode.name.equals("columns")) {
int colIndex = 0;
for (VoltXMLElement columnNode : subNode.children) {
if (columnNode.name.equals("column")) {
addColumnToCatalog(table, columnNode, columnTypes, columnMap, m_compiler);
colIndex++;
}
}
// limit the total number of columns in a table
if (colIndex > MAX_COLUMNS) {
String msg = "Table " + name + " has " + colIndex + " columns (max is " + MAX_COLUMNS + ")";
throw m_compiler.new VoltCompilerException(msg);
}
}
if (subNode.name.equals("indexes")) {
// Add user-defined indexes before the auto-generated system indexes, so that
// duplicate detection keeps the user-defined version and the replacement map
// can redirect constraints that refer to them.
for (VoltXMLElement indexNode : subNode.children) {
if (indexNode.name.equals("index") == false)
continue;
String indexName = indexNode.attributes.get("name");
if (indexName.startsWith(HSQLInterface.AUTO_GEN_IDX_PREFIX) == false) {
addIndexToCatalog(db, table, indexNode, indexReplacementMap, indexMap, columnMap, m_compiler);
}
}
for (VoltXMLElement indexNode : subNode.children) {
if (indexNode.name.equals("index") == false)
continue;
String indexName = indexNode.attributes.get("name");
if (indexName.startsWith(HSQLInterface.AUTO_GEN_IDX_PREFIX) == true) {
addIndexToCatalog(db, table, indexNode, indexReplacementMap, indexMap, columnMap, m_compiler);
}
}
}
if (subNode.name.equals("constraints")) {
for (VoltXMLElement constraintNode : subNode.children) {
if (constraintNode.name.equals("constraint")) {
addConstraintToCatalog(table, constraintNode, indexReplacementMap, indexMap);
}
}
}
}
// Warn the user if a DR table doesn't have any unique index.
if (isXDCR && node.attributes.get("drTable") != null && node.attributes.get("drTable").equalsIgnoreCase("ENABLE")) {
boolean hasUniqueIndex = false;
for (Index index : table.getIndexes()) {
if (index.getUnique()) {
hasUniqueIndex = true;
break;
}
}
if (!hasUniqueIndex) {
String info = String.format("Table %s doesn't have any unique index, it will cause full table scans to update/delete DR record and may become slower as table grow.", table.getTypeName());
m_compiler.addWarn(info);
}
}
table.setSignature(CatalogUtil.getSignatureForTable(name, columnTypes));
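// The signature combines the table name with its ordered column types; it is used
// elsewhere to detect changes in a table's shape.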
/*
* Validate that each variable-length column is below the max value length,
* and that the maximum size for the row is below the max row length.
*/
int maxRowSize = 0;
for (Column c : columnMap.values()) {
VoltType t = VoltType.get((byte) c.getType());
if (t == VoltType.STRING && (!c.getInbytes())) {
if (c.getSize() * MAX_BYTES_PER_UTF8_CHARACTER > VoltType.MAX_VALUE_LENGTH) {
throw m_compiler.new VoltCompilerException("Column " + name + "." + c.getName() + " specifies a maximum size of " + c.getSize() + " characters" + " but the maximum supported size is " + VoltType.humanReadableSize(VoltType.MAX_VALUE_LENGTH / MAX_BYTES_PER_UTF8_CHARACTER) + " characters or " + VoltType.humanReadableSize(VoltType.MAX_VALUE_LENGTH) + " bytes");
}
maxRowSize += 4 + c.getSize() * MAX_BYTES_PER_UTF8_CHARACTER;
} else if (t.isVariableLength()) {
if (c.getSize() > VoltType.MAX_VALUE_LENGTH) {
throw m_compiler.new VoltCompilerException("Column " + name + "." + c.getName() + " specifies a maximum size of " + c.getSize() + " bytes" + " but the maximum supported size is " + VoltType.humanReadableSize(VoltType.MAX_VALUE_LENGTH));
}
maxRowSize += 4 + c.getSize();
} else {
maxRowSize += t.getLengthInBytesForFixedTypes();
}
}
if (maxRowSize > MAX_ROW_SIZE) {
throw m_compiler.new VoltCompilerException("Error: Table " + name + " has a maximum row size of " + maxRowSize + " but the maximum supported row size is " + MAX_ROW_SIZE);
}
// For a materialized view, the defining query itself serves as
// the DDL statement for the VIEW
if (query != null) {
annotation.ddl = query;
} else {
// Get the final DDL for the table rebuilt from the catalog object
// Don't need a real StringBuilder or export state to get the CREATE for a table
annotation.ddl = CatalogSchemaTools.toSchema(new StringBuilder(), table, query, isStream, streamPartitionColumn, streamTarget);
}
}
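The row-size arithmetic above can be checked by hand. Below is a minimal standalone sketch of just the math, assuming MAX_BYTES_PER_UTF8_CHARACTER is 4 as in the compiler; the class and method names are illustrative, not VoltDB API:

public final class RowSizeMath {
    static final int MAX_BYTES_PER_UTF8_CHARACTER = 4;

    // Worst-case bytes a variable-length column contributes to the row-size check.
    static int variableColumnFootprint(boolean sizedInCharacters, int declaredSize) {
        if (sizedInCharacters) {
            // 4-byte length prefix plus the worst-case UTF-8 payload per character.
            return 4 + declaredSize * MAX_BYTES_PER_UTF8_CHARACTER;
        }
        // Declared directly in bytes (e.g. VARBINARY).
        return 4 + declaredSize;
    }

    public static void main(String[] args) {
        // VARCHAR(100) in characters: 4 + 100 * 4 = 404 bytes toward MAX_ROW_SIZE
        System.out.println(variableColumnFootprint(true, 100));
        // VARBINARY(64): 4 + 64 = 68 bytes
        System.out.println(variableColumnFootprint(false, 64));
    }
}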
Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.
Class PlanAssembler, method getNextInsertPlan:
/**
* Get the next (only) plan for a SQL insertion. Inserts are pretty simple
* and this will only generate a single plan.
*
* @return The next (only) plan for a given insert statement, then null.
*/
private CompiledPlan getNextInsertPlan() {
// do it the right way once, then return null after that
if (m_bestAndOnlyPlanWasGenerated) {
return null;
}
m_bestAndOnlyPlanWasGenerated = true;
// figure out which table we're inserting into
assert (m_parsedInsert.m_tableList.size() == 1);
Table targetTable = m_parsedInsert.m_tableList.get(0);
StmtSubqueryScan subquery = m_parsedInsert.getSubqueryScan();
CompiledPlan retval = null;
String isContentDeterministic = null;
if (subquery != null) {
isContentDeterministic = subquery.calculateContentDeterminismMessage();
if (subquery.getBestCostPlan() == null) {
// The subquery could not be planned; the underlying error was reported
// in getBestCostPlan, above.
throw new PlanningErrorException("INSERT INTO ... SELECT subquery could not be planned: " + m_recentErrorMsg);
}
boolean targetIsExportTable = tableListIncludesExportOnly(m_parsedInsert.m_tableList);
InsertSubPlanAssembler subPlanAssembler = new InsertSubPlanAssembler(m_catalogDb, m_parsedInsert, m_partitioning, targetIsExportTable);
AbstractPlanNode subplan = subPlanAssembler.nextPlan();
if (subplan == null) {
throw new PlanningErrorException(subPlanAssembler.m_recentErrorMsg);
}
assert (m_partitioning.isJoinValid());
// Use the subquery's plan as the basis for the insert plan.
retval = subquery.getBestCostPlan();
} else {
retval = new CompiledPlan();
}
retval.setReadOnly(false);
// Note: any content non-determinism was already detected above
// for the INSERT ... SELECT ... case, by analyzing the subquery.
if (m_parsedInsert.m_isUpsert) {
boolean hasPrimaryKey = false;
for (Constraint constraint : targetTable.getConstraints()) {
if (constraint.getType() != ConstraintType.PRIMARY_KEY.getValue()) {
continue;
}
hasPrimaryKey = true;
boolean targetsPrimaryKey = false;
for (ColumnRef colRef : constraint.getIndex().getColumns()) {
int primary = colRef.getColumn().getIndex();
for (Column targetCol : m_parsedInsert.m_columns.keySet()) {
if (targetCol.getIndex() == primary) {
targetsPrimaryKey = true;
break;
}
}
if (!targetsPrimaryKey) {
throw new PlanningErrorException("UPSERT on table \"" + targetTable.getTypeName() + "\" must specify a value for primary key \"" + colRef.getColumn().getTypeName() + "\".");
}
}
}
if (!hasPrimaryKey) {
throw new PlanningErrorException("UPSERT is not allowed on table \"" + targetTable.getTypeName() + "\" that has no primary key.");
}
}
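// Illustration (hypothetical schema, not from the source): given
//   CREATE TABLE t (a INTEGER NOT NULL, b INTEGER NOT NULL, PRIMARY KEY (a, b));
//   UPSERT INTO t (a) VALUES (?);
// the inner loop finds no inserted column matching primary-key column b, so the
// check above raises:
//   UPSERT on table "T" must specify a value for primary key "B".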
CatalogMap<Column> targetTableColumns = targetTable.getColumns();
for (Column col : targetTableColumns) {
boolean needsValue = (!m_parsedInsert.m_isUpsert) && (col.getNullable() == false) && (col.getDefaulttype() == 0);
if (needsValue && !m_parsedInsert.m_columns.containsKey(col)) {
// This check could be done during parsing?
throw new PlanningErrorException("Column " + col.getName() + " has no default and is not nullable.");
}
// If this is the partitioning column, record the expression assigned to it as a
// hint that this statement can be executed SP.
if (col.equals(m_partitioning.getPartitionColForDML()) && subquery == null) {
// When AdHoc insert-into-select is supported, we'll need to be able to infer
// partitioning of the sub-select
AbstractExpression expr = m_parsedInsert.getExpressionForPartitioning(col);
String fullColumnName = targetTable.getTypeName() + "." + col.getTypeName();
m_partitioning.addPartitioningExpression(fullColumnName, expr, expr.getValueType());
}
}
NodeSchema matSchema = null;
if (subquery == null) {
matSchema = new NodeSchema();
}
int[] fieldMap = new int[m_parsedInsert.m_columns.size()];
int i = 0;
// For each column being inserted into:
// - Build the field map, used by the insert node to place values into the row
// - For VALUES(...) insert statements, build the materialize node's schema
for (Map.Entry<Column, AbstractExpression> e : m_parsedInsert.m_columns.entrySet()) {
Column col = e.getKey();
fieldMap[i] = col.getIndex();
if (matSchema != null) {
AbstractExpression valExpr = e.getValue();
valExpr.setInBytes(col.getInbytes());
// Patch over any mismatched expressions with an explicit cast.
// Most impossible-to-cast type combinations should have already been caught by the
// parser, but there are also runtime checks in the casting code
// -- such as for out of range values.
valExpr = castExprIfNeeded(valExpr, col);
matSchema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, col.getTypeName(), col.getTypeName(), valExpr);
}
i++;
}
// the root of the insert plan may be an InsertPlanNode, or
// it may be a scan plan node. We may do an inline InsertPlanNode
// as well.
InsertPlanNode insertNode = new InsertPlanNode();
insertNode.setTargetTableName(targetTable.getTypeName());
if (subquery != null) {
insertNode.setSourceIsPartitioned(!subquery.getIsReplicated());
}
// The field map tells the insert node
// where to put values produced by child into the row to be inserted.
insertNode.setFieldMap(fieldMap);
AbstractPlanNode root = insertNode;
if (matSchema != null) {
MaterializePlanNode matNode = new MaterializePlanNode(matSchema);
// connect the insert and the materialize nodes together
insertNode.addAndLinkChild(matNode);
retval.statementGuaranteesDeterminism(false, true, isContentDeterministic);
} else {
ScanPlanNodeWithInlineInsert planNode = (retval.rootPlanGraph instanceof ScanPlanNodeWithInlineInsert) ? ((ScanPlanNodeWithInlineInsert) retval.rootPlanGraph) : null;
// Inline upsert might be possible, but not now.
if (planNode != null && (!m_parsedInsert.m_isUpsert) && (!planNode.hasInlineAggregateNode())) {
planNode.addInlinePlanNode(insertNode);
root = planNode.getAbstractNode();
} else {
// Otherwise just make it out-of-line.
insertNode.addAndLinkChild(retval.rootPlanGraph);
}
}
if (m_partitioning.wasSpecifiedAsSingle() || m_partitioning.isInferredSingle()) {
insertNode.setMultiPartition(false);
retval.rootPlanGraph = root;
return retval;
}
insertNode.setMultiPartition(true);
// Add a compensating sum of modified tuple counts or a limit 1
// AND a send on top of a union-like receive node.
boolean isReplicated = targetTable.getIsreplicated();
retval.rootPlanGraph = addCoordinatorToDMLNode(root, isReplicated);
return retval;
}
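To make the field map concrete, a small illustration with a hypothetical schema (not from the source):

// CREATE TABLE t (c0 INT, c1 INT, c2 INT, c3 INT);
// INSERT INTO t (c3, c1) VALUES (?, ?);
// Assuming m_parsedInsert.m_columns iterates in statement order (c3, then c1),
// the loop above produces:
//   fieldMap = { 3, 1 }  // value 0 fills column index 3, value 1 fills column index 1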
Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.
Class QueryPlanner, method compileFromXML:
/**
* Find the best plan given the VoltXMLElement. By best here we mean the plan
* which is scored the best according to our plan metric scoring. The plan
* metric scoring takes into account join order and index use, but it does
* not take into account the output schema. Consequently, we don't compute the
* output schema for the plan nodes until after the best plan is discovered.
*
* The order here is:
* <ol>
* <li>
* Parse the VoltXMLElement to create an AbstractParsedStatement. This has
* a side effect of loading lists of join orders and access paths for planning.
* For us, an access path is a way of scanning something scannable. It's a generalization
* of the notion of scanning a table or an index.
* </li>
* <li>
* Create a PlanAssembler, and ask it for the best cost plan. This uses the
* side data created by the parser in the previous step.
* </li>
* <li>
* If the plan is read only, slap a SendPlanNode on the front. Presumably
* an insert, delete or upsert will have added the SendPlanNode into the plan node tree already.
* </li>
* <li>
* Compute the output schema. This computes the output schema for each
* node recursively, using a node specific method.
* </li>
* <li>
* Resolve the column indices. This makes sure that the indices of all
* TVEs in the output columns refer to the right input columns.
* </li>
* <li>
* Do some final cleanup and verification of the plan. For example,
* we renumber the nodes starting at 1.
* </li>
* </ol>
*
* @param xmlSQL
* @param paramValues
* @return
*/
private CompiledPlan compileFromXML(VoltXMLElement xmlSQL, String[] paramValues) {
// Get a parsed statement from the xml
// The callers of compilePlan are ready to catch any exceptions thrown here.
AbstractParsedStmt parsedStmt = AbstractParsedStmt.parse(m_sql, xmlSQL, paramValues, m_db, m_joinOrder);
if (parsedStmt == null) {
m_recentErrorMsg = "Failed to parse SQL statement: " + getOriginalSql();
return null;
}
if (m_isUpsert) {
// no insert/upsert with joins
if (parsedStmt.m_tableList.size() != 1) {
m_recentErrorMsg = "UPSERT is supported only with one single table: " + getOriginalSql();
return null;
}
Table tb = parsedStmt.m_tableList.get(0);
Constraint pkey = null;
for (Constraint ct : tb.getConstraints()) {
if (ct.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
pkey = ct;
break;
}
}
if (pkey == null) {
m_recentErrorMsg = "Unsupported UPSERT table without primary key: " + getOriginalSql();
return null;
}
}
m_planSelector.outputParsedStatement(parsedStmt);
// Init Assembler. Each plan assembler requires a new instance of the PlanSelector
// to keep track of the best plan
PlanAssembler assembler = new PlanAssembler(m_db, m_partitioning, (PlanSelector) m_planSelector.clone());
// find the plan with minimal cost
CompiledPlan bestPlan = assembler.getBestCostPlan(parsedStmt);
// make sure we got a winner
if (bestPlan == null) {
if (m_debuggingStaticModeToRetryOnError) {
assembler.getBestCostPlan(parsedStmt);
}
m_recentErrorMsg = assembler.getErrorMessage();
if (m_recentErrorMsg == null) {
m_recentErrorMsg = "Unable to plan for statement. Error unknown.";
}
return null;
}
if (bestPlan.isReadOnly()) {
SendPlanNode sendNode = new SendPlanNode();
// connect the nodes to build the graph
sendNode.addAndLinkChild(bestPlan.rootPlanGraph);
// this plan is final, generate schema and resolve all the column index references
bestPlan.rootPlanGraph = sendNode;
}
// Execute the generateOutputSchema and resolveColumnIndexes once for the best plan
bestPlan.rootPlanGraph.generateOutputSchema(m_db);
bestPlan.rootPlanGraph.resolveColumnIndexes();
if (parsedStmt instanceof ParsedSelectStmt) {
List<SchemaColumn> columns = bestPlan.rootPlanGraph.getOutputSchema().getColumns();
((ParsedSelectStmt) parsedStmt).checkPlanColumnMatch(columns);
}
// Output the best plan debug info
assembler.finalizeBestCostPlan();
// reset all the plan node ids for a given plan
// this makes the ids deterministic
bestPlan.resetPlanNodeIds(1);
// split up the plan everywhere we see send/receive into multiple plan fragments
List<AbstractPlanNode> receives = bestPlan.rootPlanGraph.findAllNodesOfClass(AbstractReceivePlanNode.class);
if (receives.size() > 1) {
// Too many receive nodes for the two-fragment plan limit
m_recentErrorMsg = "This join of multiple partitioned tables is too complex. " + "Consider simplifying its subqueries: " + getOriginalSql();
return null;
}
/*/ enable for debug ...
if (receives.size() > 1) {
System.out.println(plan.rootPlanGraph.toExplainPlanString());
}
// ... enable for debug */
if (receives.size() == 1) {
AbstractReceivePlanNode recvNode = (AbstractReceivePlanNode) receives.get(0);
fragmentize(bestPlan, recvNode);
}
return bestPlan;
}
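For intuition, the split performed on the single receive node can be pictured as follows; this is a simplified sketch, not the actual fragmentize implementation:

// before (single plan graph):
//   Send <- {coordinator nodes} <- Receive <- {per-partition nodes} <- Scan
// after (two fragments):
//   coordinator fragment:   Send <- {coordinator nodes} <- Receive
//   collector fragment:     Send <- {per-partition nodes} <- Scan
// More than one Receive would require more than two fragments, which is why the
// check above rejects such plans.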
Use of org.voltdb.catalog.Constraint in project voltdb by VoltDB.
Class TestCatalogUtil, method testToSchema:
/**
*
*/
public void testToSchema() {
String search_str = "";
// Simple check to make sure things look ok...
for (Table catalog_tbl : catalog_db.getTables()) {
StringBuilder sb = new StringBuilder();
CatalogSchemaTools.toSchema(sb, catalog_tbl, null, false, null, null);
String sql = sb.toString();
assertTrue(sql.startsWith("CREATE TABLE " + catalog_tbl.getTypeName()));
// Columns
for (Column catalog_col : catalog_tbl.getColumns()) {
assertTrue(sql.indexOf(catalog_col.getTypeName()) != -1);
}
// Constraints
for (Constraint catalog_const : catalog_tbl.getConstraints()) {
ConstraintType const_type = ConstraintType.get(catalog_const.getType());
Index catalog_idx = catalog_const.getIndex();
List<ColumnRef> columns = CatalogUtil.getSortedCatalogItems(catalog_idx.getColumns(), "index");
if (!columns.isEmpty()) {
search_str = "";
String add = "";
for (ColumnRef catalog_colref : columns) {
search_str += add + catalog_colref.getColumn().getTypeName();
add = ", ";
}
assertTrue(sql.indexOf(search_str) != -1);
}
switch(const_type) {
case PRIMARY_KEY:
assertTrue(sql.indexOf("PRIMARY KEY") != -1);
break;
case FOREIGN_KEY:
search_str = "REFERENCES " + catalog_const.getForeignkeytable().getTypeName();
assertTrue(sql.indexOf(search_str) != -1);
break;
}
}
}
}
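For reference, the generated text being scanned has roughly this shape; the example below is illustrative, and the exact formatting comes from CatalogSchemaTools.toSchema:

// CREATE TABLE CUSTOMER (
//     C_ID INTEGER NOT NULL,
//     C_NAME VARCHAR(32),
//     CONSTRAINT PK_CUSTOMER PRIMARY KEY (C_ID)
// );
// A FOREIGN KEY constraint would additionally emit
//     ... REFERENCES <parent table> (<columns>)
// which is what the REFERENCES assertion above looks for.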