Use of org.voltdb.expressions.AbstractExpression in project voltdb by VoltDB.
From the class TestIndexSelection, method checkIndexPredicateIsNull.
private void checkIndexPredicateIsNull(AbstractPlanNode pn) {
    assertEquals(1, pn.getChildCount());
    pn = pn.getChild(0);
    assertEquals(PlanNodeType.INDEXSCAN, pn.getPlanNodeType());
    IndexScanPlanNode ipn = (IndexScanPlanNode) pn;
    AbstractExpression pred = ipn.getPredicate();
    assertNull(pred);
}
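The helper above drills one level down the plan tree, asserts the child is an index scan, and checks that the scan carries no post-filter predicate. For comparison, here is a minimal sketch, using only the accessors already shown (getChildCount, getChild, getPlanNodeType), of walking a single-child plan chain down to its index scan; the helper name findIndexScan is illustrative and not part of the VoltDB API.
private static IndexScanPlanNode findIndexScan(AbstractPlanNode pn) {
    // follow single-child links until an index scan is found or the chain branches/ends
    while (pn != null) {
        if (pn.getPlanNodeType() == PlanNodeType.INDEXSCAN) {
            return (IndexScanPlanNode) pn;
        }
        pn = (pn.getChildCount() == 1) ? pn.getChild(0) : null;
    }
    return null;
}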
Use of org.voltdb.expressions.AbstractExpression in project voltdb by VoltDB.
From the class TestIndexSelection, method checkIndexPredicateContains.
private void checkIndexPredicateContains(AbstractPlanNode pn, String... columns) {
    assertEquals(1, pn.getChildCount());
    pn = pn.getChild(0);
    assertEquals(PlanNodeType.INDEXSCAN, pn.getPlanNodeType());
    IndexScanPlanNode ipn = (IndexScanPlanNode) pn;
    AbstractExpression pred = ipn.getPredicate();
    assertNotNull(pred);
    List<TupleValueExpression> tves = pred.findAllTupleValueSubexpressions();
    for (TupleValueExpression tve : tves) {
        boolean match = false;
        for (String column : columns) {
            if (tve.getColumnName().equals(column)) {
                match = true;
                break;
            }
        }
        assertTrue(match);
    }
}
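The nested loops above assert that every TupleValueExpression reachable from the index scan's predicate names one of the expected columns. A minimal sketch of the same idea, factored into a helper that collects the referenced column names (it assumes only the AbstractExpression and TupleValueExpression methods already used above, plus java.util.Set and HashSet; the helper name referencedColumnNames is illustrative):
private static Set<String> referencedColumnNames(AbstractExpression pred) {
    Set<String> names = new HashSet<>();
    // findAllTupleValueSubexpressions() yields every column reference in the expression tree
    for (TupleValueExpression tve : pred.findAllTupleValueSubexpressions()) {
        names.add(tve.getColumnName());
    }
    return names;
}
The assertion in the test could then read, for example, assertTrue(Arrays.asList(columns).containsAll(referencedColumnNames(pred))).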
Use of org.voltdb.expressions.AbstractExpression in project voltdb by VoltDB.
From the class ProcedureCompiler, method compileSingleStmtProcedure.
static void compileSingleStmtProcedure(VoltCompiler compiler, HSQLInterface hsql, DatabaseEstimates estimates, Database db, ProcedureDescriptor procedureDescriptor) throws VoltCompiler.VoltCompilerException {
    final String className = procedureDescriptor.m_className;
    if (className.indexOf('@') != -1) {
        throw compiler.new VoltCompilerException("User procedure names can't contain \"@\".");
    }
    // get the short name of the class (no package if a user procedure)
    // use the Table.<builtin> name (allowing the period) if builtin.
    String shortName = className;
    if (procedureDescriptor.m_builtInStmt == false) {
        String[] parts = className.split("\\.");
        shortName = parts[parts.length - 1];
    }
    // add an entry to the catalog, keyed on the short name (the full className is set below)
    final Procedure procedure = db.getProcedures().add(shortName);
    for (String groupName : procedureDescriptor.m_authGroups) {
        final Group group = db.getGroups().get(groupName);
        if (group == null) {
            throw compiler.new VoltCompilerException("Procedure " + className + " allows access by a role " + groupName + " that does not exist");
        }
        final GroupRef groupRef = procedure.getAuthgroups().add(groupName);
        groupRef.setGroup(group);
    }
    procedure.setClassname(className);
    // sysprocs don't use the procedure compiler
    procedure.setSystemproc(false);
    procedure.setDefaultproc(procedureDescriptor.m_builtInStmt);
    procedure.setHasjava(false);
    procedure.setTransactional(true);
    // get the annotation
    // first try to get one that has been passed from the compiler
    ProcInfoData info = compiler.getProcInfoOverride(shortName);
    // and create a ProcInfo.Data instance for it
    if (info == null) {
        info = new ProcInfoData();
        if (procedureDescriptor.m_partitionString != null) {
            info.partitionInfo = procedureDescriptor.m_partitionString;
            info.singlePartition = true;
        }
    }
    assert (info != null);
    // ADD THE STATEMENT
    // add the statement to the catalog
    Statement catalogStmt = procedure.getStatements().add(VoltDB.ANON_STMT_NAME);
    // compile the statement
    StatementPartitioning partitioning = info.singlePartition ? StatementPartitioning.forceSP() : StatementPartitioning.forceMP();
    // default to FASTER detmode because stmt procs can't feed read output into writes
    StatementCompiler.compileFromSqlTextAndUpdateCatalog(compiler, hsql, db, estimates, catalogStmt, procedureDescriptor.m_singleStmt, procedureDescriptor.m_joinOrder, DeterminismMode.FASTER, partitioning);
    // if the single stmt is not read only, then the proc is not read only
    boolean procHasWriteStmts = (catalogStmt.getReadonly() == false);
    // set the read onlyness of a proc
    procedure.setReadonly(procHasWriteStmts == false);
    int seqs = catalogStmt.getSeqscancount();
    procedure.setHasseqscans(seqs > 0);
    // set procedure parameter types
    CatalogMap<ProcParameter> params = procedure.getParameters();
    CatalogMap<StmtParameter> stmtParams = catalogStmt.getParameters();
    // set the procedure parameter types from the statement parameter types
    int paramCount = 0;
    for (StmtParameter stmtParam : CatalogUtil.getSortedCatalogItems(stmtParams, "index")) {
        // name each parameter "param0", "param1", etc...
        ProcParameter procParam = params.add("param" + String.valueOf(paramCount));
        procParam.setIndex(stmtParam.getIndex());
        procParam.setIsarray(stmtParam.getIsarray());
        procParam.setType(stmtParam.getJavatype());
        paramCount++;
    }
    // parse the procinfo
    procedure.setSinglepartition(info.singlePartition);
    if (info.singlePartition) {
        parsePartitionInfo(compiler, db, procedure, info.partitionInfo);
        if (procedure.getPartitionparameter() >= params.size()) {
            String msg = "PartitionInfo parameter not a valid parameter for procedure: " + procedure.getClassname();
            throw compiler.new VoltCompilerException(msg);
        }
        // TODO: The planner does not currently validate that a single-statement plan declared as single-partition correctly uses
        // the designated parameter as a partitioning filter, maybe some day.
        // In theory, the PartitioningForStatement would confirm the use of (only) a parameter as a partition key --
        // or if the partition key was determined to be some other hard-coded constant (expression?) it might display a warning
        // message that the passed parameter is assumed to be equal to that constant (expression).
    } else {
        if (partitioning.getCountOfIndependentlyPartitionedTables() == 1) {
            AbstractExpression statementPartitionExpression = partitioning.singlePartitioningExpressionForReport();
            if (statementPartitionExpression != null) {
                // The planner has uncovered an overlooked opportunity to run the statement SP.
                String msg = "This procedure " + shortName + " would benefit from being partitioned, by ";
                String tableName = "tableName", partitionColumnName = "partitionColumnName";
                try {
                    assert (partitioning.getFullColumnName() != null);
                    String[] array = partitioning.getFullColumnName().split("\\.");
                    tableName = array[0];
                    partitionColumnName = array[1];
                } catch (Exception ex) {
                }
                if (statementPartitionExpression instanceof ParameterValueExpression) {
                    paramCount = ((ParameterValueExpression) statementPartitionExpression).getParameterIndex();
                } else {
                    String valueDescription = null;
                    Object partitionValue = partitioning.getInferredPartitioningValue();
                    if (partitionValue == null) {
                        // Statement partitioned on a runtime constant. This is likely to be cryptic, but hopefully gets the idea across.
                        valueDescription = "of " + statementPartitionExpression.explain("");
                    } else {
                        // A simple constant value COULD have been a parameter.
                        valueDescription = partitionValue.toString();
                    }
                    msg += "adding a parameter to be passed the value " + valueDescription + " and ";
                }
                msg += "adding a 'PARTITION ON TABLE " + tableName + " COLUMN " + partitionColumnName + " PARAMETER " + paramCount + "' clause to the " + "CREATE PROCEDURE statement. or using a separate PARTITION PROCEDURE statement";
                compiler.addWarn(msg);
            }
        }
    }
}
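Two conventions in the method above are easy to miss: a user procedure's catalog name is the last dot-separated segment of its class name (built-in statements keep their Table.<builtin> form), and each statement parameter is surfaced as a procedure parameter named "param0", "param1", and so on, in index order. A self-contained sketch of the short-name rule in plain Java (no VoltDB classes; the method name shortProcedureName is illustrative):
static String shortProcedureName(String className, boolean builtInStmt) {
    if (builtInStmt) {
        // built-in statements keep the Table.<builtin> form, period included
        return className;
    }
    String[] parts = className.split("\\.");
    return parts[parts.length - 1];
}
For example, shortProcedureName("org.acme.procs.AddCustomer", false) would return "AddCustomer" (the class name here is made up for illustration).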
Use of org.voltdb.expressions.AbstractExpression in project voltdb by VoltDB.
From the class PlanAssembler, method setupForNewPlans.
/**
 * Clear any old state and get ready to plan a new plan. The next call to
 * getNextPlan() will return the first candidate plan for these parameters.
 */
private void setupForNewPlans(AbstractParsedStmt parsedStmt) {
    m_bestAndOnlyPlanWasGenerated = false;
    m_partitioning.analyzeTablePartitioning(parsedStmt.allScans());
    if (parsedStmt instanceof ParsedUnionStmt) {
        m_parsedUnion = (ParsedUnionStmt) parsedStmt;
        return;
    }
    if (parsedStmt instanceof ParsedSelectStmt) {
        if (tableListIncludesExportOnly(parsedStmt.m_tableList)) {
            throw new PlanningErrorException("Illegal to read a stream.");
        }
        m_parsedSelect = (ParsedSelectStmt) parsedStmt;
        // Simplify the outer join if possible
        if (m_parsedSelect.m_joinTree instanceof BranchNode) {
            if (!m_parsedSelect.hasJoinOrder()) {
                simplifyOuterJoin((BranchNode) m_parsedSelect.m_joinTree);
            }
            // Convert RIGHT joins to LEFT ones
            ((BranchNode) m_parsedSelect.m_joinTree).toLeftJoin();
        }
        m_subAssembler = new SelectSubPlanAssembler(m_catalogDb, m_parsedSelect, m_partitioning);
        // Process the GROUP BY information and decide whether the query groups by the partition column
        if (isPartitionColumnInGroupbyList(m_parsedSelect.groupByColumns())) {
            m_parsedSelect.setHasPartitionColumnInGroupby();
        }
        if (isPartitionColumnInWindowedAggregatePartitionByList()) {
            m_parsedSelect.setHasPartitionColumnInWindowedAggregate();
        }
        return;
    }
    // check that no modification happens to views
    if (tableListIncludesReadOnlyView(parsedStmt.m_tableList)) {
        throw new PlanningErrorException("Illegal to modify a materialized view.");
    }
    m_partitioning.setIsDML();
    // figure out which table we're updating/deleting
    if (parsedStmt instanceof ParsedSwapStmt) {
        assert (parsedStmt.m_tableList.size() == 2);
        if (tableListIncludesExportOnly(parsedStmt.m_tableList)) {
            throw new PlanningErrorException("Illegal to swap a stream.");
        }
        m_parsedSwap = (ParsedSwapStmt) parsedStmt;
        return;
    }
    Table targetTable = parsedStmt.m_tableList.get(0);
    if (targetTable.getIsreplicated()) {
        if (m_partitioning.wasSpecifiedAsSingle() && !m_partitioning.isReplicatedDmlToRunOnAllPartitions()) {
            String msg = "Trying to write to replicated table '" + targetTable.getTypeName() + "' in a single-partition procedure.";
            throw new PlanningErrorException(msg);
        }
    } else if (m_partitioning.wasSpecifiedAsSingle() == false) {
        m_partitioning.setPartitioningColumnForDML(targetTable.getPartitioncolumn());
    }
    if (parsedStmt instanceof ParsedInsertStmt) {
        m_parsedInsert = (ParsedInsertStmt) parsedStmt;
        // The currently handled inserts are too simple to even require a subplan assembler. So, done.
        return;
    }
    if (parsedStmt instanceof ParsedUpdateStmt) {
        if (tableListIncludesExportOnly(parsedStmt.m_tableList)) {
            throw new PlanningErrorException("Illegal to update a stream.");
        }
        m_parsedUpdate = (ParsedUpdateStmt) parsedStmt;
    } else if (parsedStmt instanceof ParsedDeleteStmt) {
        if (tableListIncludesExportOnly(parsedStmt.m_tableList)) {
            throw new PlanningErrorException("Illegal to delete from a stream.");
        }
        m_parsedDelete = (ParsedDeleteStmt) parsedStmt;
    } else {
        throw new RuntimeException("Unknown subclass of AbstractParsedStmt.");
    }
    if (!m_partitioning.wasSpecifiedAsSingle()) {
        //TODO: When updates and deletes can contain joins, this step may have to be
        // deferred so that the valueEquivalence set can be analyzed per join order.
        // This appears to be an unfortunate side effect of how the HSQL interface
        // misleadingly organizes the placement of join/where filters on the statement tree.
        // This throws off the accounting of equivalence join filters until they can be
        // normalized in analyzeJoinFilters, but that normalization process happens on a
        // per-join-order basis, and so must this analysis.
        HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence = parsedStmt.analyzeValueEquivalence();
        Collection<StmtTableScan> scans = parsedStmt.allScans();
        m_partitioning.analyzeForMultiPartitionAccess(scans, valueEquivalence);
    }
    m_subAssembler = new WriterSubPlanAssembler(m_catalogDb, parsedStmt, m_partitioning);
}
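For write statements that are not declared single-partition, the method finishes by asking the parsed statement for a value-equivalence map and passing it, together with the table scans, to analyzeForMultiPartitionAccess. As an illustration only (plain Java with strings standing in for AbstractExpression objects, not the actual VoltDB structure), the idea is that expressions forced equal by the statement's filters share one set, so a chain like T1.A = T2.B AND T2.B = ?0 groups all three together:
Map<String, Set<String>> valueEquivalence = new HashMap<>();
Set<String> equivalenceSet = new HashSet<>(Arrays.asList("T1.A", "T2.B", "?0"));
for (String member : equivalenceSet) {
    // every member keys the same shared set, mirroring the shape of the
    // HashMap<AbstractExpression, Set<AbstractExpression>> built above
    valueEquivalence.put(member, equivalenceSet);
}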
Use of org.voltdb.expressions.AbstractExpression in project voltdb by VoltDB.
From the class PlanAssembler, method getNextInsertPlan.
/**
 * Get the next (only) plan for a SQL insertion. Inserts are pretty simple
 * and this will only generate a single plan.
 *
 * @return The next (only) plan for a given insert statement, then null.
 */
private CompiledPlan getNextInsertPlan() {
    // do it the right way once, then return null after that
    if (m_bestAndOnlyPlanWasGenerated) {
        return null;
    }
    m_bestAndOnlyPlanWasGenerated = true;
    // figure out which table we're inserting into
    assert (m_parsedInsert.m_tableList.size() == 1);
    Table targetTable = m_parsedInsert.m_tableList.get(0);
    StmtSubqueryScan subquery = m_parsedInsert.getSubqueryScan();
    CompiledPlan retval = null;
    String isContentDeterministic = null;
    if (subquery != null) {
        isContentDeterministic = subquery.calculateContentDeterminismMessage();
        if (subquery.getBestCostPlan() == null) {
            // Planning the subquery failed in getBestCostPlan, above.
            throw new PlanningErrorException("INSERT INTO ... SELECT subquery could not be planned: " + m_recentErrorMsg);
        }
        boolean targetIsExportTable = tableListIncludesExportOnly(m_parsedInsert.m_tableList);
        InsertSubPlanAssembler subPlanAssembler = new InsertSubPlanAssembler(m_catalogDb, m_parsedInsert, m_partitioning, targetIsExportTable);
        AbstractPlanNode subplan = subPlanAssembler.nextPlan();
        if (subplan == null) {
            throw new PlanningErrorException(subPlanAssembler.m_recentErrorMsg);
        }
        assert (m_partitioning.isJoinValid());
        // Use the subquery's plan as the basis for the insert plan.
        retval = subquery.getBestCostPlan();
    } else {
        retval = new CompiledPlan();
    }
    retval.setReadOnly(false);
    // isContentDeterministic was set above, for the INSERT ... SELECT ... case, by analyzing the subquery.
    if (m_parsedInsert.m_isUpsert) {
        boolean hasPrimaryKey = false;
        for (Constraint constraint : targetTable.getConstraints()) {
            if (constraint.getType() != ConstraintType.PRIMARY_KEY.getValue()) {
                continue;
            }
            hasPrimaryKey = true;
            boolean targetsPrimaryKey = false;
            for (ColumnRef colRef : constraint.getIndex().getColumns()) {
                int primary = colRef.getColumn().getIndex();
                for (Column targetCol : m_parsedInsert.m_columns.keySet()) {
                    if (targetCol.getIndex() == primary) {
                        targetsPrimaryKey = true;
                        break;
                    }
                }
                if (!targetsPrimaryKey) {
                    throw new PlanningErrorException("UPSERT on table \"" + targetTable.getTypeName() + "\" must specify a value for primary key \"" + colRef.getColumn().getTypeName() + "\".");
                }
            }
        }
        if (!hasPrimaryKey) {
            throw new PlanningErrorException("UPSERT is not allowed on table \"" + targetTable.getTypeName() + "\" that has no primary key.");
        }
    }
    CatalogMap<Column> targetTableColumns = targetTable.getColumns();
    for (Column col : targetTableColumns) {
        boolean needsValue = (!m_parsedInsert.m_isUpsert) && (col.getNullable() == false) && (col.getDefaulttype() == 0);
        if (needsValue && !m_parsedInsert.m_columns.containsKey(col)) {
            // This check could be done during parsing?
            throw new PlanningErrorException("Column " + col.getName() + " has no default and is not nullable.");
        }
        // If this is the partitioning column for a plain INSERT, record its expression as a
        // hint that this statement can be executed SP.
        if (col.equals(m_partitioning.getPartitionColForDML()) && subquery == null) {
            // When AdHoc insert-into-select is supported, we'll need to be able to infer
            // partitioning of the sub-select
            AbstractExpression expr = m_parsedInsert.getExpressionForPartitioning(col);
            String fullColumnName = targetTable.getTypeName() + "." + col.getTypeName();
            m_partitioning.addPartitioningExpression(fullColumnName, expr, expr.getValueType());
        }
    }
    NodeSchema matSchema = null;
    if (subquery == null) {
        matSchema = new NodeSchema();
    }
    int[] fieldMap = new int[m_parsedInsert.m_columns.size()];
    int i = 0;
    // Build the field map and, for VALUES(...) insert statements, the materialize node's schema.
    for (Map.Entry<Column, AbstractExpression> e : m_parsedInsert.m_columns.entrySet()) {
        Column col = e.getKey();
        fieldMap[i] = col.getIndex();
        if (matSchema != null) {
            AbstractExpression valExpr = e.getValue();
            valExpr.setInBytes(col.getInbytes());
            // Patch over any mismatched expressions with an explicit cast.
            // Most impossible-to-cast type combinations should have already been caught by the
            // parser, but there are also runtime checks in the casting code
            // -- such as for out of range values.
            valExpr = castExprIfNeeded(valExpr, col);
            matSchema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, col.getTypeName(), col.getTypeName(), valExpr);
        }
        i++;
    }
    // the root of the insert plan may be an InsertPlanNode, or
    // it may be a scan plan node. We may do an inline InsertPlanNode
    // as well.
    InsertPlanNode insertNode = new InsertPlanNode();
    insertNode.setTargetTableName(targetTable.getTypeName());
    if (subquery != null) {
        insertNode.setSourceIsPartitioned(!subquery.getIsReplicated());
    }
    // The field map tells the insert node
    // where to put values produced by the child into the row to be inserted.
    insertNode.setFieldMap(fieldMap);
    AbstractPlanNode root = insertNode;
    if (matSchema != null) {
        MaterializePlanNode matNode = new MaterializePlanNode(matSchema);
        // connect the insert and the materialize nodes together
        insertNode.addAndLinkChild(matNode);
        retval.statementGuaranteesDeterminism(false, true, isContentDeterministic);
    } else {
        ScanPlanNodeWithInlineInsert planNode = (retval.rootPlanGraph instanceof ScanPlanNodeWithInlineInsert) ? ((ScanPlanNodeWithInlineInsert) retval.rootPlanGraph) : null;
        // Inline upsert might be possible, but not now.
        if (planNode != null && (!m_parsedInsert.m_isUpsert) && (!planNode.hasInlineAggregateNode())) {
            planNode.addInlinePlanNode(insertNode);
            root = planNode.getAbstractNode();
        } else {
            // Otherwise just make it out-of-line.
            insertNode.addAndLinkChild(retval.rootPlanGraph);
        }
    }
    if (m_partitioning.wasSpecifiedAsSingle() || m_partitioning.isInferredSingle()) {
        insertNode.setMultiPartition(false);
        retval.rootPlanGraph = root;
        return retval;
    }
    insertNode.setMultiPartition(true);
    // Add a compensating sum of modified tuple counts or a limit 1
    // AND a send on top of a union-like receive node.
    boolean isReplicated = targetTable.getIsreplicated();
    retval.rootPlanGraph = addCoordinatorToDMLNode(root, isReplicated);
    return retval;
}
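The comment above says the field map tells the insert node where to put values produced by the child into the row to be inserted. A small hedged sketch in plain Java makes that concrete; applyFieldMap is illustrative and not how the executor is actually implemented, and columns the map does not cover are simply left null here:
static Object[] applyFieldMap(Object[] childRow, int[] fieldMap, int targetColumnCount) {
    Object[] targetTuple = new Object[targetColumnCount];
    for (int i = 0; i < fieldMap.length; i++) {
        // value i produced by the child plan lands in target-table column fieldMap[i]
        targetTuple[fieldMap[i]] = childRow[i];
    }
    return targetTuple;
}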