Use of org.voltdb.plannodes.MaterializePlanNode in project voltdb by VoltDB.
From the class PlanAssembler, method getNextInsertPlan:
/**
 * Get the next (only) plan for a SQL insertion. Inserts are pretty simple
 * and this will only generate a single plan.
 *
 * @return The next (only) plan for a given insert statement, then null.
 */
private CompiledPlan getNextInsertPlan() {
    // do it the right way once, then return null after that
    if (m_bestAndOnlyPlanWasGenerated) {
        return null;
    }
    m_bestAndOnlyPlanWasGenerated = true;

    // figure out which table we're inserting into
    assert (m_parsedInsert.m_tableList.size() == 1);
    Table targetTable = m_parsedInsert.m_tableList.get(0);
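    // An INSERT ... SELECT statement has a planned subquery scan here;
    // a plain INSERT ... VALUES(...) statement does not.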
    StmtSubqueryScan subquery = m_parsedInsert.getSubqueryScan();
    CompiledPlan retval = null;
    String isContentDeterministic = null;
    if (subquery != null) {
        isContentDeterministic = subquery.calculateContentDeterminismMessage();
        if (subquery.getBestCostPlan() == null) {
            // This failure should really have been caught earlier,
            // in getBestCostPlan, above.
            throw new PlanningErrorException("INSERT INTO ... SELECT subquery could not be planned: "
                    + m_recentErrorMsg);
        }
        boolean targetIsExportTable = tableListIncludesExportOnly(m_parsedInsert.m_tableList);
        InsertSubPlanAssembler subPlanAssembler = new InsertSubPlanAssembler(
                m_catalogDb, m_parsedInsert, m_partitioning, targetIsExportTable);
        AbstractPlanNode subplan = subPlanAssembler.nextPlan();
        if (subplan == null) {
            throw new PlanningErrorException(subPlanAssembler.m_recentErrorMsg);
        }
        assert (m_partitioning.isJoinValid());
        // Use the subquery's plan as the basis for the insert plan.
        retval = subquery.getBestCostPlan();
    } else {
        retval = new CompiledPlan();
    }
    retval.setReadOnly(false);

    // Content determinism was already established above
    // for the INSERT ... SELECT ... case, by analyzing the subquery.
    if (m_parsedInsert.m_isUpsert) {
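        // UPSERT is only legal when the target table has a primary key and the
        // statement supplies a value for every column of that key.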
        boolean hasPrimaryKey = false;
        for (Constraint constraint : targetTable.getConstraints()) {
            if (constraint.getType() != ConstraintType.PRIMARY_KEY.getValue()) {
                continue;
            }
            hasPrimaryKey = true;
            boolean targetsPrimaryKey = false;
            for (ColumnRef colRef : constraint.getIndex().getColumns()) {
                int primary = colRef.getColumn().getIndex();
                for (Column targetCol : m_parsedInsert.m_columns.keySet()) {
                    if (targetCol.getIndex() == primary) {
                        targetsPrimaryKey = true;
                        break;
                    }
                }
                if (!targetsPrimaryKey) {
                    throw new PlanningErrorException("UPSERT on table \"" + targetTable.getTypeName()
                            + "\" must specify a value for primary key \""
                            + colRef.getColumn().getTypeName() + "\".");
                }
            }
        }
        if (!hasPrimaryKey) {
            throw new PlanningErrorException("UPSERT is not allowed on table \""
                    + targetTable.getTypeName() + "\" that has no primary key.");
        }
    }
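    // Walk the target table's columns: a non-nullable column with no default must be
    // supplied a value, and a value supplied for the partitioning column lets the
    // planner treat the statement as single-partition.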
    CatalogMap<Column> targetTableColumns = targetTable.getColumns();
    for (Column col : targetTableColumns) {
        boolean needsValue = (!m_parsedInsert.m_isUpsert)
                && (col.getNullable() == false)
                && (col.getDefaulttype() == 0);
        if (needsValue && !m_parsedInsert.m_columns.containsKey(col)) {
            // This check could be done during parsing?
            throw new PlanningErrorException("Column " + col.getName()
                    + " has no default and is not nullable.");
        }

        // hint that this statement can be executed SP.
        if (col.equals(m_partitioning.getPartitionColForDML()) && subquery == null) {
            // When AdHoc insert-into-select is supported, we'll need to be able to infer
            // partitioning of the sub-select
            AbstractExpression expr = m_parsedInsert.getExpressionForPartitioning(col);
            String fullColumnName = targetTable.getTypeName() + "." + col.getTypeName();
            m_partitioning.addPartitioningExpression(fullColumnName, expr, expr.getValueType());
        }
    }

    NodeSchema matSchema = null;
    if (subquery == null) {
        matSchema = new NodeSchema();
    }

    int[] fieldMap = new int[m_parsedInsert.m_columns.size()];
    int i = 0;
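    // - Build the field map, which tells the insert executor where each produced
    //   value belongs in the row being inserted.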
    // - For VALUES(...) insert statements, build the materialize node's schema
    for (Map.Entry<Column, AbstractExpression> e : m_parsedInsert.m_columns.entrySet()) {
        Column col = e.getKey();
        fieldMap[i] = col.getIndex();
        if (matSchema != null) {
            AbstractExpression valExpr = e.getValue();
            valExpr.setInBytes(col.getInbytes());
            // Patch over any mismatched expressions with an explicit cast.
            // Most impossible-to-cast type combinations should have already been caught by the
            // parser, but there are also runtime checks in the casting code
            // -- such as for out of range values.
            valExpr = castExprIfNeeded(valExpr, col);
            matSchema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME,
                    AbstractParsedStmt.TEMP_TABLE_NAME,
                    col.getTypeName(),
                    col.getTypeName(),
                    valExpr);
        }
        i++;
    }

    // the root of the insert plan may be an InsertPlanNode, or
    // it may be a scan plan node. We may do an inline InsertPlanNode
    // as well.
    InsertPlanNode insertNode = new InsertPlanNode();
    insertNode.setTargetTableName(targetTable.getTypeName());
    if (subquery != null) {
        insertNode.setSourceIsPartitioned(!subquery.getIsReplicated());
    }

    // The field map tells the insert node
    // where to put values produced by child into the row to be inserted.
    insertNode.setFieldMap(fieldMap);

    AbstractPlanNode root = insertNode;
    if (matSchema != null) {
        MaterializePlanNode matNode = new MaterializePlanNode(matSchema);
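        // The materialize node evaluates the VALUES(...) expressions to produce
        // the single row that the insert node will write.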
        // connect the insert and the materialize nodes together
        insertNode.addAndLinkChild(matNode);
        retval.statementGuaranteesDeterminism(false, true, isContentDeterministic);
    } else {
        ScanPlanNodeWithInlineInsert planNode =
                (retval.rootPlanGraph instanceof ScanPlanNodeWithInlineInsert)
                        ? ((ScanPlanNodeWithInlineInsert) retval.rootPlanGraph)
                        : null;
        // Inline upsert might be possible, but not now.
        if (planNode != null
                && (!m_parsedInsert.m_isUpsert)
                && (!planNode.hasInlineAggregateNode())) {
            planNode.addInlinePlanNode(insertNode);
            root = planNode.getAbstractNode();
        } else {
            // Otherwise just make it out-of-line.
            insertNode.addAndLinkChild(retval.rootPlanGraph);
        }
    }
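    // Single-partition statements are complete at this point; multi-partition
    // statements still need the coordinator fragment added on top.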
    if (m_partitioning.wasSpecifiedAsSingle() || m_partitioning.isInferredSingle()) {
        insertNode.setMultiPartition(false);
        retval.rootPlanGraph = root;
        return retval;
    }

    insertNode.setMultiPartition(true);
    // Add a compensating sum of modified tuple counts or a limit 1
    // AND a send on top of a union-like receive node.
    boolean isReplicated = targetTable.getIsreplicated();
    retval.rootPlanGraph = addCoordinatorToDMLNode(root, isReplicated);
    return retval;
}
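For comparison, below is a minimal sketch, not VoltDB source: a hypothetical helper that wires the VALUES(...) path the same way the method above does, with a NodeSchema holding the literal value expressions, a MaterializePlanNode built from that schema, and an InsertPlanNode on top carrying the target table name and field map. It uses only calls that appear in getNextInsertPlan; the helper name, its parameters, and the assumption that the caller already holds the parsed column-to-expression map are illustrative, and the setInBytes and castExprIfNeeded patching shown above is omitted for brevity.

// Hypothetical illustration only -- not part of PlanAssembler.
// Assumes targetTable and an insertion-ordered Column -> AbstractExpression map
// (the parsed VALUES(...) entries) are already available, as they are in the method above.
private AbstractPlanNode buildValuesInsertSketch(Table targetTable,
        Map<Column, AbstractExpression> columns) {
    NodeSchema matSchema = new NodeSchema();
    int[] fieldMap = new int[columns.size()];
    int i = 0;
    for (Map.Entry<Column, AbstractExpression> e : columns.entrySet()) {
        Column col = e.getKey();
        // Record where this value lands in the row being inserted.
        fieldMap[i++] = col.getIndex();
        // One schema column per target column, carrying the literal value expression.
        matSchema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME,
                AbstractParsedStmt.TEMP_TABLE_NAME,
                col.getTypeName(),
                col.getTypeName(),
                e.getValue());
    }
    // The materialize node produces the literal row; the insert node writes it
    // into the target table using the field map.
    MaterializePlanNode matNode = new MaterializePlanNode(matSchema);
    InsertPlanNode insertNode = new InsertPlanNode();
    insertNode.setTargetTableName(targetTable.getTypeName());
    insertNode.setFieldMap(fieldMap);
    insertNode.addAndLinkChild(matNode);
    return insertNode;
}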