use of org.voltdb.planner.CompiledPlan in project voltdb by VoltDB.
In the class SelectSubqueryExpression, the method overrideSubqueryNodeIds.
@Override
public int overrideSubqueryNodeIds(int newId) {
assert (m_subquery != null);
CompiledPlan subqueryPlan = m_subquery.getBestCostPlan();
newId = subqueryPlan.resetPlanNodeIds(newId);
resetSubqueryNodeId();
return newId;
}
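The pattern here is an id counter threaded through successive plans: resetPlanNodeIds renumbers the subquery's plan nodes starting at newId and returns the next unused id, which the caller can pass along to the next subquery. Below is a minimal, self-contained sketch of that pattern; the Plan and PlanNode classes are illustrative stand-ins, not the real VoltDB types.

import java.util.Arrays;
import java.util.List;

// Hypothetical stand-ins used only to illustrate the id-threading pattern.
class PlanNode {
    int id;
}

class Plan {
    final List<PlanNode> nodes;
    Plan(List<PlanNode> nodes) { this.nodes = nodes; }

    // Renumber every node starting at nextId and return the next unused id,
    // so the caller can thread the counter through several plans in sequence.
    int resetPlanNodeIds(int nextId) {
        for (PlanNode node : nodes) {
            node.id = nextId++;
        }
        return nextId;
    }
}

class IdThreadingDemo {
    public static void main(String[] args) {
        List<Plan> subqueryPlans = Arrays.asList(
                new Plan(Arrays.asList(new PlanNode(), new PlanNode())),
                new Plan(Arrays.asList(new PlanNode())));
        int nextId = 1;
        for (Plan plan : subqueryPlans) {
            nextId = plan.resetPlanNodeIds(nextId);      // ids 1..2, then 3
        }
        System.out.println("next free id = " + nextId);  // prints 4
    }
}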
use of org.voltdb.planner.CompiledPlan in project voltdb by VoltDB.
In the class PlannerTool, the method planSql.
public synchronized AdHocPlannedStatement planSql(String sqlIn, StatementPartitioning partitioning, boolean isExplainMode, final Object[] userParams, boolean isSwapTables) {
CacheUse cacheUse = CacheUse.FAIL;
if (m_plannerStats != null) {
m_plannerStats.startStatsCollection();
}
boolean hasUserQuestionMark = false;
boolean wrongNumberParameters = false;
try {
if ((sqlIn == null) || (sqlIn.length() == 0)) {
throw new RuntimeException("Can't plan empty or null SQL.");
}
// trim leading and trailing whitespace (spaces and newlines)
String sql = sqlIn.trim();
// The plan caches are consulted only when the partitioning can be inferred.
if (partitioning.isInferred()) {
// Check the literal cache for a match.
AdHocPlannedStatement cachedPlan = m_cache.getWithSQL(sqlIn);
if (cachedPlan != null) {
cacheUse = CacheUse.HIT1;
return cachedPlan;
} else {
cacheUse = CacheUse.MISS;
}
}
// Reset plan node id counter
AbstractPlanNode.resetPlanNodeIds();
//////////////////////
// PLAN THE STMT
//////////////////////
TrivialCostModel costModel = new TrivialCostModel();
DatabaseEstimates estimates = new DatabaseEstimates();
QueryPlanner planner = new QueryPlanner(sql, "PlannerTool", "PlannerToolProc", m_database, partitioning, m_hsql, estimates, !VoltCompiler.DEBUG_MODE, AD_HOC_JOINED_TABLE_LIMIT, costModel, null, null, DeterminismMode.FASTER);
CompiledPlan plan = null;
String[] extractedLiterals = null;
String parsedToken = null;
try {
if (isSwapTables) {
planner.planSwapTables();
} else {
planner.parse();
}
parsedToken = planner.parameterize();
// Check that the number of user-supplied parameters matches the number of
// parameter placeholders (question marks) in the statement.
int inputParamsLength = userParams == null ? 0 : userParams.length;
if (planner.getAdhocUserParamsCount() != inputParamsLength) {
wrongNumberParameters = true;
if (!isExplainMode) {
throw new PlanningErrorException(String.format("Incorrect number of parameters passed: expected %d, passed %d", planner.getAdhocUserParamsCount(), inputParamsLength));
}
}
hasUserQuestionMark = planner.getAdhocUserParamsCount() > 0;
// Do not cache an explain-only query that was passed the wrong number of parameters.
if (!wrongNumberParameters && partitioning.isInferred()) {
// When caching applies, the QueryPlanner has produced a parsed token; use it, together
// with the extracted literal values, to look for a matching pre-bound plan.
assert (parsedToken != null);
extractedLiterals = planner.extractedParamLiteralValues();
List<BoundPlan> boundVariants = m_cache.getWithParsedToken(parsedToken);
if (boundVariants != null) {
assert (!boundVariants.isEmpty());
BoundPlan matched = null;
for (BoundPlan boundPlan : boundVariants) {
if (boundPlan.allowsParams(extractedLiterals)) {
matched = boundPlan;
break;
}
}
if (matched != null) {
CorePlan core = matched.m_core;
ParameterSet params = null;
if (planner.compiledAsParameterizedPlan()) {
params = planner.extractedParamValues(core.parameterTypes);
} else if (hasUserQuestionMark) {
params = ParameterSet.fromArrayNoCopy(userParams);
} else {
// AdHoc query with no constants and no user parameters
params = ParameterSet.emptyParameterSet();
}
AdHocPlannedStatement ahps = new AdHocPlannedStatement(sql.getBytes(Constants.UTF8ENCODING), core, params, null);
ahps.setBoundConstants(matched.m_constants);
// A parameterized plan that came from the cache carries no planning exception.
m_cache.put(sql, parsedToken, ahps, extractedLiterals, hasUserQuestionMark, false);
cacheUse = CacheUse.HIT2;
return ahps;
}
}
}
// If not caching or there was no cache hit, do the expensive full planning.
plan = planner.plan();
assert (plan != null);
if (plan != null && plan.getStatementPartitioning() != null) {
partitioning = plan.getStatementPartitioning();
}
} catch (Exception e) {
/*
* Don't log PlanningErrorExceptions or HSQLParseExceptions, as
* they are at least somewhat expected.
*/
String loggedMsg = "";
if (!((e instanceof PlanningErrorException) || (e instanceof HSQLParseException))) {
logException(e, "Error compiling query");
loggedMsg = " (Stack trace has been written to the log.)";
}
throw new RuntimeException("Error compiling query: " + e.toString() + loggedMsg, e);
}
if (plan == null) {
throw new RuntimeException("Null plan received in PlannerTool.planSql");
}
//////////////////////
// OUTPUT THE RESULT
//////////////////////
CorePlan core = new CorePlan(plan, m_catalogHash);
AdHocPlannedStatement ahps = new AdHocPlannedStatement(plan, core);
// Do not cache an explain-only query that was passed the wrong number of parameters.
if (!wrongNumberParameters && partitioning.isInferred()) {
// Note either the parameter index (per force to a user-provided parameter) or
// the actual constant value of the partitioning key inferred from the plan.
// Either or both of these two values may simply default
// to -1 and to null, respectively.
core.setPartitioningParamIndex(partitioning.getInferredParameterIndex());
core.setPartitioningParamValue(partitioning.getInferredPartitioningValue());
assert (parsedToken != null);
// Again, plans with inferred partitioning are the only ones supported in the cache.
m_cache.put(sqlIn, parsedToken, ahps, extractedLiterals, hasUserQuestionMark, planner.wasBadPameterized());
}
return ahps;
} finally {
if (m_plannerStats != null) {
m_plannerStats.endStatsCollection(m_cache.getLiteralCacheSize(), m_cache.getCoreCacheSize(), cacheUse, -1);
}
}
}
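The control flow above amounts to a two-level cache lookup before any full planning: first an exact match on the SQL text (CacheUse.HIT1), then a match on the parameterized token whose bound variants must accept the extracted literal values (CacheUse.HIT2), and only then the expensive planner.plan() call. The following is a rough, self-contained sketch of that lookup order; the types are hypothetical stand-ins for the real code's m_cache and BoundPlan.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative stand-ins; not the VoltDB cache classes.
class TwoLevelPlanCache {
    interface BoundVariant {
        boolean allowsParams(String[] literals);
        Object plan();
    }

    private final Map<String, Object> literalCache = new HashMap<>();                   // SQL text -> plan
    private final Map<String, List<BoundVariant>> parameterizedCache = new HashMap<>(); // token -> variants

    Object lookup(String sql, String parsedToken, String[] extractedLiterals) {
        Object exactHit = literalCache.get(sql);            // HIT1: no parsing needed
        if (exactHit != null) {
            return exactHit;
        }
        List<BoundVariant> variants = parameterizedCache.get(parsedToken);
        if (variants != null) {
            for (BoundVariant variant : variants) {
                if (variant.allowsParams(extractedLiterals)) {
                    return variant.plan();                  // HIT2: reuse a bound variant
                }
            }
        }
        return null;                                        // MISS: fall through to full planning
    }
}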
use of org.voltdb.planner.CompiledPlan in project voltdb by VoltDB.
In the class StatementCompiler, the method compileStatementAndUpdateCatalog.
/**
* This static method conveniently does a few things for its caller:
* - Formats the statement by replacing newlines with spaces
* and appends a semicolon if needed
* - Updates the catalog Statement with metadata about the statement
* - Plans the statement and puts the serialized plan in the catalog Statement
* - Updates the catalog Statement with info about the statement's parameters
* Upon successful completion, the catalog statement will have been updated with
* plan fragments needed to execute the statement.
*
* @param compiler The VoltCompiler instance
* @param hsql Pass through parameter to QueryPlanner
* @param catalog Pass through parameter to QueryPlanner
* @param db Pass through parameter to QueryPlanner
* @param estimates Pass through parameter to QueryPlanner
* @param catalogStmt Catalog statement to be updated with plan
* @param xml XML for statement, if it has been previously parsed
* (may be null)
* @param stmt Text of statement to be compiled
* @param joinOrder Pass through parameter to QueryPlanner
* @param detMode Pass through parameter to QueryPlanner
* @param partitioning Partition info for statement
*/
static boolean compileStatementAndUpdateCatalog(VoltCompiler compiler, HSQLInterface hsql,
        Database db, DatabaseEstimates estimates, Statement catalogStmt, VoltXMLElement xml,
        String stmt, String joinOrder, DeterminismMode detMode,
        StatementPartitioning partitioning) throws VoltCompiler.VoltCompilerException {
// Cleanup whitespace newlines for catalog compatibility
// and to make statement parsing easier.
stmt = stmt.replaceAll("\n", " ");
stmt = stmt.trim();
compiler.addInfo("Compiling Statement: " + stmt);
// put the data in the catalog that we have
if (!stmt.endsWith(";")) {
stmt += ";";
}
// if this key + sql is the same, then a cached stmt can be used
String keyPrefix = compiler.getKeyPrefix(partitioning, detMode, joinOrder);
// if the key is cache-able, look for a previous statement
if (keyPrefix != null) {
Statement previousStatement = compiler.getCachedStatement(keyPrefix, stmt);
// check if the stmt exists and if it's the same sql text
if (previousStatement != null) {
catalogStmt.setAnnotation(previousStatement.getAnnotation());
catalogStmt.setAttachment(previousStatement.getAttachment());
catalogStmt.setCachekeyprefix(previousStatement.getCachekeyprefix());
catalogStmt.setCost(previousStatement.getCost());
catalogStmt.setExplainplan(previousStatement.getExplainplan());
catalogStmt.setIscontentdeterministic(previousStatement.getIscontentdeterministic());
catalogStmt.setIsorderdeterministic(previousStatement.getIsorderdeterministic());
catalogStmt.setNondeterminismdetail(previousStatement.getNondeterminismdetail());
catalogStmt.setQuerytype(previousStatement.getQuerytype());
catalogStmt.setReadonly(previousStatement.getReadonly());
catalogStmt.setReplicatedtabledml(previousStatement.getReplicatedtabledml());
catalogStmt.setSeqscancount(previousStatement.getSeqscancount());
catalogStmt.setSinglepartition(previousStatement.getSinglepartition());
catalogStmt.setSqltext(previousStatement.getSqltext());
catalogStmt.setTablesread(previousStatement.getTablesread());
catalogStmt.setTablesupdated(previousStatement.getTablesupdated());
catalogStmt.setIndexesused(previousStatement.getIndexesused());
for (StmtParameter oldSp : previousStatement.getParameters()) {
StmtParameter newSp = catalogStmt.getParameters().add(oldSp.getTypeName());
newSp.setAnnotation(oldSp.getAnnotation());
newSp.setAttachment(oldSp.getAttachment());
newSp.setIndex(oldSp.getIndex());
newSp.setIsarray(oldSp.getIsarray());
newSp.setJavatype(oldSp.getJavatype());
newSp.setSqltype(oldSp.getSqltype());
}
for (PlanFragment oldFrag : previousStatement.getFragments()) {
PlanFragment newFrag = catalogStmt.getFragments().add(oldFrag.getTypeName());
newFrag.setAnnotation(oldFrag.getAnnotation());
newFrag.setAttachment(oldFrag.getAttachment());
newFrag.setHasdependencies(oldFrag.getHasdependencies());
newFrag.setMultipartition(oldFrag.getMultipartition());
newFrag.setNontransactional(oldFrag.getNontransactional());
newFrag.setPlanhash(oldFrag.getPlanhash());
newFrag.setPlannodetree(oldFrag.getPlannodetree());
}
return true;
}
}
// determine the type of the query
QueryType qtype = QueryType.getFromSQL(stmt);
catalogStmt.setReadonly(qtype.isReadOnly());
catalogStmt.setQuerytype(qtype.getValue());
// might be null if not cacheable
catalogStmt.setCachekeyprefix(keyPrefix);
catalogStmt.setSqltext(stmt);
catalogStmt.setSinglepartition(partitioning.wasSpecifiedAsSingle());
String name = catalogStmt.getParent().getTypeName() + "-" + catalogStmt.getTypeName();
String sql = catalogStmt.getSqltext();
String stmtName = catalogStmt.getTypeName();
String procName = catalogStmt.getParent().getTypeName();
TrivialCostModel costModel = new TrivialCostModel();
CompiledPlan plan = null;
QueryPlanner planner = new QueryPlanner(sql, stmtName, procName, db, partitioning, hsql,
        estimates, false, DEFAULT_MAX_JOIN_TABLES, costModel, null, joinOrder, detMode);
try {
try {
if (xml != null) {
planner.parseFromXml(xml);
} else {
planner.parse();
}
plan = planner.plan();
assert (plan != null);
} catch (PlanningErrorException e) {
// These are normal, expected errors; they don't usually need a stack trace.
String msg = "Failed to plan for statement (" + catalogStmt.getTypeName() + ") \"" + catalogStmt.getSqltext() + "\".";
if (e.getMessage() != null) {
msg += " Error: \"" + e.getMessage() + "\"";
}
throw compiler.new VoltCompilerException(msg);
} catch (Exception e) {
e.printStackTrace();
throw compiler.new VoltCompilerException("Failed to plan for stmt: " + catalogStmt.getTypeName());
}
// There is a hard-coded limit to the number of parameters that can be passed to the EE.
if (plan.parameters.length > CompiledPlan.MAX_PARAM_COUNT) {
throw compiler.new VoltCompilerException("The statement's parameter count " + plan.parameters.length + " must not exceed the maximum " + CompiledPlan.MAX_PARAM_COUNT);
}
// Check order and content determinism before accessing the detail which
// it caches.
boolean orderDeterministic = plan.isOrderDeterministic();
catalogStmt.setIsorderdeterministic(orderDeterministic);
boolean contentDeterministic = plan.isContentDeterministic() && (orderDeterministic || !plan.hasLimitOrOffset());
catalogStmt.setIscontentdeterministic(contentDeterministic);
String nondeterminismDetail = plan.nondeterminismDetail();
catalogStmt.setNondeterminismdetail(nondeterminismDetail);
catalogStmt.setSeqscancount(plan.countSeqScans());
// We will need to update the system catalogs with this new information
for (int i = 0; i < plan.parameters.length; ++i) {
StmtParameter catalogParam = catalogStmt.getParameters().add(String.valueOf(i));
catalogParam.setJavatype(plan.parameters[i].getValueType().getValue());
catalogParam.setIsarray(plan.parameters[i].getParamIsVector());
catalogParam.setIndex(i);
}
catalogStmt.setReplicatedtabledml(plan.replicatedTableDML);
// output the explained plan to disk (or caller) for debugging
// Initial capacity estimate.
StringBuilder planDescription = new StringBuilder(1000);
planDescription.append("SQL: ").append(plan.sql);
// Cost seems to only confuse people who don't understand how this number is used/generated.
if (VoltCompiler.DEBUG_MODE) {
planDescription.append("\nCOST: ").append(plan.cost);
}
planDescription.append("\nPLAN:\n");
planDescription.append(plan.explainedPlan);
String planString = planDescription.toString();
// only write to disk if compiler is in standalone mode
if (compiler.standaloneCompiler) {
BuildDirectoryUtils.writeFile(null, name + ".txt", planString, false);
}
compiler.captureDiagnosticContext(planString);
// build usage links for report generation and put them in the catalog
CatalogUtil.updateUsageAnnotations(db, catalogStmt, plan.rootPlanGraph, plan.subPlanGraph);
// set the explain plan output into the catalog (in hex) for reporting
catalogStmt.setExplainplan(Encoder.hexEncode(plan.explainedPlan));
// compute a hash of the plan
MessageDigest md = null;
try {
md = MessageDigest.getInstance("SHA-1");
} catch (NoSuchAlgorithmException e) {
e.printStackTrace();
assert (false);
// should never happen with healthy jvm
System.exit(-1);
}
// Now update our catalog information
PlanFragment planFragment = catalogStmt.getFragments().add("0");
planFragment.setHasdependencies(plan.subPlanGraph != null);
// mark a fragment as non-transactional if it never touches a persistent table
planFragment.setNontransactional(!fragmentReferencesPersistentTable(plan.rootPlanGraph));
planFragment.setMultipartition(plan.subPlanGraph != null);
byte[] planBytes = writePlanBytes(compiler, planFragment, plan.rootPlanGraph);
md.update(planBytes, 0, planBytes.length);
// compute the 40 bytes of hex from the 20 byte sha1 hash of the plans
md.reset();
md.update(planBytes);
planFragment.setPlanhash(Encoder.hexEncode(md.digest()));
if (plan.subPlanGraph != null) {
planFragment = catalogStmt.getFragments().add("1");
planFragment.setHasdependencies(false);
planFragment.setNontransactional(false);
planFragment.setMultipartition(true);
byte[] subBytes = writePlanBytes(compiler, planFragment, plan.subPlanGraph);
// compute the 40 bytes of hex from the 20 byte sha1 hash of the plans
md.reset();
md.update(subBytes);
planFragment.setPlanhash(Encoder.hexEncode(md.digest()));
}
// Planner should have rejected with an exception any statement with an unrecognized type.
int validType = catalogStmt.getQuerytype();
assert (validType != QueryType.INVALID.getValue());
return false;
} catch (StackOverflowError error) {
String msg = "Failed to plan for statement (" + catalogStmt.getTypeName() + ") \"" + catalogStmt.getSqltext() + "\". Error: \"Encountered stack overflow error. " + "Try reducing the number of predicate expressions in the query.\"";
throw compiler.new VoltCompilerException(msg);
}
}
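The plan hash written into each catalog fragment is simply a hex-encoded SHA-1 digest of the serialized plan bytes (a 20-byte digest rendered as 40 hex characters). Here is a standalone sketch using only the JDK, with a plain hex encoder standing in for VoltDB's Encoder.hexEncode.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class PlanHashSketch {
    // Hex-encode a SHA-1 digest of the plan bytes, as the fragment plan hash above does.
    static String planHash(byte[] planBytes) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        byte[] digest = md.digest(planBytes);             // 20-byte SHA-1 digest
        StringBuilder hex = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            hex.append(String.format("%02x", b & 0xff));  // two hex characters per byte
        }
        return hex.toString();                            // 40-character plan hash
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] fakePlanBytes = "serialized plan".getBytes(StandardCharsets.UTF_8);
        System.out.println(planHash(fakePlanBytes));
    }
}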
use of org.voltdb.planner.CompiledPlan in project voltdb by VoltDB.
In the class StatementCompiler, the method compileDefaultProcedure.
/**
* This procedure compiles a shim org.voltdb.catalog.Procedure representing a default proc.
* The shim has no plan and few details that are expensive to compute.
* The returned proc instance has a full plan and can be used to create a ProcedureRunner.
* Note that while there are two procedure objects here, none are rooted in a real catalog;
* they are entirely parallel to regular, catalog procs.
*
* This code could probably go a few different places. It duplicates a bit too much of the
* StatementCompiler code for my taste, so I put it here. Next pass could reduce some of the
* duplication?
*/
public static Procedure compileDefaultProcedure(PlannerTool plannerTool, Procedure catProc, String sqlText) {
// fake db makes it easy to create procedures that aren't part of the main catalog
Database fakeDb = new Catalog().getClusters().add("cluster").getDatabases().add("database");
Table table = catProc.getPartitiontable();
// determine the type of the query
QueryType qtype = QueryType.getFromSQL(sqlText);
StatementPartitioning partitioning = catProc.getSinglepartition() ? StatementPartitioning.forceSP() : StatementPartitioning.forceMP();
CompiledPlan plan = plannerTool.planSqlCore(sqlText, partitioning);
Procedure newCatProc = fakeDb.getProcedures().add(catProc.getTypeName());
newCatProc.setClassname(catProc.getClassname());
newCatProc.setDefaultproc(true);
newCatProc.setEverysite(false);
newCatProc.setHasjava(false);
newCatProc.setPartitioncolumn(catProc.getPartitioncolumn());
newCatProc.setPartitionparameter(catProc.getPartitionparameter());
newCatProc.setPartitiontable(catProc.getPartitiontable());
newCatProc.setReadonly(catProc.getReadonly());
newCatProc.setSinglepartition(catProc.getSinglepartition());
newCatProc.setSystemproc(false);
if (catProc.getPartitionparameter() >= 0) {
newCatProc.setAttachment(new ProcedurePartitionInfo(VoltType.get((byte) catProc.getPartitioncolumn().getType()), catProc.getPartitionparameter()));
}
CatalogMap<Statement> statements = newCatProc.getStatements();
assert (statements != null);
Statement stmt = statements.add(VoltDB.ANON_STMT_NAME);
stmt.setSqltext(sqlText);
stmt.setReadonly(catProc.getReadonly());
stmt.setQuerytype(qtype.getValue());
stmt.setSinglepartition(catProc.getSinglepartition());
stmt.setIscontentdeterministic(true);
stmt.setIsorderdeterministic(true);
stmt.setNondeterminismdetail("NO CONTENT FOR DEFAULT PROCS");
stmt.setSeqscancount(plan.countSeqScans());
stmt.setReplicatedtabledml(!catProc.getReadonly() && table.getIsreplicated());
// We will need to update the system catalogs with this new information
for (int i = 0; i < plan.parameters.length; ++i) {
StmtParameter catalogParam = stmt.getParameters().add(String.valueOf(i));
catalogParam.setIndex(i);
ParameterValueExpression pve = plan.parameters[i];
catalogParam.setJavatype(pve.getValueType().getValue());
catalogParam.setIsarray(pve.getParamIsVector());
}
PlanFragment frag = stmt.getFragments().add("0");
// compute a hash of the plan
MessageDigest md = null;
try {
md = MessageDigest.getInstance("SHA-1");
} catch (NoSuchAlgorithmException e) {
e.printStackTrace();
assert (false);
// should never happen with healthy jvm
System.exit(-1);
}
byte[] planBytes = writePlanBytes(frag, plan.rootPlanGraph);
md.update(planBytes, 0, planBytes.length);
// compute the 40 bytes of hex from the 20 byte sha1 hash of the plans
md.reset();
md.update(planBytes);
frag.setPlanhash(Encoder.hexEncode(md.digest()));
if (plan.subPlanGraph != null) {
frag.setHasdependencies(true);
frag.setNontransactional(true);
frag.setMultipartition(true);
frag = stmt.getFragments().add("1");
frag.setHasdependencies(false);
frag.setNontransactional(false);
frag.setMultipartition(true);
byte[] subBytes = writePlanBytes(frag, plan.subPlanGraph);
// compute the 40 bytes of hex from the 20 byte sha1 hash of the plans
md.reset();
md.update(subBytes);
frag.setPlanhash(Encoder.hexEncode(md.digest()));
} else {
frag.setHasdependencies(false);
frag.setNontransactional(false);
frag.setMultipartition(false);
}
// set the procedure parameter types from the statement parameter types
int paramCount = 0;
for (StmtParameter stmtParam : CatalogUtil.getSortedCatalogItems(stmt.getParameters(), "index")) {
// name each parameter "param0", "param1", etc...
ProcParameter procParam = newCatProc.getParameters().add("param" + String.valueOf(paramCount));
procParam.setIndex(stmtParam.getIndex());
procParam.setIsarray(stmtParam.getIsarray());
procParam.setType(stmtParam.getJavatype());
paramCount++;
}
return newCatProc;
}
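Both this method and compileStatementAndUpdateCatalog follow the same fragment layout rule: a plan without a sub-plan graph gets a single fragment "0", while a plan with a sub-plan graph gets a coordinator fragment "0" that depends on a distributed fragment "1", both marked multipartition. A small sketch of that decision follows, using a hypothetical record in place of the catalog PlanFragment entries.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for the catalog PlanFragment entries built above.
record FragmentSketch(String id, boolean hasDependencies, boolean multipartition) {}

final class FragmentLayoutSketch {
    // One fragment for a single-part plan; coordinator "0" plus distributed "1" otherwise.
    static List<FragmentSketch> layout(boolean hasSubPlanGraph) {
        List<FragmentSketch> fragments = new ArrayList<>();
        if (hasSubPlanGraph) {
            fragments.add(new FragmentSketch("0", true, true));   // coordinator, waits on "1"
            fragments.add(new FragmentSketch("1", false, true));  // distributed work
        } else {
            fragments.add(new FragmentSketch("0", false, false)); // whole plan in one fragment
        }
        return fragments;
    }

    public static void main(String[] args) {
        System.out.println(layout(false)); // one fragment for a single-partition plan
        System.out.println(layout(true));  // two fragments for a multi-partition plan
    }
}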
use of org.voltdb.planner.CompiledPlan in project voltdb by VoltDB.
In the class PlannerTool, the method planSqlCore.
/**
* Stripped down compile that is ONLY used to plan default procedures.
*/
public synchronized CompiledPlan planSqlCore(String sql, StatementPartitioning partitioning) {
TrivialCostModel costModel = new TrivialCostModel();
DatabaseEstimates estimates = new DatabaseEstimates();
QueryPlanner planner = new QueryPlanner(sql, "PlannerTool", "PlannerToolProc", m_database, partitioning, m_hsql, estimates, !VoltCompiler.DEBUG_MODE, AD_HOC_JOINED_TABLE_LIMIT, costModel, null, null, DeterminismMode.FASTER);
CompiledPlan plan = null;
try {
// do the expensive full planning.
planner.parse();
plan = planner.plan();
assert (plan != null);
} catch (Exception e) {
/*
* Don't log PlanningErrorExceptions or HSQLParseExceptions, as they
* are at least somewhat expected.
*/
String loggedMsg = "";
if (!(e instanceof PlanningErrorException || e instanceof HSQLParseException)) {
logException(e, "Error compiling query");
loggedMsg = " (Stack trace has been written to the log.)";
}
throw new RuntimeException("Error compiling query: " + e.toString() + loggedMsg, e);
}
if (plan == null) {
throw new RuntimeException("Null plan received in PlannerTool.planSql");
}
return plan;
}
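Both planSql and planSqlCore use the same error-handling convention: expected planner failures (PlanningErrorException, HSQLParseException) are rethrown without logging, while anything else is logged first and the wrapped message notes that a stack trace was written. Below is a generic sketch of that convention; the logger callback and the exception classes passed in are placeholders, not the VoltDB ones.

import java.util.function.Consumer;

final class PlannerErrorWrapping {
    // Wrap a planner failure, logging it only if it is not one of the expected types.
    static RuntimeException wrap(Exception e, Consumer<Exception> logger, Class<?>... expectedTypes) {
        boolean expected = false;
        for (Class<?> type : expectedTypes) {
            if (type.isInstance(e)) {
                expected = true;
                break;
            }
        }
        String loggedMsg = "";
        if (!expected) {
            logger.accept(e);                  // only surprising failures hit the log
            loggedMsg = " (Stack trace has been written to the log.)";
        }
        return new RuntimeException("Error compiling query: " + e + loggedMsg, e);
    }

    public static void main(String[] args) {
        RuntimeException wrapped = wrap(new IllegalStateException("boom"),
                ex -> System.err.println("logged: " + ex), IllegalArgumentException.class);
        System.out.println(wrapped.getMessage());
    }
}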