Usage example of org.voltdb.planner.PlanningErrorException in the VoltDB project:
from the class PlannerTool, method planSql.
/**
 * Plan the given ad hoc SQL text, consulting the plan caches first when the
 * statement's partitioning can be inferred.
 *
 * Cache strategy: first try an exact-literal match (HIT1), then a match on the
 * parsed/parameterized token with compatible bound constants (HIT2); otherwise
 * fall through to full planning and populate the cache on success (MISS).
 *
 * @param sqlIn         raw SQL text; must be non-null and non-empty
 * @param partitioning  partitioning context; replaced by the planner's inferred
 *                      partitioning when full planning produces one
 * @param isExplainMode true when planning for EXPLAIN -- a parameter-count
 *                      mismatch is then tolerated, but the result is kept out
 *                      of the cache
 * @param userParams    user-supplied values for "?" placeholders, or null
 * @param isSwapTables  true to plan a SWAP TABLES statement instead of parsing SQL
 * @return the planned statement, possibly served from cache
 * @throws RuntimeException wrapping any planning failure, including a
 *         PlanningErrorException for user-visible errors such as passing the
 *         wrong number of parameters
 */
public synchronized AdHocPlannedStatement planSql(String sqlIn, StatementPartitioning partitioning,
        boolean isExplainMode, final Object[] userParams, boolean isSwapTables) {
    CacheUse cacheUse = CacheUse.FAIL;
    if (m_plannerStats != null) {
        m_plannerStats.startStatsCollection();
    }
    boolean hasUserQuestionMark = false;
    boolean wrongNumberParameters = false;
    try {
        if ((sqlIn == null) || (sqlIn.length() == 0)) {
            throw new RuntimeException("Can't plan empty or null SQL.");
        }
        // remove any spaces or newlines
        String sql = sqlIn.trim();
        // Only statements with inferred partitioning are cacheable.
        if (partitioning.isInferred()) {
            // Check the literal cache for a match.
            AdHocPlannedStatement cachedPlan = m_cache.getWithSQL(sqlIn);
            if (cachedPlan != null) {
                cacheUse = CacheUse.HIT1;
                return cachedPlan;
            } else {
                cacheUse = CacheUse.MISS;
            }
        }
        // Reset plan node id counter
        AbstractPlanNode.resetPlanNodeIds();
        //////////////////////
        // PLAN THE STMT
        //////////////////////
        TrivialCostModel costModel = new TrivialCostModel();
        DatabaseEstimates estimates = new DatabaseEstimates();
        QueryPlanner planner = new QueryPlanner(sql, "PlannerTool", "PlannerToolProc", m_database,
                partitioning, m_hsql, estimates, !VoltCompiler.DEBUG_MODE, AD_HOC_JOINED_TABLE_LIMIT,
                costModel, null, null, DeterminismMode.FASTER);
        CompiledPlan plan = null;
        String[] extractedLiterals = null;
        String parsedToken = null;
        try {
            if (isSwapTables) {
                planner.planSwapTables();
            } else {
                planner.parse();
            }
            parsedToken = planner.parameterize();
            // check the parameters count
            // check user input question marks with input parameters
            int inputParamsLength = userParams == null ? 0 : userParams.length; // fixed typo: was "inputParamsLengh"
            if (planner.getAdhocUserParamsCount() != inputParamsLength) {
                wrongNumberParameters = true;
                if (!isExplainMode) {
                    throw new PlanningErrorException(String.format(
                            "Incorrect number of parameters passed: expected %d, passed %d",
                            planner.getAdhocUserParamsCount(), inputParamsLength));
                }
            }
            hasUserQuestionMark = planner.getAdhocUserParamsCount() > 0;
            // do not put wrong parameter explain query into cache
            if (!wrongNumberParameters && partitioning.isInferred()) {
                // The parse token must have been produced by QueryPlanner.parameterize().
                assert (parsedToken != null);
                extractedLiterals = planner.extractedParamLiteralValues();
                List<BoundPlan> boundVariants = m_cache.getWithParsedToken(parsedToken);
                if (boundVariants != null) {
                    assert (!boundVariants.isEmpty());
                    // Find a cached plan variant whose bound constants are
                    // compatible with this statement's literal values.
                    BoundPlan matched = null;
                    for (BoundPlan boundPlan : boundVariants) {
                        if (boundPlan.allowsParams(extractedLiterals)) {
                            matched = boundPlan;
                            break;
                        }
                    }
                    if (matched != null) {
                        CorePlan core = matched.m_core;
                        ParameterSet params = null;
                        if (planner.compiledAsParameterizedPlan()) {
                            params = planner.extractedParamValues(core.parameterTypes);
                        } else if (hasUserQuestionMark) {
                            params = ParameterSet.fromArrayNoCopy(userParams);
                        } else {
                            // No constants AdHoc queries
                            params = ParameterSet.emptyParameterSet();
                        }
                        AdHocPlannedStatement ahps = new AdHocPlannedStatement(
                                sql.getBytes(Constants.UTF8ENCODING), core, params, null);
                        ahps.setBoundConstants(matched.m_constants);
                        // parameterized plan from the cache does not have exception
                        // NOTE(review): this put() keys on the trimmed "sql" while the literal
                        // lookup above and the put() at the bottom use the raw "sqlIn" --
                        // confirm this asymmetry is intentional.
                        m_cache.put(sql, parsedToken, ahps, extractedLiterals, hasUserQuestionMark, false);
                        cacheUse = CacheUse.HIT2;
                        return ahps;
                    }
                }
            }
            // If not caching or there was no cache hit, do the expensive full planning.
            plan = planner.plan();
            assert (plan != null);
            if (plan != null && plan.getStatementPartitioning() != null) {
                partitioning = plan.getStatementPartitioning();
            }
        } catch (Exception e) {
            /*
             * Don't log PlanningErrorExceptions or HSQLParseExceptions, as
             * they are at least somewhat expected.
             */
            String loggedMsg = "";
            if (!((e instanceof PlanningErrorException) || (e instanceof HSQLParseException))) {
                logException(e, "Error compiling query");
                loggedMsg = " (Stack trace has been written to the log.)";
            }
            // Preserve the cause so the full stack is available to callers.
            throw new RuntimeException("Error compiling query: " + e.toString() + loggedMsg, e);
        }
        if (plan == null) {
            throw new RuntimeException("Null plan received in PlannerTool.planSql");
        }
        //////////////////////
        // OUTPUT THE RESULT
        //////////////////////
        CorePlan core = new CorePlan(plan, m_catalogHash);
        AdHocPlannedStatement ahps = new AdHocPlannedStatement(plan, core);
        // do not put wrong parameter explain query into cache
        if (!wrongNumberParameters && partitioning.isInferred()) {
            // Note either the parameter index (per force to a user-provided parameter) or
            // the actual constant value of the partitioning key inferred from the plan.
            // Either or both of these two values may simply default
            // to -1 and to null, respectively.
            core.setPartitioningParamIndex(partitioning.getInferredParameterIndex());
            core.setPartitioningParamValue(partitioning.getInferredPartitioningValue());
            assert (parsedToken != null);
            // Again, plans with inferred partitioning are the only ones supported in the cache.
            m_cache.put(sqlIn, parsedToken, ahps, extractedLiterals, hasUserQuestionMark, planner.wasBadPameterized());
        }
        return ahps;
    } finally {
        // Always record stats for this planning attempt, whatever the outcome.
        if (m_plannerStats != null) {
            m_plannerStats.endStatsCollection(m_cache.getLiteralCacheSize(), m_cache.getCoreCacheSize(), cacheUse, -1);
        }
    }
}
Usage example of org.voltdb.planner.PlanningErrorException in the VoltDB project:
from the class SwapTablesPlanNode, method initializeSwapTablesPlanNode.
/**
* Fill out all of the serializable attributes of the node, validating
* its arguments' compatibility along the way to ensure successful
* execution.
* @param theTable the catalog definition of the 1st table swap argument
* @param otherTable the catalog definition of the 2nd table swap argument
* @throws PlanningErrorException if one or more compatibility validations fail
*/
public void initializeSwapTablesPlanNode(Table theTable, Table otherTable) {
    // Record both target table names on the plan node.
    String theName = theTable.getTypeName();
    setTargetTableName(theName);
    String otherName = otherTable.getTypeName();
    m_otherTargetTableName = otherName;
    // Accumulate every validation failure so the user sees them all at once
    // instead of fixing one problem per attempt.
    FailureMessage failureMessage = new FailureMessage(theName, otherName);
    validateTableCompatibility(theName, otherName, theTable, otherTable, failureMessage);
    validateColumnCompatibility(theName, otherName, theTable, otherTable, failureMessage);
    // Maintain sets of indexes and index-supported (UNIQUE) constraints
    // and the primary key index found on otherTable.
    // Removing them as they are matched by indexes/constraints on theTable
    // and added to the list of swappable indexes should leave the sets empty.
    HashSet<Index> otherIndexSet = new HashSet<>();
    // The constraint set is actually a HashMap to retain the
    // defining constraint name for help with error messages.
    // Track the primary key separately since it should match one-to-one.
    HashMap<Index, String> otherConstraintIndexMap = new HashMap<>();
    Index otherPrimaryKeyIndex = null;
    // Collect the system-defined (internal) indexes supporting constraints
    // and the primary key index if any.
    CatalogMap<Constraint> candidateConstraints = otherTable.getConstraints();
    for (Constraint otherConstraint : candidateConstraints) {
        Index otherIndex = otherConstraint.getIndex();
        if (otherIndex == null) {
            // A constraint with no backing index has no
            // effect on the swap table plan.
            continue;
        }
        // Set aside the one primary key index for special handling.
        if (otherConstraint.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
            otherPrimaryKeyIndex = otherIndex;
            continue;
        }
        otherConstraintIndexMap.put(otherIndex, otherConstraint.getTypeName());
    }
    // Collect the user-defined (external) indexes on otherTable. The indexes
    // in this set are removed as corresponding matches are found.
    // System-generated indexes that support constraints are checked separately,
    // so don't add them to this set.
    CatalogMap<Index> candidateIndexes = otherTable.getIndexes();
    for (Index otherIndex : candidateIndexes) {
        if (otherIndex != otherPrimaryKeyIndex && !otherConstraintIndexMap.containsKey(otherIndex)) {
            otherIndexSet.add(otherIndex);
        }
    }
    // Collect the indexes that support constraints on theTable
    HashSet<Index> theConstraintIndexSet = new HashSet<>();
    Index thePrimaryKeyIndex = null;
    for (Constraint constraint : theTable.getConstraints()) {
        Index theIndex = constraint.getIndex();
        if (theIndex == null) {
            continue;
        }
        if (constraint.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
            thePrimaryKeyIndex = theIndex;
            continue;
        }
        theConstraintIndexSet.add(constraint.getIndex());
    }
    // make sure the indexes are swappable.
    // Primary keys must match one-to-one: both present and swappable, or both absent.
    if (thePrimaryKeyIndex != null && otherPrimaryKeyIndex != null) {
        if (indexesCanBeSwapped(thePrimaryKeyIndex, otherPrimaryKeyIndex)) {
            m_theIndexes.add(thePrimaryKeyIndex.getTypeName());
            m_otherIndexes.add(otherPrimaryKeyIndex.getTypeName());
        } else {
            failureMessage.addReason("PRIMARY KEY constraints do not match on both tables");
        }
    } else if ((thePrimaryKeyIndex != null && otherPrimaryKeyIndex == null) || (thePrimaryKeyIndex == null && otherPrimaryKeyIndex != null)) {
        failureMessage.addReason("one table has a PRIMARY KEY constraint and the other does not");
    }
    // Try to cross-reference each user-defined index on the two tables.
    for (Index theIndex : theTable.getIndexes()) {
        if (theConstraintIndexSet.contains(theIndex) || theIndex == thePrimaryKeyIndex) {
            // Constraints are checked below.
            continue;
        }
        boolean matched = false;
        for (Index otherIndex : otherIndexSet) {
            if (indexesCanBeSwapped(theIndex, otherIndex)) {
                m_theIndexes.add(theIndex.getTypeName());
                m_otherIndexes.add(otherIndex.getTypeName());
                // Safe to remove during iteration only because we break immediately,
                // so the (fail-fast) iterator is never advanced afterward.
                otherIndexSet.remove(otherIndex);
                matched = true;
                break;
            }
        }
        if (matched) {
            continue;
        }
        // No match: look for a likely near-match based on naming
        // convention for the most helpful error message.
        // Otherwise, give a more generic error message.
        String theIndexName = theIndex.getTypeName();
        String message = "the index " + theIndexName + " on table " + theName + " has no corresponding index in the other table";
        String otherIndexName = theIndexName.replace(theName, otherName);
        Index otherIndex = candidateIndexes.getIgnoreCase(otherIndexName);
        if (otherIndex != null) {
            message += "; the closest candidate (" + otherIndexName + ") has mismatches in the following attributes: " + String.join(", ", diagnoseIndexMismatch(theIndex, otherIndex));
        }
        failureMessage.addReason(message);
    }
    // Any indexes remaining in the set on otherTable were never
    // matched along the way.
    if (!otherIndexSet.isEmpty()) {
        List<String> indexNames = otherIndexSet.stream().map(idx -> idx.getTypeName()).collect(Collectors.toList());
        failureMessage.addReason("the table " + otherName + " contains these index(es) " + "which have no corresponding indexes on " + theName + ": " + "(" + String.join(", ", indexNames) + ")");
    }
    // Cross-reference the index-backed (non-primary-key)
    // constraints on the two tables.
    for (Constraint theConstraint : theTable.getConstraints()) {
        Index theIndex = theConstraint.getIndex();
        if (theIndex == null) {
            // A constraint with no backing index has no
            // effect on the swap table plan.
            continue;
        }
        if (theConstraint.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
            // Primary key compatibility checked above.
            continue;
        }
        boolean matched = false;
        for (Entry<Index, String> otherEntry : otherConstraintIndexMap.entrySet()) {
            Index otherIndex = otherEntry.getKey();
            if (indexesCanBeSwapped(theIndex, otherIndex)) {
                m_theIndexes.add(theIndex.getTypeName());
                m_otherIndexes.add(otherIndex.getTypeName());
                // As above, remove-then-break keeps the entrySet iterator from
                // throwing ConcurrentModificationException.
                otherConstraintIndexMap.remove(otherIndex);
                matched = true;
                break;
            }
        }
        if (matched) {
            continue;
        }
        String theConstraintName = theConstraint.getTypeName();
        failureMessage.addReason("the constraint " + theConstraintName + " on table " + theName + " " + "has no corresponding constraint on the other table");
    }
    // Any constraints remaining in the map on otherTable were never
    // matched along the way.
    if (!otherConstraintIndexMap.isEmpty()) {
        StringBuilder sb = new StringBuilder();
        sb.append("these constraints (or system internal index names) on table " + otherName + " " + "have no corresponding constraints on the other table: (");
        String separator = "";
        for (Entry<Index, String> remainder : otherConstraintIndexMap.entrySet()) {
            String constraintName = remainder.getValue();
            String description = (constraintName != null && !constraintName.equals("")) ? constraintName : ("<anonymous with system internal index name: " + remainder.getKey().getTypeName() + ">");
            sb.append(separator).append(description);
            separator = ", ";
        }
        sb.append(")");
        failureMessage.addReason(sb.toString());
    }
    // Any accumulated failure aborts the swap with one combined message.
    if (failureMessage.numFailures() > 0) {
        throw new PlanningErrorException(failureMessage.getMessage());
    }
}
Usage example of org.voltdb.planner.PlanningErrorException in the VoltDB project:
from the class ConstantValueExpression, method refineValueType.
/**
* This method will alter the type of this constant expression based on the context
* in which it appears. For example, each constant in the value list of an INSERT
* statement will be refined to the type of the column in the table being inserted into.
*
* Here is a summary of the rules used to convert types here:
* - VARCHAR literals may be reinterpreted as (depending on the type needed):
* - VARBINARY (string is required to have an even number of hex digits)
* - TIMESTAMP (string must have timestamp format)
* - Some numeric type (any of the four integer types, DECIMAL or FLOAT)
*
* In addition, if this object is a VARBINARY constant (e.g., X'00abcd') and we need
* an integer constant, (any of TINYINT, SMALLINT, INTEGER or BIGINT),
* we interpret the hex digits as a 64-bit signed integer. If there are fewer than 16 hex digits,
* the most significant bits are assumed to be zeros. So for example, X'FF' appearing where we want a
* TINYINT would be out-of-range, since it's 255 and not -1.
*
* There is corresponding code for handling integer hex literals in ParameterConverter for parameters,
* and in HSQL's ExpressionValue class.
*/
@Override
public void refineValueType(VoltType neededType, int neededSize) {
    // Characters of value text consumed per unit of declared size:
    // 2 for VARBINARY (two hex digits per byte), otherwise 1.
    int size_unit = 1;
    // Case 1: type already matches -- only the size may need refinement.
    if (neededType == m_valueType) {
        if (neededSize == m_valueSize) {
            return;
        }
        // Variably sized types need to fit within the target width.
        if (neededType == VoltType.VARBINARY) {
            if (!Encoder.isHexEncodedString(getValue())) {
                throw new PlanningErrorException("Value (" + getValue() + ") has an invalid format for a constant " + neededType.toSQLString() + " value");
            }
            size_unit = 2;
        } else {
            // Only STRING and VARBINARY carry a meaningful size here.
            assert neededType == VoltType.STRING;
        }
        if (getValue().length() > size_unit * neededSize) {
            throw new PlanningErrorException("Value (" + getValue() + ") is too wide for a constant " + neededType.toSQLString() + " value of size " + neededSize);
        }
        setValueSize(neededSize);
        return;
    }
    // Case 2: a NULL constant takes on whatever type/size is needed.
    if (m_isNull) {
        setValueType(neededType);
        setValueSize(neededSize);
        return;
    }
    // Constant's apparent type may not exactly match the target type needed.
    // Case 3: STRING (or untyped) literal reinterpreted as VARBINARY --
    // must be valid hex and fit the declared byte width.
    if (neededType == VoltType.VARBINARY && (m_valueType == VoltType.STRING || m_valueType == null)) {
        if (!Encoder.isHexEncodedString(getValue())) {
            throw new PlanningErrorException("Value (" + getValue() + ") has an invalid format for a constant " + neededType.toSQLString() + " value");
        }
        size_unit = 2;
        if (getValue().length() > size_unit * neededSize) {
            throw new PlanningErrorException("Value (" + getValue() + ") is too wide for a constant " + neededType.toSQLString() + " value of size " + neededSize);
        }
        setValueType(neededType);
        setValueSize(neededSize);
        return;
    }
    // Case 4: untyped literal becomes STRING if it fits.
    if (neededType == VoltType.STRING && m_valueType == null) {
        if (getValue().length() > size_unit * neededSize) {
            throw new PlanningErrorException("Value (" + getValue() + ") is too wide for a constant " + neededType.toSQLString() + " value of size " + neededSize);
        }
        setValueType(neededType);
        setValueSize(neededSize);
        return;
    }
    // Case 5: STRING literal reinterpreted as TIMESTAMP.
    if (neededType == VoltType.TIMESTAMP) {
        if (m_valueType == VoltType.STRING) {
            try {
                // Convert date value in whatever format is supported by
                // TimeStampType into VoltDB native microsecond count.
                // TODO: Should datetime string be supported as the new
                // canonical internal format for timestamp constants?
                // Historically, the long micros value made sense because
                // it was initially the only way and later the most
                // direct way to initialize timestamp values in the EE.
                // But now that long value can not be used to "explain"
                // an expression as a valid SQL timestamp value for DDL
                // round trips, forcing a reverse conversion back through
                // TimeStampType to a datetime string.
                TimestampType ts = new TimestampType(m_value);
                m_value = String.valueOf(ts.getTime());
            }// It couldn't be converted to timestamp.
            catch (IllegalArgumentException e) {
                throw new PlanningErrorException("Value (" + getValue() + ") has an invalid format for a constant " + neededType.toSQLString() + " value");
            }
            setValueType(neededType);
            setValueSize(neededSize);
            return;
        }
    }
    // Case 6: reinterpretation as FLOAT or DECIMAL (but never from VARBINARY).
    if ((neededType == VoltType.FLOAT || neededType == VoltType.DECIMAL) && getValueType() != VoltType.VARBINARY) {
        // Exact-numeric and NUMERIC literals are accepted as-is; anything else
        // must at least parse as a double.
        if (m_valueType == null || (m_valueType != VoltType.NUMERIC && !m_valueType.isExactNumeric())) {
            try {
                Double.parseDouble(getValue());
            } catch (NumberFormatException nfe) {
                throw new PlanningErrorException("Value (" + getValue() + ") has an invalid format for a constant " + neededType.toSQLString() + " value");
            }
        }
        setValueType(neededType);
        setValueSize(neededSize);
        return;
    }
    // Case 7: reinterpretation as an integer type. A VARBINARY constant's hex
    // digits are read as a 64-bit signed integer (see the method comment);
    // anything else must parse as a decimal long. The value is then
    // range-checked against the specific integer type.
    if (neededType.isBackendIntegerType()) {
        long value = 0;
        try {
            if (getValueType() == VoltType.VARBINARY) {
                value = SQLParser.hexDigitsToLong(getValue());
                // Normalize the stored text to the decimal form of the value.
                setValue(Long.toString(value));
            } else {
                value = Long.parseLong(getValue());
            }
        } catch (SQLParser.Exception | NumberFormatException exc) {
            throw new PlanningErrorException("Value (" + getValue() + ") has an invalid format for a constant " + neededType.toSQLString() + " value");
        }
        checkIntegerValueRange(value, neededType);
        m_valueType = neededType;
        m_valueSize = neededType.getLengthInBytesForFixedTypes();
        return;
    }
    // That's it for known type conversions.
    throw new PlanningErrorException("Value (" + getValue() + ") has an invalid format for a constant " + neededType.toSQLString() + " value");
}
Usage example of org.voltdb.planner.PlanningErrorException in the VoltDB project:
from the class ConstantValueExpression, method explain.
/**
 * Render this constant for plan "explain" output.
 *
 * @param unused ignored; present only to satisfy the explain() interface
 * @return "NULL" for a null constant, a single-quoted literal for STRING and
 *         TIMESTAMP values, or the raw value text otherwise
 * @throws PlanningErrorException if a TIMESTAMP constant's internal value is
 *         not a valid microsecond count
 */
@Override
public String explain(String unused) {
    if (m_isNull) {
        return "NULL";
    }
    if (m_valueType == VoltType.STRING) {
        return "'" + m_value + "'";
    }
    if (m_valueType == VoltType.TIMESTAMP) {
        try {
            // Convert the datetime value in its canonical internal form,
            // currently a count of epoch microseconds,
            // through TimeStampType into a timestamp string.
            // Use parseLong rather than Long.valueOf to avoid pointless boxing.
            long micros = Long.parseLong(m_value);
            TimestampType ts = new TimestampType(micros);
            return "'" + ts.toString() + "'";
        }// It couldn't be converted to timestamp.
        catch (IllegalArgumentException e) {
            // NumberFormatException is a subclass of IllegalArgumentException,
            // so an unparseable m_value is covered here too.
            throw new PlanningErrorException("Value (" + getValue() + ") has an invalid format for a constant " + VoltType.TIMESTAMP.toSQLString() + " value");
        }
    }
    return m_value;
}
Usage example of org.voltdb.planner.PlanningErrorException in the VoltDB project:
from the class StmtSubqueryScan, method processTVE.
/**
 * Resolve a tuple value expression against this subquery scan's output schema:
 * locate the named column (disambiguated by the TVE's differentiator), then
 * stamp the expression with that column's index, type, and size.
 *
 * @param expr       the TVE to resolve (mutated in place and returned)
 * @param columnName the output column name to look up
 * @return the same expression instance, now bound to the output column
 * @throws PlanningErrorException if no matching output column exists
 */
@Override
public AbstractExpression processTVE(TupleValueExpression expr, String columnName) {
    Integer foundIndex = m_outputColumnIndexMap.get(Pair.of(columnName, expr.getDifferentiator()));
    if (foundIndex == null) {
        throw new PlanningErrorException("Mismatched columns " + columnName + " in subquery");
    }
    int columnIndex = foundIndex.intValue();
    SchemaColumn outputColumn = m_outputColumnList.get(columnIndex);
    expr.setColumnIndex(columnIndex);
    expr.setTypeSizeAndInBytes(outputColumn);
    return expr;
}
Aggregations