use of org.voltdb.VoltType in project voltdb by VoltDB.
the class AbstractParsedStmt method parseParameters.
/**
 * Populate the statement's paramList from the "parameters" element. Each
 * parameter has an id and an index, both of which are numeric. It also has
 * a type and an indication of whether it's a vector parameter. For each
 * parameter, we create a ParameterValueExpression, named pve, which holds
 * the type and vector parameter indication. We add the pve to two maps,
 * m_paramsById and m_paramsByIndex.
 *
 * We also advance the counter NEXT_PARAMETER_ID past the largest parameter
 * id in the statement. This helps give ids to references to correlated
 * expressions of subqueries.
 *
 * @param root the statement's root VoltXMLElement, whose "parameters" child is scanned
 */
protected void parseParameters(VoltXMLElement root) {
    VoltXMLElement paramsNode = null;
    for (VoltXMLElement node : root.children) {
        if (node.name.equalsIgnoreCase("parameters")) {
            paramsNode = node;
            break;
        }
    }
    if (paramsNode == null) {
        return;
    }
    long max_parameter_id = -1;
    for (VoltXMLElement node : paramsNode.children) {
        if (node.name.equalsIgnoreCase("parameter")) {
            long id = Long.parseLong(node.attributes.get("id"));
            int index = Integer.parseInt(node.attributes.get("index"));
            if (index > max_parameter_id) {
                max_parameter_id = index;
            }
            String typeName = node.attributes.get("valuetype");
            String isVectorParam = node.attributes.get("isvector");
            VoltType type = VoltType.typeFromString(typeName);
            ParameterValueExpression pve = new ParameterValueExpression();
            pve.setParameterIndex(index);
            pve.setValueType(type);
            if (isVectorParam != null && isVectorParam.equalsIgnoreCase("true")) {
                pve.setParamIsVector();
            }
            m_paramsById.put(id, pve);
            m_paramsByIndex.put(index, pve);
        }
    }
    if (max_parameter_id >= NEXT_PARAMETER_ID) {
        NEXT_PARAMETER_ID = (int) max_parameter_id + 1;
    }
}
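Not from the VoltDB source -- just a minimal, self-contained sketch of the registration step above, assuming a "parameters" element shaped roughly like <parameters><parameter id="3" index="0" valuetype="BIGINT" isvector="false"/></parameters> (the attribute names appear in the code above; the concrete values are made up). Plain JDK maps and a String stand in for m_paramsById, m_paramsByIndex, and ParameterValueExpression, to show how the same expression object becomes reachable both by id and by index, which is what later lookups such as m_paramsById.get(id) in parseValueExpression rely on.

import java.util.HashMap;
import java.util.Map;

public class ParameterRegistrationSketch {
    public static void main(String[] args) {
        // Stand-ins for m_paramsById and m_paramsByIndex; the value would be a
        // ParameterValueExpression in the real code.
        Map<Long, String> paramsById = new HashMap<>();
        Map<Integer, String> paramsByIndex = new HashMap<>();

        // One <parameter id="3" index="0" valuetype="BIGINT" isvector="false"/> element (illustrative values).
        long id = 3;
        int index = 0;
        String pve = "pve(index=0, type=BIGINT, vector=false)";

        // The same expression is registered under both keys, so later expression parsing
        // can resolve a node's "id" attribute back to the parameter it refers to.
        paramsById.put(id, pve);
        paramsByIndex.put(index, pve);

        System.out.println(paramsById.get(3L));   // pve(index=0, type=BIGINT, vector=false)
        System.out.println(paramsByIndex.get(0)); // same entry, found by index
    }
}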
use of org.voltdb.VoltType in project voltdb by VoltDB.
the class AbstractParsedStmt method parseValueExpression.
/**
 * Parse a "value" expression node into either a ConstantValueExpression,
 * a ParameterValueExpression, or a parameter that keeps its original
 * constant value (for literals parameterized by the plan caching code).
 *
 * @param exprNode the VoltXMLElement for the value expression
 * @return the resulting AbstractExpression
 */
private AbstractExpression parseValueExpression(VoltXMLElement exprNode) {
    String isParam = exprNode.attributes.get("isparam");
    String isPlannerGenerated = exprNode.attributes.get("isplannergenerated");
    // A ParameterValueExpression is needed to represent any user-provided or planner-injected parameter.
    boolean needParameter = (isParam != null) && (isParam.equalsIgnoreCase("true"));
    // A ConstantValueExpression is needed to represent a constant in the statement,
    // EVEN if that constant has been "parameterized" by the plan caching code.
    ConstantValueExpression cve = null;
    boolean needConstant = (needParameter == false) || ((isPlannerGenerated != null) && (isPlannerGenerated.equalsIgnoreCase("true")));
    if (needConstant) {
        String type = exprNode.attributes.get("valuetype");
        VoltType vt = VoltType.typeFromString(type);
        assert (vt != VoltType.VOLTTABLE);
        cve = new ConstantValueExpression();
        cve.setValueType(vt);
        if ((vt != VoltType.NULL) && (vt != VoltType.NUMERIC)) {
            int size = vt.getMaxLengthInBytes();
            cve.setValueSize(size);
        }
        if (!needParameter && vt != VoltType.NULL) {
            String valueStr = exprNode.attributes.get("value");
            // Sanity-check that the string literal can actually be converted to the given type.
            if (valueStr != null) {
                try {
                    switch (vt) {
                        case BIGINT:
                        case TIMESTAMP:
                            Long.valueOf(valueStr);
                            break;
                        case FLOAT:
                            Double.valueOf(valueStr);
                            break;
                        case DECIMAL:
                            VoltDecimalHelper.stringToDecimal(valueStr);
                            break;
                        default:
                            break;
                    }
                } catch (PlanningErrorException ex) {
                    // We're happy with these.
                    throw ex;
                } catch (NumberFormatException ex) {
                    throw new PlanningErrorException("Numeric conversion error to type " + vt.name() + " " + ex.getMessage().toLowerCase());
                } catch (Exception ex) {
                    throw new PlanningErrorException(ex.getMessage());
                }
            }
            cve.setValue(valueStr);
        }
    }
    if (needParameter) {
        long id = Long.parseLong(exprNode.attributes.get("id"));
        ParameterValueExpression expr = m_paramsById.get(id);
        assert (expr != null);
        if (needConstant) {
            expr.setOriginalValue(cve);
            cve.setValue(m_paramValues[expr.getParameterIndex()]);
        }
        return expr;
    }
    return cve;
}
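The interplay of the isparam and isplannergenerated attributes above boils down to three cases. Here is a small standalone restatement of that branching, with an illustrative enum in place of the real expression classes (a sketch of the decision only, not VoltDB code):

public class ValueExpressionKindSketch {
    enum Kind { PARAMETER_ONLY, PARAMETER_WITH_ORIGINAL_CONSTANT, CONSTANT_ONLY }

    static Kind classify(boolean isParam, boolean isPlannerGenerated) {
        boolean needParameter = isParam;
        // A constant is still materialized when the "parameter" was injected by plan caching.
        boolean needConstant = !needParameter || isPlannerGenerated;
        if (needParameter && needConstant) {
            return Kind.PARAMETER_WITH_ORIGINAL_CONSTANT;
        }
        return needParameter ? Kind.PARAMETER_ONLY : Kind.CONSTANT_ONLY;
    }

    public static void main(String[] args) {
        System.out.println(classify(true, false));  // PARAMETER_ONLY: a user-supplied '?'
        System.out.println(classify(true, true));   // PARAMETER_WITH_ORIGINAL_CONSTANT: literal parameterized by the plan cache
        System.out.println(classify(false, false)); // CONSTANT_ONLY: plain literal in the SQL text
    }
}

The middle case is why the method still builds a ConstantValueExpression for a "parameter": when the plan cache turned a literal into a parameter, the original constant is kept via setOriginalValue and the current parameter value is copied back into it.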
use of org.voltdb.VoltType in project voltdb by VoltDB.
the class CorePlan method flattenToBuffer.
public void flattenToBuffer(ByteBuffer buf) throws IOException {
    // plan fragments first
    buf.putInt(aggregatorFragment.length);
    buf.put(aggregatorFragment);
    buf.put(aggregatorHash);
    if (collectorFragment == null) {
        buf.putInt(-1);
    } else {
        buf.putInt(collectorFragment.length);
        buf.put(collectorFragment);
        buf.put(collectorHash);
    }
    // booleans
    buf.put((byte) (isReplicatedTableDML ? 1 : 0));
    buf.put((byte) (readOnly ? 1 : 0));
    // catalog hash
    buf.put(catalogHash);
    // param types
    buf.putShort((short) parameterTypes.length);
    for (VoltType type : parameterTypes) {
        buf.put(type.getValue());
    }
}
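Because flattenToBuffer writes the hashes with no length prefix, any reader has to know their size in advance. Below is a sketch of a reader that simply mirrors the write order above; the readMirror name, the hashLen parameter, and the 4-byte demo hash are assumptions for illustration, not the real deserialization code.

import java.nio.ByteBuffer;

public class CorePlanLayoutSketch {
    // Reads fields back in the same order flattenToBuffer wrote them.
    static void readMirror(ByteBuffer buf, int hashLen) {
        byte[] aggregatorFragment = new byte[buf.getInt()];
        buf.get(aggregatorFragment);
        byte[] aggregatorHash = new byte[hashLen];
        buf.get(aggregatorHash);
        int collectorLen = buf.getInt();              // -1 means "no collector fragment"
        if (collectorLen >= 0) {
            byte[] collectorFragment = new byte[collectorLen];
            buf.get(collectorFragment);
            byte[] collectorHash = new byte[hashLen];
            buf.get(collectorHash);
        }
        boolean isReplicatedTableDML = buf.get() == 1;
        boolean readOnly = buf.get() == 1;
        byte[] catalogHash = new byte[hashLen];
        buf.get(catalogHash);
        short paramCount = buf.getShort();            // one type byte per parameter follows
        byte[] paramTypeValues = new byte[paramCount];
        buf.get(paramTypeValues);
        System.out.println("readOnly=" + readOnly + ", paramCount=" + paramCount);
    }

    public static void main(String[] args) {
        final int hashLen = 4;                        // demo value only; the real hashes are longer
        ByteBuffer buf = ByteBuffer.allocate(64);
        buf.putInt(3).put(new byte[] {1, 2, 3});      // aggregator fragment
        buf.put(new byte[hashLen]);                   // aggregator hash
        buf.putInt(-1);                               // no collector fragment
        buf.put((byte) 0).put((byte) 1);              // isReplicatedTableDML=false, readOnly=true
        buf.put(new byte[hashLen]);                   // catalog hash
        buf.putShort((short) 2).put((byte) 6).put((byte) 9); // two arbitrary parameter type bytes
        buf.flip();
        readMirror(buf, hashLen);
    }
}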
use of org.voltdb.VoltType in project voltdb by VoltDB.
the class AbstractParsedStmt method parseFunctionExpression.
/**
 * Parse a function call node and its argument subtrees.
 *
 * @param exprNode the VoltXMLElement for the function call
 * @return a new FunctionExpression
 */
private AbstractExpression parseFunctionExpression(VoltXMLElement exprNode) {
    String name = exprNode.attributes.get("name").toLowerCase();
    String disabled = exprNode.attributes.get("disabled");
    if (disabled != null) {
        throw new PlanningErrorException("Function '" + name + "' is not supported in VoltDB: " + disabled);
    }
    String value_type_name = exprNode.attributes.get("valuetype");
    VoltType value_type = VoltType.typeFromString(value_type_name);
    String function_id = exprNode.attributes.get("function_id");
    assert (function_id != null);
    int idArg = 0;
    try {
        idArg = Integer.parseInt(function_id);
    } catch (NumberFormatException nfe) {
        // Leave idArg at 0; the assert below flags an unparseable function_id.
    }
    assert (idArg > 0);
    String result_type_parameter_index = exprNode.attributes.get("result_type_parameter_index");
    String implied_argument = exprNode.attributes.get("implied_argument");
    ArrayList<AbstractExpression> args = new ArrayList<>();
    for (VoltXMLElement argNode : exprNode.children) {
        assert (argNode != null);
        // recursively parse each argument subtree (could be any kind of expression).
        AbstractExpression argExpr = parseExpressionNode(argNode);
        assert (argExpr != null);
        args.add(argExpr);
    }
    FunctionExpression expr = new FunctionExpression();
    expr.setAttributes(name, implied_argument, idArg);
    expr.setArgs(args);
    if (value_type != null) {
        expr.setValueType(value_type);
        if (value_type != VoltType.INVALID && value_type != VoltType.NUMERIC) {
            int size = value_type.getMaxLengthInBytes();
            expr.setValueSize(size);
        }
    }
    if (result_type_parameter_index != null) {
        int parameter_idx = -1;
        try {
            parameter_idx = Integer.parseInt(result_type_parameter_index);
        } catch (NumberFormatException nfe) {
            // Leave parameter_idx at -1; the asserts below flag a bad index.
        }
        // better be valid by now.
        assert (parameter_idx >= 0);
        // must refer to a provided argument
        assert (parameter_idx < args.size());
        expr.setResultTypeParameterIndex(parameter_idx);
        expr.negotiateInitialValueTypes();
    }
    return expr;
}
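One attribute worth a concrete illustration is result_type_parameter_index: as the code above suggests, it names which argument the function's result type is negotiated from (via setResultTypeParameterIndex and negotiateInitialValueTypes). The sketch below restates that idea with plain strings in place of types; the class and method names are illustrative, not VoltDB APIs.

import java.util.Arrays;
import java.util.List;

public class ResultTypeParameterSketch {
    static String resultType(List<String> argTypes, int resultTypeParameterIndex) {
        // must refer to a provided argument, mirroring the asserts in parseFunctionExpression
        if (resultTypeParameterIndex < 0 || resultTypeParameterIndex >= argTypes.size()) {
            throw new IllegalArgumentException("result_type_parameter_index out of range");
        }
        return argTypes.get(resultTypeParameterIndex);
    }

    public static void main(String[] args) {
        // e.g. a function whose return type follows its first argument
        System.out.println(resultType(Arrays.asList("DECIMAL", "INTEGER"), 0)); // DECIMAL
    }
}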
use of org.voltdb.VoltType in project voltdb by VoltDB.
the class StatementPartitioning method analyzeForMultiPartitionAccess.
/**
 * Given the query's table scans and its collection(s) of equality-filtered columns and their equivalents,
 * determine whether all joins involving partitioned tables can be executed locally on a single partition.
 * This is only the case when they include equality comparisons between partition key columns.
 * VoltDB will reject joins of multiple partitioned tables unless all their partition keys are
 * constrained to be equal to each other.
 * Example: select * from T1, T2 where T1.ID = T2.ID
 * Additionally, in this case, there may be a constant equality filter on any of the columns,
 * which we want to extract as our SP partitioning parameter.
 *
 * The result is stored in m_countOfIndependentlyPartitionedTables: the number of independently
 * partitioned tables -- partitioned tables that aren't joined or filtered by the same value.
 * The join is marked invalid if there is more than one.
 *
 * @param scans The statement's table scans.
 * @param valueEquivalence Their column equality filters
 */
public void analyzeForMultiPartitionAccess(Collection<StmtTableScan> scans, HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence) {
    //* enable to debug */ System.out.println("DEBUG: analyze4MPAccess w/ scans:" + scans.size() + " filters:" + valueEquivalence.size());
    TupleValueExpression tokenPartitionKey = null;
    Set<Set<AbstractExpression>> eqSets = new HashSet<Set<AbstractExpression>>();
    int unfilteredPartitionKeyCount = 0;
    // reset this flag to forget the last result of the multiple partition access path.
    // AdHoc with parameters will call this function at least two times
    // By default this flag should be true.
    setJoinValid(true);
    setJoinInvalidReason(null);
    boolean subqueryHasReceiveNode = false;
    boolean hasPartitionedTableJoin = false;
    // Iterate over the tables to collect partition columns.
    for (StmtTableScan tableScan : scans) {
        // Replicated tables don't need filter coverage.
        if (tableScan.getIsReplicated()) {
            continue;
        }
        // The partition column can be null in an obscure edge case.
        // The table is declared non-replicated yet specifies no partitioning column.
        // This can occur legitimately when views based on partitioned tables neglect to group by the partition column.
        // The interpretation of this edge case is that the table has "randomly distributed data".
        // In such a case, the table is valid for use by MP queries only and can only be joined with replicated tables
        // because it has no recognized partitioning join key.
        List<SchemaColumn> columnsNeedingCoverage = tableScan.getPartitioningColumns();
        if (tableScan instanceof StmtSubqueryScan) {
            StmtSubqueryScan subScan = (StmtSubqueryScan) tableScan;
            subScan.promoteSinglePartitionInfo(valueEquivalence, eqSets);
            CompiledPlan subqueryPlan = subScan.getBestCostPlan();
            if ((!subScan.canRunInOneFragment()) ||
                    ((subqueryPlan != null) && subqueryPlan.rootPlanGraph.hasAnyNodeOfClass(AbstractReceivePlanNode.class))) {
                if (subqueryHasReceiveNode) {
                    // Has found another subquery with receive node on the same level
                    // Not going to support this kind of subquery join with 2 fragment plan.
                    setJoinValid(false);
                    setJoinInvalidReason("This multipartition query is not plannable. " + "It has a subquery which cannot be single partition.");
                    // Still needs to count the independent partition tables
                    break;
                }
                subqueryHasReceiveNode = true;
                if (subScan.isTableAggregate()) {
                    // Any process based on this subquery should require 1 fragment only.
                    continue;
                }
            } else {
                // this subquery partition table without receive node
                hasPartitionedTableJoin = true;
            }
        } else {
            // This table is a partition table
            hasPartitionedTableJoin = true;
        }
        boolean unfiltered = true;
        for (AbstractExpression candidateColumn : valueEquivalence.keySet()) {
            if (!(candidateColumn instanceof TupleValueExpression)) {
                continue;
            }
            TupleValueExpression candidatePartitionKey = (TupleValueExpression) candidateColumn;
            if (!canCoverPartitioningColumn(candidatePartitionKey, columnsNeedingCoverage)) {
                continue;
            }
            unfiltered = false;
            if (tokenPartitionKey == null) {
                tokenPartitionKey = candidatePartitionKey;
            }
            eqSets.add(valueEquivalence.get(candidatePartitionKey));
        }
        if (unfiltered) {
            ++unfilteredPartitionKeyCount;
        }
    }
    // end for each table StmtTableScan in the collection
    m_countOfIndependentlyPartitionedTables = eqSets.size() + unfilteredPartitionKeyCount;
    //* enable to debug */ System.out.println("DEBUG: analyze4MPAccess found: " + m_countOfIndependentlyPartitionedTables + " = " + eqSets.size() + " + " + unfilteredPartitionKeyCount);
    if (m_countOfIndependentlyPartitionedTables > 1) {
        setJoinValid(false);
        setJoinInvalidReason("This query is not plannable. " + "The planner cannot guarantee that all rows would be in a single partition.");
    }
    // A subquery that requires cross-partition access (a receive node) is joined with a partitioned
    // table at the outer level. Not going to support this kind of join.
    if (subqueryHasReceiveNode && hasPartitionedTableJoin) {
        setJoinValid(false);
        setJoinInvalidReason("This query is not plannable. It has a subquery which needs cross-partition access.");
    }
    if ((unfilteredPartitionKeyCount == 0) && (eqSets.size() == 1)) {
        for (Set<AbstractExpression> partitioningValues : eqSets) {
            for (AbstractExpression constExpr : partitioningValues) {
                if (constExpr instanceof TupleValueExpression) {
                    continue;
                }
                VoltType valueType = tokenPartitionKey.getValueType();
                addPartitioningExpression(tokenPartitionKey.getTableName() + '.' + tokenPartitionKey.getColumnName(), constExpr, valueType);
                // Only need one constant value.
                break;
            }
        }
    }
}
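To make the counting rule at the end concrete: every set of partition keys tied together by equality filters counts once, and every partitioned table whose partition key is left unfiltered counts once on its own; more than one independent group makes the join unplannable as written. A toy restatement follows, using the javadoc's T1/T2 example (column names are illustrative and the real code works on expression objects, not strings):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class IndependentPartitionCountSketch {
    // Mirrors: m_countOfIndependentlyPartitionedTables = eqSets.size() + unfilteredPartitionKeyCount
    static int countIndependentlyPartitioned(Set<Set<String>> eqSets, int unfilteredPartitionKeyCount) {
        return eqSets.size() + unfilteredPartitionKeyCount;
    }

    public static void main(String[] args) {
        // select * from T1, T2 where T1.ID = T2.ID (both partitioned on ID):
        // one equivalence set, no unfiltered partition keys -> count 1, join not rejected.
        Set<Set<String>> joined = new HashSet<>();
        joined.add(new HashSet<>(Arrays.asList("T1.ID", "T2.ID")));
        System.out.println(countIndependentlyPartitioned(joined, 0)); // 1

        // select * from T1, T2 with no equality filter on the partition keys:
        // no equivalence sets, two unfiltered partitioned tables -> count 2, planner flags the join.
        System.out.println(countIndependentlyPartitioned(new HashSet<>(), 2)); // 2
    }
}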