use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.
the class PlanAssembler method calculateGroupbyColumnsCovered.
private List<Integer> calculateGroupbyColumnsCovered(Index index, String fromTableAlias, List<AbstractExpression> bindings) {
List<Integer> coveredGroupByColumns = new ArrayList<>();
List<ParsedColInfo> groupBys = m_parsedSelect.groupByColumns();
String exprsjson = index.getExpressionsjson();
if (exprsjson.isEmpty()) {
List<ColumnRef> indexedColRefs = CatalogUtil.getSortedCatalogItems(index.getColumns(), "index");
for (int j = 0; j < indexedColRefs.size(); j++) {
String indexColumnName = indexedColRefs.get(j).getColumn().getName();
// ignore order of keys in GROUP BY expr
int ithCovered = 0;
boolean foundPrefixedColumn = false;
for (; ithCovered < groupBys.size(); ithCovered++) {
AbstractExpression gbExpr = groupBys.get(ithCovered).expression;
if (!(gbExpr instanceof TupleValueExpression)) {
continue;
}
TupleValueExpression gbTVE = (TupleValueExpression) gbExpr;
// TVE column index has not been resolved currently
if (fromTableAlias.equals(gbTVE.getTableAlias()) && indexColumnName.equals(gbTVE.getColumnName())) {
foundPrefixedColumn = true;
break;
}
}
if (!foundPrefixedColumn) {
// no prefix match any more
break;
}
coveredGroupByColumns.add(ithCovered);
if (coveredGroupByColumns.size() == groupBys.size()) {
// covered all group by columns already
break;
}
}
} else {
StmtTableScan fromTableScan = m_parsedSelect.getStmtTableScanByAlias(fromTableAlias);
// either pure expression index or mix of expressions and simple columns
List<AbstractExpression> indexedExprs = null;
try {
indexedExprs = AbstractExpression.fromJSONArrayString(exprsjson, fromTableScan);
} catch (JSONException e) {
e.printStackTrace();
// This case sounds impossible
return coveredGroupByColumns;
}
for (AbstractExpression indexExpr : indexedExprs) {
// ignore order of keys in GROUP BY expr
List<AbstractExpression> binding = null;
for (int ithCovered = 0; ithCovered < groupBys.size(); ithCovered++) {
AbstractExpression gbExpr = groupBys.get(ithCovered).expression;
binding = gbExpr.bindingToIndexedExpression(indexExpr);
if (binding != null) {
bindings.addAll(binding);
coveredGroupByColumns.add(ithCovered);
break;
}
}
// no prefix match any more or covered all group by columns already
if (binding == null || coveredGroupByColumns.size() == groupBys.size()) {
break;
}
}
}
return coveredGroupByColumns;
}
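The method above walks the index's key columns in order and, for each one, looks for a matching GROUP BY column regardless of the GROUP BY order, stopping at the first index column that is not grouped on. Below is a minimal, self-contained sketch of that prefix-matching idea using plain column names instead of VoltDB's TupleValueExpression machinery; the class and method names are illustrative, not part of VoltDB.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative sketch only -- a simplified model of the prefix-coverage check above.
public class GroupByCoverageSketch {
    // Walk the index key columns in order; for each, look for a matching GROUP BY column
    // (the order of the GROUP BY list does not matter). Stop at the first index column
    // with no match. The result holds the GROUP BY positions covered by the index prefix.
    static List<Integer> coveredGroupByColumns(List<String> indexColumns, List<String> groupByColumns) {
        List<Integer> covered = new ArrayList<>();
        for (String indexColumn : indexColumns) {
            int match = groupByColumns.indexOf(indexColumn);
            if (match == -1) {
                // no prefix match any more
                break;
            }
            covered.add(match);
            if (covered.size() == groupByColumns.size()) {
                // covered all group by columns already
                break;
            }
        }
        return covered;
    }

    public static void main(String[] args) {
        // Index on (A, B, C), GROUP BY C, A: only the leading index column A is covered -> [1]
        System.out.println(coveredGroupByColumns(Arrays.asList("A", "B", "C"), Arrays.asList("C", "A")));
        // Index on (A, B), GROUP BY B, A: both GROUP BY columns are covered -> [1, 0]
        System.out.println(coveredGroupByColumns(Arrays.asList("A", "B"), Arrays.asList("B", "A")));
    }
}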
use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.
the class IndexCountPlanNode method createOrNull.
// Create an IndexCountPlanNode that replaces the parent aggregate and child
// indexscan IF the indexscan's end expressions are a form that can be
// modeled with an end key.
// The supported forms for end expression are:
// - null
// - one filter expression per index key component (ANDed together)
// as "combined" for the IndexScan.
// - fewer filter expressions than index key components with one of them
// (the last) being an LT comparison.
// - 1 fewer filter expressions than index key components,
// but all ANDed equality filters
// The LT restriction comes because when index key prefixes are identical
// to the prefix-only end key, the entire index key sorts greater than the
// prefix-only end-key, because it is always longer.
// These prefix-equal cases would be missed in an EQ or LTE filter,
// causing undercounts.
// A prefix-only LT filter discards prefix-equal cases, so it is allowed.
// @return the IndexCountPlanNode or null if one is not possible.
public static IndexCountPlanNode createOrNull(IndexScanPlanNode isp, AggregatePlanNode apn) {
// add support for reverse scan
// for ASC scan, check endExpression;
// for DESC scan (isReverseScan()), check the searchkeys
List<AbstractExpression> endKeys = new ArrayList<>();
// Translate the index scan's end condition into a list of end key
// expressions and note the comparison operand of the last one.
// Initially assume it to be an equality filter.
IndexLookupType endType = IndexLookupType.EQ;
List<AbstractExpression> endComparisons = ExpressionUtil.uncombinePredicate(isp.getEndExpression());
for (AbstractExpression ae : endComparisons) {
// There should be no more end expressions after an LT or LTE expression that resets the end type.
assert (endType == IndexLookupType.EQ);
ExpressionType exprType = ae.getExpressionType();
if (exprType == ExpressionType.COMPARE_LESSTHAN) {
endType = IndexLookupType.LT;
} else if (exprType == ExpressionType.COMPARE_LESSTHANOREQUALTO) {
endType = IndexLookupType.LTE;
} else {
assert (exprType == ExpressionType.COMPARE_EQUAL || exprType == ExpressionType.COMPARE_NOTDISTINCT);
}
// PlanNodes all need private deep copies of expressions
// so that the resolveColumnIndexes results
// don't get bashed by other nodes or subsequent planner runs
endKeys.add(ae.getRight().clone());
}
int indexSize = 0;
String jsonstring = isp.getCatalogIndex().getExpressionsjson();
List<ColumnRef> indexedColRefs = null;
List<AbstractExpression> indexedExprs = null;
if (jsonstring.isEmpty()) {
indexedColRefs = CatalogUtil.getSortedCatalogItems(isp.getCatalogIndex().getColumns(), "index");
indexSize = indexedColRefs.size();
} else {
try {
indexedExprs = AbstractExpression.fromJSONArrayString(jsonstring, isp.getTableScan());
indexSize = indexedExprs.size();
} catch (JSONException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
int searchKeySize = isp.getSearchKeyExpressions().size();
int endKeySize = endKeys.size();
if (!isp.isReverseScan() && endType != IndexLookupType.LT && endKeySize > 0 && endKeySize < indexSize) {
// That is, when a prefix-only key exists and does not use LT.
if (endType != IndexLookupType.EQ || searchKeySize != indexSize || endKeySize < indexSize - 1) {
return null;
}
// To use an index count for an equality search of a compound key,
// both the search key and end key must have a component for each
// index component.
// If the search key is long enough but the end key is one component
// short, it can be patched with a type-appropriate max key value
// (if one exists for the type), but the end key comparison needs to
// change from EQ to LTE to compensate.
VoltType missingEndKeyType;
// Identify the missing (last) key component, make sure no end filter constrains it,
// and get the missing key component's indexed expression and type.
if (jsonstring.isEmpty()) {
int lastIndex = indexedColRefs.get(endKeySize).getColumn().getIndex();
for (AbstractExpression expr : endComparisons) {
if (((TupleValueExpression) (expr.getLeft())).getColumnIndex() == lastIndex) {
return null;
}
}
int catalogTypeCode = indexedColRefs.get(endKeySize).getColumn().getType();
missingEndKeyType = VoltType.get((byte) catalogTypeCode);
} else {
AbstractExpression lastIndexedExpr = indexedExprs.get(endKeySize);
for (AbstractExpression expr : endComparisons) {
if (expr.getLeft().bindingToIndexedExpression(lastIndexedExpr) != null) {
return null;
}
}
missingEndKeyType = lastIndexedExpr.getValueType();
}
String maxValueForType = missingEndKeyType.getMaxValueForKeyPadding();
// Padding is only possible if the type has a maximum key value
// for which all legal values are less than or equal to it.
if (maxValueForType == null) {
return null;
}
ConstantValueExpression maxKey = new ConstantValueExpression();
maxKey.setValueType(missingEndKeyType);
maxKey.setValue(maxValueForType);
maxKey.setValueSize(missingEndKeyType.getLengthInBytesForFixedTypes());
endType = IndexLookupType.LTE;
endKeys.add(maxKey);
}
// DESC case
if (searchKeySize > 0 && searchKeySize < indexSize) {
return null;
}
return new IndexCountPlanNode(isp, apn, endType, endKeys);
}
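The LT restriction and the max-value padding in createOrNull both come down to how a prefix-only end key compares against full index keys: a full key whose prefix equals the end key always sorts greater, so an EQ or LTE prefix-only end key undercounts. The sketch below, assuming simple integer key components and a lexicographic comparison in which a longer key with an equal prefix sorts greater, shows the undercount and how padding the missing component with the type's maximum value repairs the LTE case; it uses plain arrays rather than VoltDB's plan nodes.
import java.util.Arrays;
import java.util.List;

// Illustrative sketch only -- not VoltDB code.
public class EndKeyPaddingSketch {
    static int compare(int[] a, int[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int c = Integer.compare(a[i], b[i]);
            if (c != 0) {
                return c;
            }
        }
        // A longer key with an equal prefix sorts greater than the shorter
        // prefix-only key, just as in a tree index.
        return Integer.compare(a.length, b.length);
    }

    static long countLessOrEqual(List<int[]> keys, int[] endKey) {
        return keys.stream().filter(k -> compare(k, endKey) <= 0).count();
    }

    public static void main(String[] args) {
        // Two-component index keys, in index order.
        List<int[]> keys = Arrays.asList(
                new int[]{1, 1}, new int[]{1, 2}, new int[]{1, 3}, new int[]{2, 1});
        // Filter "col0 <= 1" with the prefix-only end key {1}: every {1, x} sorts greater
        // than {1}, so the count is 0 instead of 3 -- the undercount described above.
        System.out.println(countLessOrEqual(keys, new int[]{1}));
        // Pad the missing component with the type's max value and keep LTE:
        // now every {1, x} key is included and the count is the expected 3.
        System.out.println(countLessOrEqual(keys, new int[]{1, Integer.MAX_VALUE}));
    }
}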
use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.
the class IndexScanPlanNode method buildSkipNullPredicate.
public static AbstractExpression buildSkipNullPredicate(int nullExprIndex, Index catalogIndex, StmtTableScan tableScan, List<AbstractExpression> searchkeyExpressions, List<Boolean> compareNotDistinct) {
String exprsjson = catalogIndex.getExpressionsjson();
List<AbstractExpression> indexedExprs = null;
if (exprsjson.isEmpty()) {
indexedExprs = new ArrayList<>();
List<ColumnRef> indexedColRefs = CatalogUtil.getSortedCatalogItems(catalogIndex.getColumns(), "index");
assert (nullExprIndex < indexedColRefs.size());
for (int i = 0; i <= nullExprIndex; i++) {
ColumnRef colRef = indexedColRefs.get(i);
Column col = colRef.getColumn();
TupleValueExpression tve = new TupleValueExpression(tableScan.getTableName(), tableScan.getTableAlias(), col, col.getIndex());
indexedExprs.add(tve);
}
} else {
try {
indexedExprs = AbstractExpression.fromJSONArrayString(exprsjson, tableScan);
assert (nullExprIndex < indexedExprs.size());
} catch (JSONException e) {
e.printStackTrace();
assert (false);
}
}
// For a partial index, extract all TVE expressions from its predicate if it is a NULL-rejecting expression.
// These TVEs do not need to be added to the skip-null predicate because that would be redundant.
AbstractExpression indexPredicate = null;
Set<TupleValueExpression> notNullTves = null;
String indexPredicateJson = catalogIndex.getPredicatejson();
if (!StringUtil.isEmpty(indexPredicateJson)) {
try {
indexPredicate = AbstractExpression.fromJSONString(indexPredicateJson, tableScan);
assert (indexPredicate != null);
} catch (JSONException e) {
e.printStackTrace();
assert (false);
}
if (ExpressionUtil.isNullRejectingExpression(indexPredicate, tableScan.getTableAlias())) {
notNullTves = new HashSet<>();
notNullTves.addAll(ExpressionUtil.getTupleValueExpressions(indexPredicate));
}
}
AbstractExpression nullExpr = indexedExprs.get(nullExprIndex);
AbstractExpression skipNullPredicate = null;
if (notNullTves == null || !notNullTves.contains(nullExpr)) {
List<AbstractExpression> exprs = new ArrayList<>();
for (int i = 0; i < nullExprIndex; i++) {
AbstractExpression idxExpr = indexedExprs.get(i);
ExpressionType exprType = ExpressionType.COMPARE_EQUAL;
if (i < compareNotDistinct.size() && compareNotDistinct.get(i)) {
exprType = ExpressionType.COMPARE_NOTDISTINCT;
}
AbstractExpression expr = new ComparisonExpression(exprType, idxExpr, searchkeyExpressions.get(i).clone());
exprs.add(expr);
}
// If the probed key component has no search key, or its search key is not compared
// with IS NOT DISTINCT FROM, then we add "nullExpr IS NULL" to the expression
// for matching tuples to skip. (ENG-11096)
if (nullExprIndex == searchkeyExpressions.size() || compareNotDistinct.get(nullExprIndex) == false) {
// nullExprIndex == m_searchkeyExpressions.size() - 1
AbstractExpression expr = new OperatorExpression(ExpressionType.OPERATOR_IS_NULL, nullExpr, null);
exprs.add(expr);
} else {
return null;
}
skipNullPredicate = ExpressionUtil.combinePredicates(exprs);
skipNullPredicate.finalizeValueTypes();
}
return skipNullPredicate;
}
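Rendered as SQL text rather than as an expression tree, the skip-null predicate built above has a simple shape: an equality (or IS NOT DISTINCT FROM) filter on every key component in front of the probed one, followed by an IS NULL test on the probed component itself. A minimal sketch under that assumption, with made-up column names and without the partial-index and NOT DISTINCT refinements:
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

// Illustrative sketch only -- renders the predicate shape as SQL text.
public class SkipNullPredicateSketch {
    static String skipNullPredicate(List<String> indexColumns, List<String> searchKeys, int nullExprIndex) {
        // Equality filters on every key component in front of the one probed for NULL...
        String prefix = IntStream.range(0, nullExprIndex)
                .mapToObj(i -> indexColumns.get(i) + " = " + searchKeys.get(i))
                .collect(Collectors.joining(" AND "));
        // ...followed by an IS NULL test on the probed component itself.
        String isNull = indexColumns.get(nullExprIndex) + " IS NULL";
        return prefix.isEmpty() ? isNull : prefix + " AND " + isNull;
    }

    public static void main(String[] args) {
        // Index on (A, B, C), search keys (10, 20), NULL probe on the third component:
        // the scan skips rows matching "A = 10 AND B = 20 AND C IS NULL".
        System.out.println(skipNullPredicate(Arrays.asList("A", "B", "C"), Arrays.asList("10", "20"), 2));
    }
}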
use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.
the class TestCatalogUtil method testToSchema.
/**
*
*/
public void testToSchema() {
String search_str = "";
// Simple check to make sure things look ok...
for (Table catalog_tbl : catalog_db.getTables()) {
StringBuilder sb = new StringBuilder();
CatalogSchemaTools.toSchema(sb, catalog_tbl, null, false, null, null);
String sql = sb.toString();
assertTrue(sql.startsWith("CREATE TABLE " + catalog_tbl.getTypeName()));
// Columns
for (Column catalog_col : catalog_tbl.getColumns()) {
assertTrue(sql.indexOf(catalog_col.getTypeName()) != -1);
}
// Constraints
for (Constraint catalog_const : catalog_tbl.getConstraints()) {
ConstraintType const_type = ConstraintType.get(catalog_const.getType());
Index catalog_idx = catalog_const.getIndex();
List<ColumnRef> columns = CatalogUtil.getSortedCatalogItems(catalog_idx.getColumns(), "index");
if (!columns.isEmpty()) {
search_str = "";
String add = "";
for (ColumnRef catalog_colref : columns) {
search_str += add + catalog_colref.getColumn().getTypeName();
add = ", ";
}
assertTrue(sql.indexOf(search_str) != -1);
}
switch(const_type) {
case PRIMARY_KEY:
assertTrue(sql.indexOf("PRIMARY KEY") != -1);
break;
case FOREIGN_KEY:
search_str = "REFERENCES " + catalog_const.getForeignkeytable().getTypeName();
assertTrue(sql.indexOf(search_str) != -1);
break;
}
}
}
}
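For orientation, the assertions above only do substring checks against the generated DDL. A hypothetical example of the kind of text they expect (the table, column, and constraint names here are made up, not taken from the test catalog):
// Hypothetical generated schema text of the shape the assertions scan for substrings in.
public class GeneratedSchemaExample {
    public static void main(String[] args) {
        String sql = "CREATE TABLE CUSTOMER (\n"
                + "    CUSTOMER_ID INTEGER NOT NULL,\n"
                + "    NAME VARCHAR(64),\n"
                + "    CONSTRAINT PK_CUSTOMER PRIMARY KEY (CUSTOMER_ID)\n"
                + ");";
        // The test checks that the text starts with "CREATE TABLE <name>", that every
        // column name appears, and that constraint keywords such as "PRIMARY KEY"
        // appear together with the indexed column list.
        System.out.println(sql.startsWith("CREATE TABLE CUSTOMER"));  // true
        System.out.println(sql.contains("PRIMARY KEY"));              // true
        System.out.println(sql.contains("CUSTOMER_ID"));              // true
    }
}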
use of org.voltdb.catalog.ColumnRef in project voltdb by VoltDB.
the class MaterializedViewProcessor method startProcessing.
/**
* Add materialized view info to the catalog for the tables that are
* materialized views.
* @throws VoltCompilerException
*/
public void startProcessing(Database db, HashMap<Table, String> matViewMap, TreeSet<String> exportTableNames) throws VoltCompilerException {
HashSet<String> viewTableNames = new HashSet<>();
for (Entry<Table, String> entry : matViewMap.entrySet()) {
viewTableNames.add(entry.getKey().getTypeName());
}
for (Entry<Table, String> entry : matViewMap.entrySet()) {
Table destTable = entry.getKey();
String query = entry.getValue();
// get the xml for the query
VoltXMLElement xmlquery = null;
try {
xmlquery = m_hsql.getXMLCompiledStatement(query);
} catch (HSQLParseException e) {
e.printStackTrace();
}
assert (xmlquery != null);
// parse the xml like any other sql statement
ParsedSelectStmt stmt = null;
try {
stmt = (ParsedSelectStmt) AbstractParsedStmt.parse(query, xmlquery, null, db, null);
} catch (Exception e) {
throw m_compiler.new VoltCompilerException(e.getMessage());
}
assert (stmt != null);
String viewName = destTable.getTypeName();
// throw an error if the view isn't within voltdb's limited world view
checkViewMeetsSpec(viewName, stmt);
// The primary key index is yet to be defined (below).
for (Index destIndex : destTable.getIndexes()) {
if (destIndex.getUnique() || destIndex.getAssumeunique()) {
String msg = "A UNIQUE or ASSUMEUNIQUE index is not allowed on a materialized view. " + "Remove the qualifier from the index " + destIndex.getTypeName() + "defined on the materialized view \"" + viewName + "\".";
throw m_compiler.new VoltCompilerException(msg);
}
}
// A Materialized view cannot depend on another view.
for (Table srcTable : stmt.m_tableList) {
if (viewTableNames.contains(srcTable.getTypeName())) {
String msg = String.format("A materialized view (%s) can not be defined on another view (%s).", viewName, srcTable.getTypeName());
throw m_compiler.new VoltCompilerException(msg);
}
}
// The existing code base still need this materializer field to tell if a table
// is a materialized view table. Leaving this for future refactoring.
destTable.setMaterializer(stmt.m_tableList.get(0));
List<Column> destColumnArray = CatalogUtil.getSortedCatalogItems(destTable.getColumns(), "index");
List<AbstractExpression> groupbyExprs = null;
if (stmt.hasComplexGroupby()) {
groupbyExprs = new ArrayList<>();
for (ParsedColInfo col : stmt.groupByColumns()) {
groupbyExprs.add(col.expression);
}
}
// Generate query XMLs for min/max recalculation (ENG-8641)
boolean isMultiTableView = stmt.m_tableList.size() > 1;
MatViewFallbackQueryXMLGenerator xmlGen = new MatViewFallbackQueryXMLGenerator(xmlquery, stmt.groupByColumns(), stmt.m_displayColumns, isMultiTableView);
List<VoltXMLElement> fallbackQueryXMLs = xmlGen.getFallbackQueryXMLs();
// If the view has no GROUP BY columns, do not create the auto-generated primary key
// index or constraint in order to avoid error and crash.
if (stmt.groupByColumns().size() != 0) {
Index pkIndex = destTable.getIndexes().add(HSQLInterface.AUTO_GEN_MATVIEW_IDX);
pkIndex.setType(IndexType.BALANCED_TREE.getValue());
pkIndex.setUnique(true);
// assume index 1 through #grpByCols + 1 are the cols
for (int i = 0; i < stmt.groupByColumns().size(); i++) {
ColumnRef c = pkIndex.getColumns().add(String.valueOf(i));
c.setColumn(destColumnArray.get(i));
c.setIndex(i);
}
Constraint pkConstraint = destTable.getConstraints().add(HSQLInterface.AUTO_GEN_MATVIEW_CONST);
pkConstraint.setType(ConstraintType.PRIMARY_KEY.getValue());
pkConstraint.setIndex(pkIndex);
}
// If we have an unsafe MV message, then
// remember it here. We don't really know how
// to transfer the message through the catalog, but
// we can transmit the existence of the message.
boolean isSafeForDDL = (stmt.getUnsafeMVMessage() == null);
// Here the code path diverges for different kinds of views (single table view and joined table view)
if (isMultiTableView) {
// Materialized view on joined tables
// Add mvHandlerInfo to the destTable:
MaterializedViewHandlerInfo mvHandlerInfo = destTable.getMvhandlerinfo().add("mvHandlerInfo");
mvHandlerInfo.setDesttable(destTable);
for (Table srcTable : stmt.m_tableList) {
// Now we do not support having a view on persistent tables joining streamed tables.
if (exportTableNames.contains(srcTable.getTypeName())) {
String msg = String.format("A materialized view (%s) on joined tables cannot have streamed table (%s) as its source.", viewName, srcTable.getTypeName());
throw m_compiler.new VoltCompilerException(msg);
}
// The view table will need to keep a list of its source tables.
// The list is used to install / uninstall the view reference on the source tables when the
// view handler is constructed / destroyed.
TableRef tableRef = mvHandlerInfo.getSourcetables().add(srcTable.getTypeName());
tableRef.setTable(srcTable);
// There could be more than one partition column candidate, but we will only use the first one we found.
if (destTable.getPartitioncolumn() == null && srcTable.getPartitioncolumn() != null) {
Column partitionColumn = srcTable.getPartitioncolumn();
String partitionColName = partitionColumn.getTypeName();
String srcTableName = srcTable.getTypeName();
destTable.setIsreplicated(false);
if (stmt.hasComplexGroupby()) {
for (int i = 0; i < groupbyExprs.size(); i++) {
AbstractExpression groupbyExpr = groupbyExprs.get(i);
if (groupbyExpr instanceof TupleValueExpression) {
TupleValueExpression tve = (TupleValueExpression) groupbyExpr;
if (tve.getTableName().equals(srcTableName) && tve.getColumnName().equals(partitionColName)) {
// The partition column is set to destColumnArray.get(i), because we have the restriction
// that the non-aggregate columns must come at the very beginning, and must exactly match
// the group-by columns.
// If we are going to remove this restriction in the future, then we need to do more work
// in order to find a proper partition column.
destTable.setPartitioncolumn(destColumnArray.get(i));
break;
}
}
}
} else {
for (int i = 0; i < stmt.groupByColumns().size(); i++) {
ParsedColInfo gbcol = stmt.groupByColumns().get(i);
if (gbcol.tableName.equals(srcTableName) && gbcol.columnName.equals(partitionColName)) {
destTable.setPartitioncolumn(destColumnArray.get(i));
break;
}
}
}
}
// end find partition column
}
// end for each source table
compileFallbackQueriesAndUpdateCatalog(db, query, fallbackQueryXMLs, mvHandlerInfo);
compileCreateQueryAndUpdateCatalog(db, query, xmlquery, mvHandlerInfo);
mvHandlerInfo.setGroupbycolumncount(stmt.groupByColumns().size());
for (int i = 0; i < stmt.m_displayColumns.size(); i++) {
ParsedColInfo col = stmt.m_displayColumns.get(i);
Column destColumn = destColumnArray.get(i);
setTypeAttributesForColumn(destColumn, col.expression);
// Set the expression type here to determine the behavior of the merge function.
destColumn.setAggregatetype(col.expression.getExpressionType().getValue());
}
mvHandlerInfo.setIssafewithnonemptysources(isSafeForDDL);
} else {
// =======================================================================================
// Materialized view on single table
// create the materializedviewinfo catalog node for the source table
Table srcTable = stmt.m_tableList.get(0);
MaterializedViewInfo matviewinfo = srcTable.getViews().add(viewName);
matviewinfo.setDest(destTable);
AbstractExpression where = stmt.getSingleTableFilterExpression();
if (where != null) {
String hex = Encoder.hexEncode(where.toJSONString());
matviewinfo.setPredicate(hex);
} else {
matviewinfo.setPredicate("");
}
List<Column> srcColumnArray = CatalogUtil.getSortedCatalogItems(srcTable.getColumns(), "index");
if (stmt.hasComplexGroupby()) {
// Parse group by expressions to json string
String groupbyExprsJson = null;
try {
groupbyExprsJson = DDLCompiler.convertToJSONArray(groupbyExprs);
} catch (JSONException e) {
throw m_compiler.new VoltCompilerException("Unexpected error serializing non-column " + "expressions for group by expressions: " + e.toString());
}
matviewinfo.setGroupbyexpressionsjson(groupbyExprsJson);
} else {
// add the group by columns from the src table
for (int i = 0; i < stmt.groupByColumns().size(); i++) {
ParsedColInfo gbcol = stmt.groupByColumns().get(i);
Column srcCol = srcColumnArray.get(gbcol.index);
ColumnRef cref = matviewinfo.getGroupbycols().add(srcCol.getTypeName());
// groupByColumns is iterating in order of groups. Store that grouping order
// in the column ref index. When the catalog is serialized, it will, naturally,
// scramble this order like a two-year-old playing dominos, presenting the data
// in a meaningless sequence.
// the column offset in the view's grouping order
cref.setIndex(i);
// the source column from the base (non-view) table
cref.setColumn(srcCol);
// parse out the group by columns into the dest table
ParsedColInfo col = stmt.m_displayColumns.get(i);
Column destColumn = destColumnArray.get(i);
processMaterializedViewColumn(srcTable, destColumn, ExpressionType.VALUE_TUPLE, (TupleValueExpression) col.expression);
}
}
// Set up COUNT(*) column
ParsedColInfo countCol = stmt.m_displayColumns.get(stmt.groupByColumns().size());
assert (countCol.expression.getExpressionType() == ExpressionType.AGGREGATE_COUNT_STAR);
assert (countCol.expression.getLeft() == null);
processMaterializedViewColumn(srcTable, destColumnArray.get(stmt.groupByColumns().size()), ExpressionType.AGGREGATE_COUNT_STAR, null);
// prepare info for aggregation columns.
List<AbstractExpression> aggregationExprs = new ArrayList<>();
boolean hasAggregationExprs = false;
ArrayList<AbstractExpression> minMaxAggs = new ArrayList<>();
for (int i = stmt.groupByColumns().size() + 1; i < stmt.m_displayColumns.size(); i++) {
ParsedColInfo col = stmt.m_displayColumns.get(i);
AbstractExpression aggExpr = col.expression.getLeft();
if (aggExpr.getExpressionType() != ExpressionType.VALUE_TUPLE) {
hasAggregationExprs = true;
}
aggregationExprs.add(aggExpr);
if (col.expression.getExpressionType() == ExpressionType.AGGREGATE_MIN || col.expression.getExpressionType() == ExpressionType.AGGREGATE_MAX) {
minMaxAggs.add(aggExpr);
}
}
compileFallbackQueriesAndUpdateCatalog(db, query, fallbackQueryXMLs, matviewinfo);
// set Aggregation Expressions.
if (hasAggregationExprs) {
String aggregationExprsJson = null;
try {
aggregationExprsJson = DDLCompiler.convertToJSONArray(aggregationExprs);
} catch (JSONException e) {
throw m_compiler.new VoltCompilerException("Unexpected error serializing non-column " + "expressions for aggregation expressions: " + e.toString());
}
matviewinfo.setAggregationexpressionsjson(aggregationExprsJson);
}
// Find index for each min/max aggCol/aggExpr (ENG-6511 and ENG-8512)
for (Integer i = 0; i < minMaxAggs.size(); ++i) {
Index found = findBestMatchIndexForMatviewMinOrMax(matviewinfo, srcTable, groupbyExprs, minMaxAggs.get(i));
IndexRef refFound = matviewinfo.getIndexforminmax().add(i.toString());
if (found != null) {
refFound.setName(found.getTypeName());
} else {
refFound.setName("");
}
}
// The COUNT(*) column should return a BIGINT, whereas here the COUNT(*) was found to have been assigned an INTEGER column.
for (int i = 0; i <= stmt.groupByColumns().size(); i++) {
ParsedColInfo col = stmt.m_displayColumns.get(i);
Column destColumn = destColumnArray.get(i);
setTypeAttributesForColumn(destColumn, col.expression);
}
// parse out the aggregation columns into the dest table
for (int i = stmt.groupByColumns().size() + 1; i < stmt.m_displayColumns.size(); i++) {
ParsedColInfo col = stmt.m_displayColumns.get(i);
Column destColumn = destColumnArray.get(i);
AbstractExpression colExpr = col.expression.getLeft();
TupleValueExpression tve = null;
if (colExpr.getExpressionType() == ExpressionType.VALUE_TUPLE) {
tve = (TupleValueExpression) colExpr;
}
processMaterializedViewColumn(srcTable, destColumn, col.expression.getExpressionType(), tve);
setTypeAttributesForColumn(destColumn, col.expression);
}
if (srcTable.getPartitioncolumn() != null) {
// Set the partitioning of destination tables of associated views.
// If a view's source table is replicated, then a full scan of the
// associated view is single-sited. If the source is partitioned,
// a full scan of the view must be distributed, unless it is filtered
// by the original table's partitioning key, which, to be filtered,
// must also be a GROUP BY key.
destTable.setIsreplicated(false);
setGroupedTablePartitionColumn(matviewinfo, srcTable.getPartitioncolumn());
}
matviewinfo.setIssafewithnonemptysources(isSafeForDDL);
}
// end if single table view materialized view.
}
}
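The single-table branch of startProcessing relies on a fixed layout of the view's display columns: the GROUP BY columns come first and in order, then the mandatory COUNT(*) column, then the aggregate columns. A minimal sketch of that contract, assuming a simple enum in place of VoltDB's ParsedColInfo expression types:
import java.util.Arrays;
import java.util.List;

// Illustrative sketch only -- models the display-column layout assumed above.
public class ViewColumnLayoutSketch {
    enum Kind { GROUP_BY, COUNT_STAR, AGGREGATE }

    // Check that a view's columns are laid out as: group-by columns, then COUNT(*), then aggregates.
    static boolean hasExpectedLayout(List<Kind> displayColumns, int groupByCount) {
        if (displayColumns.size() <= groupByCount) {
            return false;
        }
        for (int i = 0; i < groupByCount; i++) {
            if (displayColumns.get(i) != Kind.GROUP_BY) {
                return false;
            }
        }
        if (displayColumns.get(groupByCount) != Kind.COUNT_STAR) {
            return false;
        }
        for (int i = groupByCount + 1; i < displayColumns.size(); i++) {
            if (displayColumns.get(i) != Kind.AGGREGATE) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // e.g. SELECT a, b, COUNT(*), SUM(c), MIN(d) FROM t GROUP BY a, b;
        List<Kind> cols = Arrays.asList(Kind.GROUP_BY, Kind.GROUP_BY,
                Kind.COUNT_STAR, Kind.AGGREGATE, Kind.AGGREGATE);
        System.out.println(hasExpectedLayout(cols, 2));  // true
    }
}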