use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class MetaDataClient, method createIndex.
/**
* Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling
* MetaDataClient.createTable. In doing so, we perform the following translations:
* 1) Change the type of any columns being indexed to types that support null if the column is nullable.
* For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type supports null
* when it's in the row key while a BIGINT does not.
* 2) Append any row key column from the data table that is not in the indexed column list. Our indexes
* rely on having a 1:1 correspondence between the index and data rows.
* 3) Change the name of the columns to include the column family. For example, if you have a column
* named "B" in a column family named "A", the indexed column name will be "A:B". This makes it easy
* to translate the column references in a query to the correct column references in an index table
* regardless of whether the column reference is prefixed with the column family name or not. It also
* has the side benefit of allowing the same named column in different column families to both be
* listed as an index column.
* @param statement the CREATE INDEX statement being executed
* @param splits pre-split points for the new index table, if any
* @return MutationState from population of index table from data table
* @throws SQLException
*/
public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException {
IndexKeyConstraint ik = statement.getIndexConstraint();
TableName indexTableName = statement.getIndexTableName();
Map<String, Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
Map<String, Object> commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1);
populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps);
List<Pair<ParseNode, SortOrder>> indexParseNodeAndSortOrderList = ik.getParseNodeAndSortOrderList();
List<ColumnName> includedColumns = statement.getIncludeColumns();
TableRef tableRef = null;
PTable table = null;
int numRetries = 0;
boolean allocateIndexId = false;
boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL;
int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
if (isLocalIndex) {
if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
}
if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
}
}
while (true) {
try {
ColumnResolver resolver = FromCompiler.getResolver(statement, connection, statement.getUdfParseNodes());
tableRef = resolver.getTables().get(0);
Date asyncCreatedDate = null;
if (statement.isAsync()) {
asyncCreatedDate = new Date(tableRef.getTimeStamp());
}
PTable dataTable = tableRef.getTable();
boolean isTenantConnection = connection.getTenantId() != null;
if (isTenantConnection) {
if (dataTable.getType() != PTableType.VIEW) {
throw new SQLFeatureNotSupportedException("An index may only be created for a VIEW through a tenant-specific connection");
}
}
if (!dataTable.isImmutableRows()) {
if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
}
if (!connection.getQueryServices().hasIndexWALCodec() && !dataTable.isTransactional()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setTableName(indexTableName.getTableName()).build().buildException();
}
}
int posOffset = 0;
List<PColumn> pkColumns = dataTable.getPKColumns();
Set<RowKeyColumnExpression> unusedPkColumns;
if (dataTable.getBucketNum() != null) {
// Ignore SALT column
unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size() - 1);
posOffset++;
} else {
unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
}
for (int i = posOffset; i < pkColumns.size(); i++) {
PColumn column = pkColumns.get(i);
unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\"" + column.getName().getString() + "\""));
}
List<ColumnDefInPkConstraint> allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size());
List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexParseNodeAndSortOrderList.size());
/*
* Allocate an index ID in two circumstances:
* 1) for a local index, as all local indexes will reside in the same HBase table
* 2) for a view on an index.
*/
if (isLocalIndex || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) {
allocateIndexId = true;
PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
ColumnName colName = ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName());
allPkColumns.add(new ColumnDefInPkConstraint(colName, SortOrder.getDefault(), false));
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault(), null, false));
}
if (dataTable.isMultiTenant()) {
PColumn col = dataTable.getPKColumns().get(posOffset);
RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString());
unusedPkColumns.remove(columnExpression);
PDataType dataType = IndexUtil.getIndexColumnDataType(col);
ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false));
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(), col.getName().getString(), col.isRowTimestamp()));
}
PhoenixStatement phoenixStatement = new PhoenixStatement(connection);
StatementContext context = new StatementContext(phoenixStatement, resolver);
IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
Set<ColumnName> indexedColumnNames = Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size());
for (Pair<ParseNode, SortOrder> pair : indexParseNodeAndSortOrderList) {
ParseNode parseNode = pair.getFirst();
// normalize the parse node
parseNode = StatementNormalizer.normalize(parseNode, resolver);
// compile the parseNode to get an expression
expressionIndexCompiler.reset();
Expression expression = parseNode.accept(expressionIndexCompiler);
if (expressionIndexCompiler.isAggregate()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
}
if (expression.getDeterminism() != Determinism.ALWAYS) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
}
if (expression.isStateless()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
}
unusedPkColumns.remove(expression);
// Go through parse node to get string as otherwise we
// can lose information during compilation
StringBuilder buf = new StringBuilder();
parseNode.toSQL(resolver, buf);
// need to escape backslash as this expression will be re-parsed later
String expressionStr = StringUtil.escapeBackslash(buf.toString());
ColumnName colName = null;
ColumnRef colRef = expressionIndexCompiler.getColumnRef();
boolean isRowTimestamp = false;
if (colRef != null) {
// if this is a regular column
PColumn column = colRef.getColumn();
String columnFamilyName = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
isRowTimestamp = column.isRowTimestamp();
if (colRef.getColumn().getExpressionStr() != null) {
expressionStr = colRef.getColumn().getExpressionStr();
}
} else {
// if this is an expression
// TODO: column names cannot contain double quotes; remove this once PHOENIX-1621 is fixed
String name = expressionStr.replaceAll("\"", "'");
colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
}
indexedColumnNames.add(colName);
PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp));
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expressionStr, isRowTimestamp));
}
// Next all the PK columns from the data table that aren't indexed
if (!unusedPkColumns.isEmpty()) {
for (RowKeyColumnExpression colExpression : unusedPkColumns) {
PColumn col = dataTable.getPKColumns().get(colExpression.getPosition());
// we don't need these in the index
if (col.getViewConstant() == null) {
ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
allPkColumns.add(new ColumnDefInPkConstraint(colName, colExpression.getSortOrder(), col.isRowTimestamp()));
PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(), colExpression.getDataType());
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), colExpression.isNullable(), colExpression.getMaxLength(), colExpression.getScale(), false, colExpression.getSortOrder(), colExpression.toString(), col.isRowTimestamp()));
}
}
}
// Last all the included columns (minus any PK columns)
for (ColumnName colName : includedColumns) {
PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
// Check for duplicates between indexed and included columns
if (indexedColumnNames.contains(colName)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
}
if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
// Need to re-create ColumnName, since the above one won't have the column family name
colName = ColumnName.caseSensitiveColumnName(isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(col.getFamilyName().getString()) : col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col));
columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(), col.getExpressionStr(), col.isRowTimestamp()));
}
}
// We need this in the props so that the correct column family is created
if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW && !allocateIndexId) {
statement.getProps().put("", new Pair<String, Object>(DEFAULT_COLUMN_FAMILY_NAME, dataTable.getDefaultFamilyName().getString()));
}
PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, dataTable.getName().getString());
CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, null, statement.getBindCount(), null);
table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, allocateIndexId, statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
break;
} catch (ConcurrentTableMutationException e) {
// Can happen if parent data table changes while above is in progress
if (numRetries < 5) {
numRetries++;
continue;
}
throw e;
}
}
if (table == null) {
return new MutationState(0, 0, connection);
}
if (logger.isInfoEnabled())
logger.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.INDEX_ASYNC_BUILD_ENABLED, QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED);
// In async mode we return immediately, as the MR job still needs to be triggered.
if (statement.isAsync() && asyncIndexBuildEnabled) {
return new MutationState(0, 0, connection);
}
// If our connection is at a fixed point-in-time, we need to open a new connection so that our new index table is visible.
if (connection.getSCN() != null) {
return buildIndexAtTimeStamp(table, statement.getTable());
}
return buildIndex(table, tableRef);
}
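As a point of reference, here is a minimal client-side sketch (not from the Phoenix sources) of the CREATE INDEX variants that this method translates into a CreateTableStatement: a covered global index, a local index (which needs local indexes to be enabled on the cluster), and an asynchronously built index. The JDBC URL and all table, column, and index names are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateIndexExample {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; adjust to your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS example_table ("
                    + "id BIGINT NOT NULL PRIMARY KEY, a.b VARCHAR, a.c BIGINT)");
            // Covered global index: A.B is indexed, A.C is carried along as an included column.
            stmt.execute("CREATE INDEX IF NOT EXISTS example_idx ON example_table (a.b) INCLUDE (a.c)");
            // Local index: all local indexes of a table share one HBase table,
            // which is why createIndex allocates an index id for them.
            stmt.execute("CREATE LOCAL INDEX IF NOT EXISTS example_local_idx ON example_table (a.c)");
            // ASYNC defers population to an external MapReduce job, matching the
            // early return in createIndex when async index builds are enabled.
            stmt.execute("CREATE INDEX IF NOT EXISTS example_async_idx ON example_table (a.b) ASYNC");
        }
    }
}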
use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class PTableImpl, method newKey.
@Override
public int newKey(ImmutableBytesWritable key, byte[][] values) {
List<PColumn> columns = getPKColumns();
int nValues = values.length;
while (nValues > 0 && (values[nValues - 1] == null || values[nValues - 1].length == 0)) {
nValues--;
}
for (PColumn column : columns) {
if (column.getExpressionStr() != null) {
nValues++;
}
}
int i = 0;
TrustedByteArrayOutputStream os = new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
try {
Integer bucketNum = this.getBucketNum();
if (bucketNum != null) {
// Write placeholder for salt byte
i++;
os.write(QueryConstants.SEPARATOR_BYTE_ARRAY);
}
int nColumns = columns.size();
PDataType type = null;
SortOrder sortOrder = null;
boolean wasNull = false;
while (i < nValues && i < nColumns) {
// Separate variable length column values in key with zero byte
if (type != null && !type.isFixedWidth()) {
os.write(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder));
}
PColumn column = columns.get(i);
sortOrder = column.getSortOrder();
type = column.getDataType();
// This will throw if the value is null and the type doesn't allow null
byte[] byteValue = values[i++];
if (byteValue == null) {
if (column.getExpressionStr() != null) {
try {
String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS;
PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
StatementContext context = new StatementContext(new PhoenixStatement(conn));
ExpressionCompiler compiler = new ExpressionCompiler(context);
ParseNode defaultParseNode = new SQLParser(column.getExpressionStr()).parseExpression();
Expression defaultExpression = defaultParseNode.accept(compiler);
defaultExpression.evaluate(null, key);
column.getDataType().coerceBytes(key, null, defaultExpression.getDataType(), defaultExpression.getMaxLength(), defaultExpression.getScale(), defaultExpression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder());
byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
} catch (SQLException e) {
// should not be possible
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " failed to compile default value expression of " + column.getExpressionStr());
}
} else {
byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
}
}
wasNull = byteValue.length == 0;
// An empty byte value represents null; reject it here for a non-nullable column.
if (byteValue.length == 0 && !column.isNullable()) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
}
Integer maxLength = column.getMaxLength();
Integer scale = column.getScale();
key.set(byteValue);
if (!type.isSizeCompatible(key, null, type, sortOrder, null, null, maxLength, scale)) {
throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " (" + SchemaUtil.toString(type, byteValue) + ")");
}
key.set(byteValue);
type.pad(key, maxLength, sortOrder);
byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
os.write(byteValue, 0, byteValue.length);
}
// Need trailing byte for DESC columns
if (type != null && !type.isFixedWidth() && SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder) == QueryConstants.DESC_SEPARATOR_BYTE) {
os.write(QueryConstants.DESC_SEPARATOR_BYTE);
}
// If some non-null PK values aren't set, then throw
if (i < nColumns) {
PColumn column = columns.get(i);
if (column.getDataType().isFixedWidth() || !column.isNullable()) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
}
}
if (nValues == 0) {
throw new ConstraintViolationException("Primary key may not be null (" + name.getString() + ")");
}
byte[] buf = os.getBuffer();
int size = os.size();
if (bucketNum != null) {
buf[0] = SaltingUtil.getSaltingByte(buf, 1, size - 1, bucketNum);
}
key.set(buf, 0, size);
return i;
} finally {
try {
os.close();
} catch (IOException e) {
// Impossible
throw new RuntimeException(e);
}
}
}
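The DEFAULT-expression branch above can be exercised in isolation. The following sketch mirrors the same calls used in newKey (a connectionless connection, SQLParser.parseExpression, ExpressionCompiler, Expression.evaluate); it assumes the Phoenix client classes are on the classpath, and the expression string "2 + 3" is an arbitrary stand-in for a column's stored default expression.

import java.sql.DriverManager;

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.compile.ExpressionCompiler;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.util.PhoenixRuntime;

public class DefaultExpressionSketch {
    public static void main(String[] args) throws Exception {
        // Connectionless URL, as used by newKey when compiling a default expression.
        String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR
                + PhoenixRuntime.CONNECTIONLESS;
        try (PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class)) {
            StatementContext context = new StatementContext(new PhoenixStatement(conn));
            ExpressionCompiler compiler = new ExpressionCompiler(context);
            // Parse the expression string into a ParseNode and compile it to an Expression.
            ParseNode node = new SQLParser("2 + 3").parseExpression();
            Expression expression = node.accept(compiler);
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            // Evaluate the stateful-free expression and decode the resulting bytes.
            if (expression.evaluate(null, ptr)) {
                Object value = expression.getDataType().toObject(ptr.get(), ptr.getOffset(), ptr.getLength());
                System.out.println("default value = " + value);
            }
        }
    }
}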
use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class MetaDataEndpointImpl, method dropColumnsFromChildViews.
private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
// Collect the Delete mutations that correspond to the columns being dropped from the base table.
for (Mutation m : tableMetadata) {
if (m instanceof Delete) {
byte[][] rkmd = new byte[5][];
int pkCount = getVarChars(m.getRow(), rkmd);
if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
columnDeletesForBaseTable.add((Delete) m);
}
}
}
for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
short numColsDeleted = 0;
byte[] viewTenantId = viewInfo.getTenantId();
byte[] viewSchemaName = viewInfo.getSchemaName();
byte[] viewName = viewInfo.getViewName();
byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
// lock the rows corresponding to views so that no other thread can modify the view
// meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
int numCols = view.getColumns().size();
int minDroppedColOrdinalPos = Integer.MAX_VALUE;
for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
PColumn existingViewColumn = null;
byte[][] rkmd = new byte[5][];
getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
try {
existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
} catch (ColumnFamilyNotFoundException e) {
// ignore since it means that the column family is not present for the column
// being dropped.
} catch (ColumnNotFoundException e) {
// ignore since it means the column is not present in the view
}
// If the view's WHERE clause references the column being dropped, disallow dropping it.
if (existingViewColumn != null && view.getViewStatement() != null) {
ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
PhoenixConnection conn = null;
try {
conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
} catch (ClassNotFoundException e) {
// ignore; the Phoenix driver class is expected to be present on the server classpath
}
PhoenixStatement statement = new PhoenixStatement(conn);
TableRef baseTableRef = new TableRef(basePhysicalTable);
ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
StatementContext context = new StatementContext(statement, columnResolver);
Expression whereExpression = WhereCompiler.compile(context, viewWhere);
Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
ColumnFinder columnFinder = new ColumnFinder(colExpression);
whereExpression.accept(columnFinder);
if (columnFinder.getColumnFound()) {
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
}
}
minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
if (existingViewColumn != null) {
--numColsDeleted;
if (ordinalPositionList.size() == 0) {
ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
for (PColumn col : view.getColumns()) {
ordinalPositionList.addColumn(getColumnKey(viewKey, col));
}
}
ordinalPositionList.dropColumn(columnKey);
Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
mutationsForAddingColumnsToViews.add(viewColumnDelete);
// drop any view indexes that need this column
dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
}
}
updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
}
return null;
}
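For illustration, a hedged client-side sketch of the behavior this check enforces: dropping a base-table column that a child view's WHERE clause references is rejected, since the server returns MutationCode.UNALLOWED_TABLE_MUTATION, which surfaces as a SQLException on the client. The JDBC URL and all object names are placeholders, and the exact error message varies by version.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropViewReferencedColumnExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS base_table ("
                    + "id BIGINT NOT NULL PRIMARY KEY, kind VARCHAR, val BIGINT)");
            stmt.execute("CREATE VIEW IF NOT EXISTS child_view AS SELECT * FROM base_table WHERE kind = 'A'");
            try {
                // CHILD_VIEW's WHERE clause references KIND, so the server-side check
                // in dropColumnsFromChildViews reports UNALLOWED_TABLE_MUTATION.
                stmt.execute("ALTER TABLE base_table DROP COLUMN kind");
            } catch (SQLException e) {
                System.out.println("Drop rejected: " + e.getMessage());
            }
        }
    }
}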
use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class WhereCompiler, method compile.
/**
* Optimize scan ranges by applying dynamically generated filter expressions.
* @param context the shared context during query compilation
* @param statement the filterable statement whose WHERE clause is being compiled
* @throws SQLException if mismatched types are found, bind values do not match binds,
* or invalid function arguments are encountered.
* @throws SQLFeatureNotSupportedException if an unsupported expression is encountered.
* @throws ColumnNotFoundException if column name could not be resolved
* @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables
*/
public static Expression compile(StatementContext context, FilterableStatement statement, ParseNode viewWhere, List<Expression> dynamicFilters, boolean hashJoinOptimization, Set<SubqueryParseNode> subqueryNodes) throws SQLException {
ParseNode where = statement.getWhere();
if (subqueryNodes != null) {
// if the subqueryNodes passed in is null, we assume there will be no sub-queries in the WHERE clause.
SubqueryParseNodeVisitor subqueryVisitor = new SubqueryParseNodeVisitor(context, subqueryNodes);
if (where != null) {
where.accept(subqueryVisitor);
}
if (viewWhere != null) {
viewWhere.accept(subqueryVisitor);
}
if (!subqueryNodes.isEmpty()) {
return null;
}
}
Set<Expression> extractedNodes = Sets.<Expression>newHashSet();
WhereExpressionCompiler whereCompiler = new WhereExpressionCompiler(context);
Expression expression = where == null ? LiteralExpression.newConstant(true, PBoolean.INSTANCE, Determinism.ALWAYS) : where.accept(whereCompiler);
if (whereCompiler.isAggregate()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_WHERE).build().buildException();
}
if (expression.getDataType() != PBoolean.INSTANCE) {
throw TypeMismatchException.newException(PBoolean.INSTANCE, expression.getDataType(), expression.toString());
}
if (viewWhere != null) {
WhereExpressionCompiler viewWhereCompiler = new WhereExpressionCompiler(context, true);
Expression viewExpression = viewWhere.accept(viewWhereCompiler);
expression = AndExpression.create(Lists.newArrayList(expression, viewExpression));
}
if (!dynamicFilters.isEmpty()) {
List<Expression> filters = Lists.newArrayList(expression);
filters.addAll(dynamicFilters);
expression = AndExpression.create(filters);
}
if (context.getCurrentTable().getTable().getType() != PTableType.PROJECTED && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) {
expression = WhereOptimizer.pushKeyExpressionsToScan(context, statement, expression, extractedNodes);
}
setScanFilter(context, statement, expression, whereCompiler.disambiguateWithFamily, hashJoinOptimization);
return expression;
}
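A small JDBC sketch of the observable effect of pushKeyExpressionsToScan: a predicate on the leading primary-key column narrows the scan range, while a predicate on a non-key column remains a server-side filter. The URL and table are placeholders, and the exact EXPLAIN text varies across Phoenix versions.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class WherePushdownExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS events (id BIGINT NOT NULL PRIMARY KEY, payload VARCHAR)");
            // Key predicate: expect a point lookup / narrowed range scan in the plan.
            printPlan(stmt, "EXPLAIN SELECT * FROM events WHERE id = 42");
            // Non-key predicate: expect a full scan with a server-side filter.
            printPlan(stmt, "EXPLAIN SELECT * FROM events WHERE payload = 'x'");
        }
    }

    private static void printPlan(Statement stmt, String explainSql) throws Exception {
        try (ResultSet rs = stmt.executeQuery(explainSql)) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}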
use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class QueryCompiler, method compileSingleFlatQuery.
protected QueryPlan compileSingleFlatQuery(StatementContext context, SelectStatement select, List<Object> binds, boolean asSubquery, boolean allowPageFilter, QueryPlan innerPlan, TupleProjector innerPlanTupleProjector, boolean isInRowKeyOrder) throws SQLException {
PTable projectedTable = null;
if (this.projectTuples) {
projectedTable = TupleProjectionCompiler.createProjectedTable(select, context);
if (projectedTable != null) {
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), select.getUdfParseNodes()));
}
}
ColumnResolver resolver = context.getResolver();
TableRef tableRef = context.getCurrentTable();
PTable table = tableRef.getTable();
ParseNode viewWhere = null;
if (table.getViewStatement() != null) {
viewWhere = new SQLParser(table.getViewStatement()).parseQuery().getWhere();
}
Integer limit = LimitCompiler.compile(context, select);
Integer offset = OffsetCompiler.compile(context, select);
GroupBy groupBy = GroupByCompiler.compile(context, select, isInRowKeyOrder);
// Optimize the HAVING clause by finding any group by expressions that can be moved
// to the WHERE clause
select = HavingCompiler.rewrite(context, select, groupBy);
Expression having = HavingCompiler.compile(context, select, groupBy);
// Don't pass groupBy when building the where clause expression, because we do not want to wrap these
// expressions as group by key expressions since they're pre, not post filtered.
if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) {
context.setResolver(FromCompiler.getResolver(context.getConnection(), tableRef, select.getUdfParseNodes()));
}
Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode>newHashSet();
Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
// Recompile GROUP BY now that we've figured out our ScanRanges so we know
// definitively whether or not we'll traverse in row key order.
groupBy = groupBy.compile(context, innerPlanTupleProjector);
// recover resolver
context.setResolver(resolver);
RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, asSubquery ? Collections.<PDatum>emptyList() : targetColumns, where);
OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit, offset, projector, groupBy == GroupBy.EMPTY_GROUP_BY ? innerPlanTupleProjector : null, isInRowKeyOrder);
context.getAggregationManager().compile(context, groupBy);
// Final step is to build the query plan
if (!asSubquery) {
int maxRows = statement.getMaxRows();
if (maxRows > 0) {
if (limit != null) {
limit = Math.min(limit, maxRows);
} else {
limit = maxRows;
}
}
}
if (projectedTable != null) {
TupleProjector.serializeProjectorIntoScan(context.getScan(), new TupleProjector(projectedTable));
}
QueryPlan plan = innerPlan;
if (plan == null) {
ParallelIteratorFactory parallelIteratorFactory = asSubquery ? null : this.parallelIteratorFactory;
plan = select.getFrom() == null ? new LiteralResultIterationPlan(context, select, tableRef, projector, limit, offset, orderBy, parallelIteratorFactory) : (select.isAggregate() || select.isDistinct() ? new AggregatePlan(context, select, tableRef, projector, limit, offset, orderBy, parallelIteratorFactory, groupBy, having) : new ScanPlan(context, select, tableRef, projector, limit, offset, orderBy, parallelIteratorFactory, allowPageFilter));
}
if (!subqueries.isEmpty()) {
int count = subqueries.size();
WhereClauseSubPlan[] subPlans = new WhereClauseSubPlan[count];
int i = 0;
for (SubqueryParseNode subqueryNode : subqueries) {
SelectStatement stmt = subqueryNode.getSelectNode();
subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt, false), stmt, subqueryNode.expectSingleRow());
}
plan = HashJoinPlan.create(select, plan, null, subPlans);
}
if (innerPlan != null) {
if (LiteralExpression.isTrue(where)) {
// we do not pass "true" as filter
where = null;
}
plan = select.isAggregate() || select.isDistinct() ? new ClientAggregatePlan(context, select, tableRef, projector, limit, offset, where, orderBy, groupBy, having, plan) : new ClientScanPlan(context, select, tableRef, projector, limit, offset, where, orderBy, plan);
}
return plan;
}
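To see these compilation steps together, here is a hedged JDBC sketch of a single flat query that exercises WHERE, GROUP BY, HAVING, ORDER BY, and LIMIT. The URL and names are placeholders, and the EXPLAIN output (an aggregate plan for this query) depends on the Phoenix version.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class FlatQueryCompileExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS metrics (host VARCHAR NOT NULL, ts DATE NOT NULL, "
                    + "value BIGINT CONSTRAINT pk PRIMARY KEY (host, ts))");
            String sql = "SELECT host, COUNT(*) AS cnt FROM metrics "
                    + "WHERE ts > TO_DATE('2020-01-01 00:00:00') "
                    + "GROUP BY host HAVING COUNT(*) > 10 "
                    + "ORDER BY COUNT(*) DESC LIMIT 5";
            // EXPLAIN shows the plan chosen by compileSingleFlatQuery (an AggregatePlan here,
            // since the query aggregates).
            try (ResultSet rs = stmt.executeQuery("EXPLAIN " + sql)) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}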