Use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class JoinCompiler, method optimize.
public static SelectStatement optimize(PhoenixStatement statement, SelectStatement select, final ColumnResolver resolver) throws SQLException {
    TableRef groupByTableRef = null;
    TableRef orderByTableRef = null;
    if (select.getGroupBy() != null && !select.getGroupBy().isEmpty()) {
        ColumnRefParseNodeVisitor groupByVisitor = new ColumnRefParseNodeVisitor(resolver, statement.getConnection());
        for (ParseNode node : select.getGroupBy()) {
            node.accept(groupByVisitor);
        }
        Set<TableRef> set = groupByVisitor.getTableRefSet();
        if (set.size() == 1) {
            groupByTableRef = set.iterator().next();
        }
    } else if (select.getOrderBy() != null && !select.getOrderBy().isEmpty()) {
        ColumnRefParseNodeVisitor orderByVisitor = new ColumnRefParseNodeVisitor(resolver, statement.getConnection());
        for (OrderByNode node : select.getOrderBy()) {
            node.getNode().accept(orderByVisitor);
        }
        Set<TableRef> set = orderByVisitor.getTableRefSet();
        if (set.size() == 1) {
            orderByTableRef = set.iterator().next();
        }
    }
    JoinTable join = compile(statement, select, resolver);
    if (groupByTableRef != null || orderByTableRef != null) {
        QueryCompiler compiler = new QueryCompiler(statement, select, resolver, false);
        List<Object> binds = statement.getParameters();
        StatementContext ctx = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
        QueryPlan plan = compiler.compileJoinQuery(ctx, binds, join, false, false, null);
        TableRef table = plan.getTableRef();
        if (groupByTableRef != null && !groupByTableRef.equals(table)) {
            groupByTableRef = null;
        }
        if (orderByTableRef != null && !orderByTableRef.equals(table)) {
            orderByTableRef = null;
        }
    }
    final Map<TableRef, TableRef> replacement = new HashMap<TableRef, TableRef>();
    for (Table table : join.getTables()) {
        if (table.isSubselect())
            continue;
        TableRef tableRef = table.getTableRef();
        List<ParseNode> groupBy = tableRef.equals(groupByTableRef) ? select.getGroupBy() : null;
        List<OrderByNode> orderBy = tableRef.equals(orderByTableRef) ? select.getOrderBy() : null;
        SelectStatement stmt = getSubqueryForOptimizedPlan(select.getHint(), table.getDynamicColumns(), tableRef, join.getColumnRefs(), table.getPreFiltersCombined(), groupBy, orderBy, table.isWildCardSelect(), select.hasSequence(), select.getUdfParseNodes());
        QueryPlan plan = statement.getConnection().getQueryServices().getOptimizer().optimize(statement, stmt);
        if (!plan.getTableRef().equals(tableRef)) {
            replacement.put(tableRef, plan.getTableRef());
        }
    }
    if (replacement.isEmpty())
        return select;
    TableNode from = select.getFrom();
    TableNode newFrom = from.accept(new TableNodeVisitor<TableNode>() {
        private TableRef resolveTable(String alias, TableName name) throws SQLException {
            if (alias != null)
                return resolver.resolveTable(null, alias);
            return resolver.resolveTable(name.getSchemaName(), name.getTableName());
        }

        private TableName getReplacedTableName(TableRef tableRef) {
            String schemaName = tableRef.getTable().getSchemaName().getString();
            return TableName.create(schemaName.length() == 0 ? null : schemaName, tableRef.getTable().getTableName().getString());
        }

        @Override
        public TableNode visit(BindTableNode boundTableNode) throws SQLException {
            TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName());
            TableRef replaceRef = replacement.get(tableRef);
            if (replaceRef == null)
                return boundTableNode;
            String alias = boundTableNode.getAlias();
            return NODE_FACTORY.bindTable(alias == null ? null : '"' + alias + '"', getReplacedTableName(replaceRef));
        }

        @Override
        public TableNode visit(JoinTableNode joinNode) throws SQLException {
            TableNode lhs = joinNode.getLHS();
            TableNode rhs = joinNode.getRHS();
            TableNode lhsReplace = lhs.accept(this);
            TableNode rhsReplace = rhs.accept(this);
            if (lhs == lhsReplace && rhs == rhsReplace)
                return joinNode;
            return NODE_FACTORY.join(joinNode.getType(), lhsReplace, rhsReplace, joinNode.getOnNode(), joinNode.isSingleValueOnly());
        }

        @Override
        public TableNode visit(NamedTableNode namedTableNode) throws SQLException {
            TableRef tableRef = resolveTable(namedTableNode.getAlias(), namedTableNode.getName());
            TableRef replaceRef = replacement.get(tableRef);
            if (replaceRef == null)
                return namedTableNode;
            String alias = namedTableNode.getAlias();
            return NODE_FACTORY.namedTable(alias == null ? null : '"' + alias + '"', getReplacedTableName(replaceRef), namedTableNode.getDynamicColumns());
        }

        @Override
        public TableNode visit(DerivedTableNode subselectNode) throws SQLException {
            return subselectNode;
        }
    });
    SelectStatement indexSelect = IndexStatementRewriter.translate(NODE_FACTORY.select(select, newFrom), resolver, replacement);
    for (TableRef indexTableRef : replacement.values()) {
        // replace expressions with corresponding matching columns for functional indexes
        indexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(indexTableRef.getTable(), indexTableRef.getTableAlias(), statement.getConnection(), indexSelect.getUdfParseNodes()));
    }
    return indexSelect;
}
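The push-down decision above hinges on ColumnRefParseNodeVisitor.getTableRefSet(): a GROUP BY or ORDER BY clause is a candidate for being compiled into a single table's subquery only when every column it references resolves to exactly one table. Below is a minimal, self-contained sketch of that check; the ColumnRef type and the table names are hypothetical stand-ins for illustration, not Phoenix classes.

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Hypothetical stand-in for a resolved column reference, just to show the shape of the check.
final class ColumnRef {
    final String table;   // table the column resolves to
    final String column;
    ColumnRef(String table, String column) { this.table = table; this.column = column; }
}

public class SingleTableClauseCheck {
    // Mirrors the idea behind getTableRefSet(): collect the distinct tables referenced
    // by a clause; the clause can be pushed into a subquery only if the set has size 1.
    static String soleTableOrNull(List<ColumnRef> clauseColumns) {
        Set<String> tables = new LinkedHashSet<>();
        for (ColumnRef ref : clauseColumns) {
            tables.add(ref.table);
        }
        return tables.size() == 1 ? tables.iterator().next() : null;
    }

    public static void main(String[] args) {
        List<ColumnRef> groupBy = List.of(new ColumnRef("O", "customer_id"), new ColumnRef("O", "date"));
        List<ColumnRef> orderBy = List.of(new ColumnRef("O", "date"), new ColumnRef("C", "name"));
        System.out.println(soleTableOrNull(groupBy)); // O    -> GROUP BY can be compiled against table O alone
        System.out.println(soleTableOrNull(orderBy)); // null -> ORDER BY spans two tables, no push-down
    }
}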
Use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class CreateIndexCompiler, method compile.
public MutationPlan compile(final CreateIndexStatement create) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final ColumnResolver resolver = FromCompiler.getResolver(create, connection, create.getUdfParseNodes());
    Scan scan = new Scan();
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    List<ParseNode> splitNodes = create.getSplitNodes();
    if (create.getIndexType() == IndexType.LOCAL) {
        if (!splitNodes.isEmpty()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPLIT_LOCAL_INDEX).build().buildException();
        }
        List<Pair<String, Object>> list = create.getProps() != null ? create.getProps().get("") : null;
        if (list != null) {
            for (Pair<String, Object> pair : list) {
                if (pair.getFirst().equals(PhoenixDatabaseMetaData.SALT_BUCKETS)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SALT_LOCAL_INDEX).build().buildException();
                }
            }
        }
    }
    final byte[][] splits = new byte[splitNodes.size()][];
    for (int i = 0; i < splits.length; i++) {
        ParseNode node = splitNodes.get(i);
        if (!node.isStateless()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT).setMessage("Node: " + node).build().buildException();
        }
        LiteralExpression expression = (LiteralExpression) node.accept(expressionCompiler);
        splits[i] = expression.getBytes();
    }
    final MetaDataClient client = new MetaDataClient(connection);
    return new BaseMutationPlan(context, operation) {
        @Override
        public MutationState execute() throws SQLException {
            return client.createIndex(create, splits);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("CREATE INDEX"));
        }
    };
}
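For context, here is a hedged JDBC-level sketch of the rules this compiler enforces: split points must be stateless (constant) expressions, and a local index can be neither pre-split nor salted. The table and index names and the connection URL are made up for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class CreateIndexSplitExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Each split point is compiled to a LiteralExpression and turned into a
            // region-boundary byte[]; non-constant expressions fail with SPLIT_POINT_NOT_CONSTANT.
            stmt.execute("CREATE INDEX my_idx ON my_table (v) SPLIT ON ('a', 'm', 't')");

            // A LOCAL index lives in the data table's regions, so it cannot be pre-split
            // or salted; both of these would be rejected at compile time:
            //   CREATE LOCAL INDEX bad1 ON my_table (v) SPLIT ON ('a')    -> CANNOT_SPLIT_LOCAL_INDEX
            //   CREATE LOCAL INDEX bad2 ON my_table (v) SALT_BUCKETS=4    -> CANNOT_SALT_LOCAL_INDEX
        }
    }
}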
Use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class CreateTableCompiler, method compile.
public MutationPlan compile(CreateTableStatement create) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection);
    PTableType type = create.getTableType();
    PhoenixConnection connectionToBe = connection;
    PTable parentToBe = null;
    ViewType viewTypeToBe = null;
    Scan scan = new Scan();
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    // TODO: support any statement for a VIEW instead of just a WHERE clause
    ParseNode whereNode = create.getWhereClause();
    String viewStatementToBe = null;
    byte[][] viewColumnConstantsToBe = null;
    BitSet isViewColumnReferencedToBe = null;
    // Disallow creating the table if any column family name contains the local index
    // column family suffix. Also validate the default value expressions.
    List<ColumnDef> columnDefs = create.getColumnDefs();
    List<ColumnDef> overideColumnDefs = null;
    PrimaryKeyConstraint pkConstraint = create.getPrimaryKeyConstraint();
    for (int i = 0; i < columnDefs.size(); i++) {
        ColumnDef columnDef = columnDefs.get(i);
        if (columnDef.getColumnDefName().getFamilyName() != null && columnDef.getColumnDefName().getFamilyName().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_COLUMN_FAMILY).build().buildException();
        }
        // False means we do not need the default (because it evaluated to null)
        if (!columnDef.validateDefault(context, pkConstraint)) {
            if (overideColumnDefs == null) {
                overideColumnDefs = new ArrayList<>(columnDefs);
            }
            overideColumnDefs.set(i, new ColumnDef(columnDef, null));
        }
    }
    if (overideColumnDefs != null) {
        create = new CreateTableStatement(create, overideColumnDefs);
    }
    final CreateTableStatement finalCreate = create;
    if (type == PTableType.VIEW) {
        TableRef tableRef = resolver.getTables().get(0);
        int nColumns = tableRef.getTable().getColumns().size();
        isViewColumnReferencedToBe = new BitSet(nColumns);
        // Used to track column references in a view
        ExpressionCompiler expressionCompiler = new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe);
        parentToBe = tableRef.getTable();
        viewTypeToBe = parentToBe.getViewType() == ViewType.MAPPED ? ViewType.MAPPED : ViewType.UPDATABLE;
        if (whereNode == null) {
            viewStatementToBe = parentToBe.getViewStatement();
        } else {
            whereNode = StatementNormalizer.normalize(whereNode, resolver);
            if (whereNode.isStateless()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WHERE_IS_CONSTANT).build().buildException();
            }
            // If our parent has a VIEW statement, combine it with this one
            if (parentToBe.getViewStatement() != null) {
                SelectStatement select = new SQLParser(parentToBe.getViewStatement()).parseQuery().combine(whereNode);
                whereNode = select.getWhere();
            }
            Expression where = whereNode.accept(expressionCompiler);
            if (where != null && !LiteralExpression.isTrue(where)) {
                TableName baseTableName = create.getBaseTableName();
                StringBuilder buf = new StringBuilder();
                whereNode.toSQL(resolver, buf);
                viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(), baseTableName.getTableName(), buf.toString());
            }
            if (viewTypeToBe != ViewType.MAPPED) {
                Long scn = connection.getSCN();
                // If there is no SCN on the connection and the base table is not transactional,
                // clone the connection pinned at the base table's resolution timestamp to avoid
                // any race condition on clocks being in sync, registering any table added during
                // view creation on our connection.
                connectionToBe = (scn != null || tableRef.getTable().isTransactional()) ? connection
                        : new PhoenixConnection(new DelegateConnectionQueryServices(connection.getQueryServices()) {
                            @Override
                            public void addTable(PTable table, long resolvedTime) throws SQLException {
                                connection.addTable(table, resolvedTime);
                            }
                        }, connection, tableRef.getTimeStamp() + 1);
                viewColumnConstantsToBe = new byte[nColumns][];
                ViewWhereExpressionVisitor visitor = new ViewWhereExpressionVisitor(parentToBe, viewColumnConstantsToBe);
                where.accept(visitor);
                // If view is not updatable, viewColumnConstants should be empty. We will still
                // inherit our parent viewConstants, but we have no additional ones.
                viewTypeToBe = visitor.isUpdatable() ? ViewType.UPDATABLE : ViewType.READ_ONLY;
                if (viewTypeToBe != ViewType.UPDATABLE) {
                    viewColumnConstantsToBe = null;
                }
            }
        }
    }
    final ViewType viewType = viewTypeToBe;
    final String viewStatement = viewStatementToBe;
    final byte[][] viewColumnConstants = viewColumnConstantsToBe;
    final BitSet isViewColumnReferenced = isViewColumnReferencedToBe;
    List<ParseNode> splitNodes = create.getSplitNodes();
    final byte[][] splits = new byte[splitNodes.size()][];
    ImmutableBytesWritable ptr = context.getTempPtr();
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    for (int i = 0; i < splits.length; i++) {
        ParseNode node = splitNodes.get(i);
        if (node instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) node, VARBINARY_DATUM);
        }
        if (node.isStateless()) {
            Expression expression = node.accept(expressionCompiler);
            if (expression.evaluate(null, ptr)) {
                splits[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                continue;
            }
        }
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT).setMessage("Node: " + node).build().buildException();
    }
    final MetaDataClient client = new MetaDataClient(connectionToBe);
    final PTable parent = parentToBe;
    return new BaseMutationPlan(context, operation) {
        @Override
        public MutationState execute() throws SQLException {
            try {
                return client.createTable(finalCreate, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced);
            } finally {
                if (client.getConnection() != connection) {
                    client.getConnection().close();
                }
            }
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("CREATE TABLE"));
        }
    };
}
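A hedged usage sketch of the view paths handled above, assuming a local Phoenix connection; the table, view, and column names and the JDBC URL are illustrative only.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class ViewOnViewExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE base (k VARCHAR PRIMARY KEY, kind VARCHAR, v INTEGER)");
            // A non-constant WHERE clause keeps the view UPDATABLE; the equality on kind
            // becomes a view column constant applied to rows written through the view.
            stmt.execute("CREATE VIEW v1 AS SELECT * FROM base WHERE kind = 'a'");
            // The parent's VIEW statement is parsed and combined with the child's WHERE,
            // so v2's effective filter is kind = 'a' AND v > 10.
            stmt.execute("CREATE VIEW v2 AS SELECT * FROM v1 WHERE v > 10");
            // A stateless WHERE clause is rejected with VIEW_WHERE_IS_CONSTANT:
            //   CREATE VIEW bad AS SELECT * FROM base WHERE 1 = 1
        }
    }
}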
Use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class HashJoinPlan, method iterator.
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = delegate.getContext().getScan();
    }
    int count = subPlans.length;
    PhoenixConnection connection = getContext().getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    ExecutorService executor = services.getExecutor();
    List<Future<ServerCache>> futures = Lists.newArrayListWithExpectedSize(count);
    if (joinInfo != null) {
        hashClient = hashClient != null ? hashClient : new HashCacheClient(delegate.getContext().getConnection());
        firstJobEndTime = new AtomicLong(0);
        keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
    }
    for (int i = 0; i < count; i++) {
        final int index = i;
        futures.add(executor.submit(new JobCallable<ServerCache>() {
            @Override
            public ServerCache call() throws Exception {
                ServerCache cache = subPlans[index].execute(HashJoinPlan.this);
                return cache;
            }

            @Override
            public Object getJobId() {
                return HashJoinPlan.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return NO_OP_INSTANCE;
            }
        }));
    }
    SQLException firstException = null;
    for (int i = 0; i < count; i++) {
        try {
            ServerCache result = futures.get(i).get();
            if (result != null) {
                dependencies.add(result);
            }
            subPlans[i].postProcess(result, this);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            if (firstException == null) {
                firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException();
            }
        } catch (ExecutionException e) {
            if (firstException == null) {
                firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
            }
        }
    }
    if (firstException != null) {
        SQLCloseables.closeAllQuietly(dependencies);
        throw firstException;
    }
    Expression postFilter = null;
    boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
    if (recompileWhereClause || hasKeyRangeExpressions) {
        StatementContext context = delegate.getContext();
        PTable table = context.getCurrentTable().getTable();
        ParseNode viewWhere = table.getViewStatement() == null ? null : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
        context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection()));
        if (recompileWhereClause) {
            postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
        }
        if (hasKeyRangeExpressions) {
            WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true, null);
        }
    }
    if (joinInfo != null) {
        HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
    }
    ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper, scan) : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan);
    if (statement.getInnerSelectStatement() != null && postFilter != null) {
        iterator = new FilterResultIterator(iterator, postFilter);
    }
    return iterator;
}
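The sub-plan scheduling above follows a common gather pattern: submit every task, wait on each future in order, remember only the first failure (restoring the interrupt flag on interruption), then release whatever did succeed before rethrowing. A minimal, generic sketch of the same pattern, with plain Java types standing in for Phoenix's JobCallable, ServerCache, and SQLCloseables:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FirstExceptionGather {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        List<Future<String>> futures = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            final int index = i;
            futures.add(executor.submit(() -> {
                if (index == 1) throw new IllegalStateException("sub plan failed");
                return "cache-" + index;
            }));
        }
        Exception firstException = null;
        List<String> dependencies = new ArrayList<>();
        for (int i = 0; i < futures.size(); i++) {
            try {
                // Wait for every sub-plan, not just until the first error, so all
                // futures complete before we decide what to do.
                dependencies.add(futures.get(i).get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag, as the plan does
                if (firstException == null) firstException = e;
            } catch (ExecutionException e) {
                if (firstException == null)
                    firstException = new Exception("sub plan [" + i + "] failed", e.getCause());
            }
        }
        executor.shutdown();
        if (firstException != null) {
            dependencies.clear(); // stands in for SQLCloseables.closeAllQuietly(dependencies)
            throw firstException;
        }
        System.out.println(dependencies);
    }
}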
Use of org.apache.phoenix.parse.ParseNode in project phoenix by apache.
The class MetaDataEndpointImpl, method dropColumnsFromChildViews.
private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
    List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
    // Isolate the Delete mutations for the base table columns that are being dropped.
    for (Mutation m : tableMetadata) {
        if (m instanceof Delete) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnDeletesForBaseTable.add((Delete) m);
            }
        }
    }
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short numColsDeleted = 0;
        byte[] viewTenantId = viewInfo.getTenantId();
        byte[] viewSchemaName = viewInfo.getSchemaName();
        byte[] viewName = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
        // lock the rows corresponding to views so that no other thread can modify the view
        // meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        int numCols = view.getColumns().size();
        int minDroppedColOrdinalPos = Integer.MAX_VALUE;
        for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
                // ignore since it means that the column family is not present for the column
                // being dropped
            } catch (ColumnNotFoundException e) {
                // ignore since it means the column is not present in the view
            }
            // If the column being dropped is referenced in the view's WHERE clause, the view
            // would become invalid, so disallow the mutation.
            if (existingViewColumn != null && view.getViewStatement() != null) {
                ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
                PhoenixConnection conn = null;
                try {
                    conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                } catch (ClassNotFoundException e) {
                    // ignore: the Phoenix driver is always present on the server
                }
                PhoenixStatement statement = new PhoenixStatement(conn);
                TableRef baseTableRef = new TableRef(basePhysicalTable);
                ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
                StatementContext context = new StatementContext(statement, columnResolver);
                Expression whereExpression = WhereCompiler.compile(context, viewWhere);
                Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
                ColumnFinder columnFinder = new ColumnFinder(colExpression);
                whereExpression.accept(columnFinder);
                if (columnFinder.getColumnFound()) {
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
                }
            }
            if (existingViewColumn != null) {
                minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
                --numColsDeleted;
                if (ordinalPositionList.size() == 0) {
                    ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
                    for (PColumn col : view.getColumns()) {
                        ordinalPositionList.addColumn(getColumnKey(viewKey, col));
                    }
                }
                ordinalPositionList.dropColumn(columnKey);
                Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
                mutationsForAddingColumnsToViews.add(viewColumnDelete);
                // drop any view indexes that need this column
                dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
            }
        }
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
    }
    return null;
}
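The heart of the WHERE-clause guard above is ColumnFinder: walk the compiled view WHERE expression and flag whether it touches the column being dropped. A self-contained sketch of that visitor idea; all types here are hypothetical stand-ins for Phoenix's Expression tree, not the real classes.

import java.util.List;

// Hypothetical mini expression tree with a visitor, mirroring the shape of
// "does this expression reference a given column?".
interface Expr { void accept(ExprVisitor v); }
interface ExprVisitor { void visitColumn(ColumnExpr c); }

final class ColumnExpr implements Expr {
    final String name;
    ColumnExpr(String name) { this.name = name; }
    public void accept(ExprVisitor v) { v.visitColumn(this); }
}

final class AndExpr implements Expr {
    final List<Expr> children;
    AndExpr(List<Expr> children) { this.children = children; }
    public void accept(ExprVisitor v) { for (Expr c : children) c.accept(v); }
}

public class ColumnFinderSketch implements ExprVisitor {
    private final String target;
    private boolean found;
    ColumnFinderSketch(String target) { this.target = target; }

    // Flip the flag the moment the target column shows up anywhere in the tree.
    public void visitColumn(ColumnExpr c) { if (c.name.equals(target)) found = true; }

    public static void main(String[] args) {
        // WHERE kind = 'a' AND v > 10, reduced to the columns it touches
        Expr where = new AndExpr(List.of(new ColumnExpr("KIND"), new ColumnExpr("V")));
        ColumnFinderSketch finder = new ColumnFinderSketch("V");
        where.accept(finder);
        System.out.println(finder.found); // true -> dropping V would break the view, so it is disallowed
    }
}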