Use of org.apache.phoenix.parse.SQLParser in project phoenix by apache.
The class PTableImpl, method newKey.
@Override
public int newKey(ImmutableBytesWritable key, byte[][] values) {
    List<PColumn> columns = getPKColumns();
    int nValues = values.length;
    while (nValues > 0 && (values[nValues - 1] == null || values[nValues - 1].length == 0)) {
        nValues--;
    }
    for (PColumn column : columns) {
        if (column.getExpressionStr() != null) {
            nValues++;
        }
    }
    int i = 0;
    TrustedByteArrayOutputStream os =
            new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
    try {
        Integer bucketNum = this.getBucketNum();
        if (bucketNum != null) {
            // Write placeholder for salt byte
            i++;
            os.write(QueryConstants.SEPARATOR_BYTE_ARRAY);
        }
        int nColumns = columns.size();
        PDataType type = null;
        SortOrder sortOrder = null;
        boolean wasNull = false;
        while (i < nValues && i < nColumns) {
            // Separate variable length column values in key with zero byte
            if (type != null && !type.isFixedWidth()) {
                os.write(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder));
            }
            PColumn column = columns.get(i);
            sortOrder = column.getSortOrder();
            type = column.getDataType();
            // This will throw if the value is null and the type doesn't allow null
            byte[] byteValue = values[i++];
            if (byteValue == null) {
                if (column.getExpressionStr() != null) {
                    try {
                        String url = PhoenixRuntime.JDBC_PROTOCOL
                                + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR
                                + PhoenixRuntime.CONNECTIONLESS;
                        PhoenixConnection conn =
                                DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
                        StatementContext context = new StatementContext(new PhoenixStatement(conn));
                        ExpressionCompiler compiler = new ExpressionCompiler(context);
                        ParseNode defaultParseNode =
                                new SQLParser(column.getExpressionStr()).parseExpression();
                        Expression defaultExpression = defaultParseNode.accept(compiler);
                        defaultExpression.evaluate(null, key);
                        column.getDataType().coerceBytes(key, null, defaultExpression.getDataType(),
                                defaultExpression.getMaxLength(), defaultExpression.getScale(),
                                defaultExpression.getSortOrder(), column.getMaxLength(),
                                column.getScale(), column.getSortOrder());
                        byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
                    } catch (SQLException e) {
                        // should not be possible
                        throw new ConstraintViolationException(name.getString() + "."
                                + column.getName().getString()
                                + " failed to compile default value expression of "
                                + column.getExpressionStr());
                    }
                } else {
                    byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
                }
            }
            wasNull = byteValue.length == 0;
            // An empty byte array represents null; check for that here rather than
            // leaking the null encoding outside of PDataType.
            if (byteValue.length == 0 && !column.isNullable()) {
                throw new ConstraintViolationException(
                        name.getString() + "." + column.getName().getString() + " may not be null");
            }
            Integer maxLength = column.getMaxLength();
            Integer scale = column.getScale();
            key.set(byteValue);
            if (!type.isSizeCompatible(key, null, type, sortOrder, null, null, maxLength, scale)) {
                throw new DataExceedsCapacityException(
                        name.getString() + "." + column.getName().getString() + " may not exceed "
                                + maxLength + " (" + SchemaUtil.toString(type, byteValue) + ")");
            }
            key.set(byteValue);
            type.pad(key, maxLength, sortOrder);
            byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
            os.write(byteValue, 0, byteValue.length);
        }
        // Need trailing byte for DESC columns
        if (type != null && !type.isFixedWidth()
                && SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder)
                        == QueryConstants.DESC_SEPARATOR_BYTE) {
            os.write(QueryConstants.DESC_SEPARATOR_BYTE);
        }
        // If some non null pk values aren't set, then throw
        if (i < nColumns) {
            PColumn column = columns.get(i);
            if (column.getDataType().isFixedWidth() || !column.isNullable()) {
                throw new ConstraintViolationException(
                        name.getString() + "." + column.getName().getString() + " may not be null");
            }
        }
        if (nValues == 0) {
            throw new ConstraintViolationException(
                    "Primary key may not be null (" + name.getString() + ")");
        }
        byte[] buf = os.getBuffer();
        int size = os.size();
        if (bucketNum != null) {
            buf[0] = SaltingUtil.getSaltingByte(buf, 1, size - 1, bucketNum);
        }
        key.set(buf, 0, size);
        return i;
    } finally {
        try {
            os.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
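
The SQLParser usage here parses a column's DEFAULT expression string in isolation via parseExpression(). A minimal standalone sketch of that entry point; the expression literal is a hypothetical example, and it assumes the Phoenix client jar is on the classpath:

import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;

public class ParseDefaultExpressionSketch {
    public static void main(String[] args) throws java.sql.SQLException {
        // Parse a standalone expression string, as newKey does for a column's
        // DEFAULT expression. "TO_NUMBER('42')" is a hypothetical example value.
        ParseNode defaultParseNode = new SQLParser("TO_NUMBER('42')").parseExpression();
        System.out.println(defaultParseNode);
    }
}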
Use of org.apache.phoenix.parse.SQLParser in project phoenix by apache.
The class MetaDataEndpointImpl, method dropColumnsFromChildViews.
private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable,
        List<RowLock> locks, List<Mutation> tableMetadata,
        List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName,
        List<ImmutableBytesPtr> invalidateList, long clientTimeStamp,
        TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete,
        List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
    List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
    // Isolate the Delete mutations that correspond to the base table columns
    // that are being dropped.
    for (Mutation m : tableMetadata) {
        if (m instanceof Delete) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            if (pkCount > COLUMN_NAME_INDEX
                    && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0
                    && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnDeletesForBaseTable.add((Delete) m);
            }
        }
    }
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short numColsDeleted = 0;
        byte[] viewTenantId = viewInfo.getTenantId();
        byte[] viewSchemaName = viewInfo.getSchemaName();
        byte[] viewName = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
        // Lock the rows corresponding to views so that no other thread can modify the view
        // meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        int numCols = view.getColumns().size();
        int minDroppedColOrdinalPos = Integer.MAX_VALUE;
        for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null
                    : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            try {
                existingViewColumn = columnFamily == null
                        ? view.getColumnForColumnName(columnName)
                        : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
                // Ignore; the column family is not present in the view.
            } catch (ColumnNotFoundException e) {
                // Ignore; the column is not present in the view.
            }
            // If the view's WHERE clause references the column being dropped,
            // disallow the drop rather than break the view.
            if (existingViewColumn != null && view.getViewStatement() != null) {
                ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
                PhoenixConnection conn = null;
                try {
                    conn = QueryUtil.getConnectionOnServer(env.getConfiguration())
                            .unwrap(PhoenixConnection.class);
                } catch (ClassNotFoundException e) {
                    // Ignore; the Phoenix JDBC driver is expected to be present on the
                    // server classpath.
                }
                PhoenixStatement statement = new PhoenixStatement(conn);
                TableRef baseTableRef = new TableRef(basePhysicalTable);
                ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
                StatementContext context = new StatementContext(statement, columnResolver);
                Expression whereExpression = WhereCompiler.compile(context, viewWhere);
                Expression colExpression = new ColumnRef(baseTableRef,
                        existingViewColumn.getPosition()).newColumnExpression();
                ColumnFinder columnFinder = new ColumnFinder(colExpression);
                whereExpression.accept(columnFinder);
                if (columnFinder.getColumnFound()) {
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
                            EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
                }
            }
            if (existingViewColumn != null) {
                minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn),
                        minDroppedColOrdinalPos);
                --numColsDeleted;
                if (ordinalPositionList.size() == 0) {
                    ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
                    for (PColumn col : view.getColumns()) {
                        ordinalPositionList.addColumn(getColumnKey(viewKey, col));
                    }
                }
                ordinalPositionList.dropColumn(columnKey);
                Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
                mutationsForAddingColumnsToViews.add(viewColumnDelete);
                // Drop any view indexes that need this column
                dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName,
                        view.getName().getBytes(), mutationsForAddingColumnsToViews,
                        existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
            }
        }
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews,
                invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view,
                ordinalPositionList, numCols, true);
    }
    return null;
}
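
Here SQLParser re-parses the stored view definition so the WHERE clause can be checked for references to the column being dropped. A minimal sketch of extracting a WHERE clause with parseQuery(); the statement text and table name are hypothetical:

import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;

public class ParseViewWhereSketch {
    public static void main(String[] args) throws java.sql.SQLException {
        // A hypothetical stand-in for the text returned by PTable.getViewStatement().
        String viewStatement = "SELECT * FROM BASE_TABLE WHERE KIND = 'v1'";
        SelectStatement select = new SQLParser(viewStatement).parseQuery();
        ParseNode where = select.getWhere(); // the clause inspected by ColumnFinder above
        System.out.println(where);
    }
}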
Use of org.apache.phoenix.parse.SQLParser in project phoenix by apache.
The class QueryCompiler, method compileSingleFlatQuery.
protected QueryPlan compileSingleFlatQuery(StatementContext context, SelectStatement select,
        List<Object> binds, boolean asSubquery, boolean allowPageFilter, QueryPlan innerPlan,
        TupleProjector innerPlanTupleProjector, boolean isInRowKeyOrder) throws SQLException {
    PTable projectedTable = null;
    if (this.projectTuples) {
        projectedTable = TupleProjectionCompiler.createProjectedTable(select, context);
        if (projectedTable != null) {
            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable,
                    context.getConnection(), select.getUdfParseNodes()));
        }
    }
    ColumnResolver resolver = context.getResolver();
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    ParseNode viewWhere = null;
    if (table.getViewStatement() != null) {
        viewWhere = new SQLParser(table.getViewStatement()).parseQuery().getWhere();
    }
    Integer limit = LimitCompiler.compile(context, select);
    Integer offset = OffsetCompiler.compile(context, select);
    GroupBy groupBy = GroupByCompiler.compile(context, select, isInRowKeyOrder);
    // Optimize the HAVING clause by finding any group by expressions that can be moved
    // to the WHERE clause
    select = HavingCompiler.rewrite(context, select, groupBy);
    Expression having = HavingCompiler.compile(context, select, groupBy);
    // Don't pass groupBy when building the WHERE clause expression, because we don't want
    // to wrap these expressions as group by key expressions since they're pre, not post
    // filtered.
    if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) {
        context.setResolver(FromCompiler.getResolver(context.getConnection(), tableRef,
                select.getUdfParseNodes()));
    }
    Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode>newHashSet();
    Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
    // Recompile GROUP BY now that we've figured out our ScanRanges so we know
    // definitively whether or not we'll traverse in row key order.
    groupBy = groupBy.compile(context, innerPlanTupleProjector);
    // Recover the resolver
    context.setResolver(resolver);
    RowProjector projector = ProjectionCompiler.compile(context, select, groupBy,
            asSubquery ? Collections.<PDatum>emptyList() : targetColumns, where);
    OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit, offset, projector,
            groupBy == GroupBy.EMPTY_GROUP_BY ? innerPlanTupleProjector : null, isInRowKeyOrder);
    context.getAggregationManager().compile(context, groupBy);
    // Final step is to build the query plan
    if (!asSubquery) {
        int maxRows = statement.getMaxRows();
        if (maxRows > 0) {
            if (limit != null) {
                limit = Math.min(limit, maxRows);
            } else {
                limit = maxRows;
            }
        }
    }
    if (projectedTable != null) {
        TupleProjector.serializeProjectorIntoScan(context.getScan(),
                new TupleProjector(projectedTable));
    }
    QueryPlan plan = innerPlan;
    if (plan == null) {
        ParallelIteratorFactory parallelIteratorFactory =
                asSubquery ? null : this.parallelIteratorFactory;
        if (select.getFrom() == null) {
            plan = new LiteralResultIterationPlan(context, select, tableRef, projector, limit,
                    offset, orderBy, parallelIteratorFactory);
        } else if (select.isAggregate() || select.isDistinct()) {
            plan = new AggregatePlan(context, select, tableRef, projector, limit, offset, orderBy,
                    parallelIteratorFactory, groupBy, having);
        } else {
            plan = new ScanPlan(context, select, tableRef, projector, limit, offset, orderBy,
                    parallelIteratorFactory, allowPageFilter);
        }
    }
    if (!subqueries.isEmpty()) {
        int count = subqueries.size();
        WhereClauseSubPlan[] subPlans = new WhereClauseSubPlan[count];
        int i = 0;
        for (SubqueryParseNode subqueryNode : subqueries) {
            SelectStatement stmt = subqueryNode.getSelectNode();
            subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt, false), stmt,
                    subqueryNode.expectSingleRow());
        }
        plan = HashJoinPlan.create(select, plan, null, subPlans);
    }
    if (innerPlan != null) {
        if (LiteralExpression.isTrue(where)) {
            // A "true" filter is a no-op, so we do not pass it along
            where = null;
        }
        plan = select.isAggregate() || select.isDistinct()
                ? new ClientAggregatePlan(context, select, tableRef, projector, limit, offset,
                        where, orderBy, groupBy, having, plan)
                : new ClientScanPlan(context, select, tableRef, projector, limit, offset, where,
                        orderBy, plan);
    }
    return plan;
}
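
compileSingleFlatQuery only parses the view statement when one is present, leaving viewWhere null otherwise. That null-guarded pattern can be factored into a small helper; this sketch assumes the caller passes the raw string from PTable.getViewStatement(), and the statement text in main is hypothetical:

import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;

public class ViewWhereHelperSketch {
    // Returns the parsed WHERE clause of a view statement, or null for non-views.
    static ParseNode viewWhereOrNull(String viewStatement) throws java.sql.SQLException {
        return viewStatement == null
                ? null
                : new SQLParser(viewStatement).parseQuery().getWhere();
    }

    public static void main(String[] args) throws java.sql.SQLException {
        System.out.println(viewWhereOrNull("SELECT * FROM T WHERE K > 10"));
        System.out.println(viewWhereOrNull(null));
    }
}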
Use of org.apache.phoenix.parse.SQLParser in project phoenix by apache.
The class CreateTableCompiler, method compile.
public MutationPlan compile(CreateTableStatement create) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection);
    PTableType type = create.getTableType();
    PhoenixConnection connectionToBe = connection;
    PTable parentToBe = null;
    ViewType viewTypeToBe = null;
    Scan scan = new Scan();
    final StatementContext context =
            new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    // TODO: support any statement for a VIEW instead of just a WHERE clause
    ParseNode whereNode = create.getWhereClause();
    String viewStatementToBe = null;
    byte[][] viewColumnConstantsToBe = null;
    BitSet isViewColumnReferencedToBe = null;
    // Disallow creating the table if any column family name carries the local index
    // column family suffix. Also validate the default value expressions.
    List<ColumnDef> columnDefs = create.getColumnDefs();
    List<ColumnDef> overrideColumnDefs = null;
    PrimaryKeyConstraint pkConstraint = create.getPrimaryKeyConstraint();
    for (int i = 0; i < columnDefs.size(); i++) {
        ColumnDef columnDef = columnDefs.get(i);
        if (columnDef.getColumnDefName().getFamilyName() != null
                && columnDef.getColumnDefName().getFamilyName()
                        .contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_COLUMN_FAMILY)
                    .build().buildException();
        }
        // False means we do not need the default (because it evaluated to null)
        if (!columnDef.validateDefault(context, pkConstraint)) {
            if (overrideColumnDefs == null) {
                overrideColumnDefs = new ArrayList<>(columnDefs);
            }
            overrideColumnDefs.set(i, new ColumnDef(columnDef, null));
        }
    }
    if (overrideColumnDefs != null) {
        create = new CreateTableStatement(create, overrideColumnDefs);
    }
    final CreateTableStatement finalCreate = create;
    if (type == PTableType.VIEW) {
        TableRef tableRef = resolver.getTables().get(0);
        int nColumns = tableRef.getTable().getColumns().size();
        isViewColumnReferencedToBe = new BitSet(nColumns);
        // Used to track column references in a view
        ExpressionCompiler expressionCompiler =
                new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe);
        parentToBe = tableRef.getTable();
        viewTypeToBe = parentToBe.getViewType() == ViewType.MAPPED
                ? ViewType.MAPPED : ViewType.UPDATABLE;
        if (whereNode == null) {
            viewStatementToBe = parentToBe.getViewStatement();
        } else {
            whereNode = StatementNormalizer.normalize(whereNode, resolver);
            if (whereNode.isStateless()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WHERE_IS_CONSTANT)
                        .build().buildException();
            }
            // If our parent has a VIEW statement, combine it with this one
            if (parentToBe.getViewStatement() != null) {
                SelectStatement select =
                        new SQLParser(parentToBe.getViewStatement()).parseQuery().combine(whereNode);
                whereNode = select.getWhere();
            }
            Expression where = whereNode.accept(expressionCompiler);
            if (where != null && !LiteralExpression.isTrue(where)) {
                TableName baseTableName = create.getBaseTableName();
                StringBuilder buf = new StringBuilder();
                whereNode.toSQL(resolver, buf);
                viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(),
                        baseTableName.getTableName(), buf.toString());
            }
            if (viewTypeToBe != ViewType.MAPPED) {
                Long scn = connection.getSCN();
                // If we have no SCN on our connection and the base table is not
                // transactional, freeze the timestamp at the point the base table was
                // resolved to prevent any race condition from clocks being out of sync.
                connectionToBe = (scn != null || tableRef.getTable().isTransactional())
                        ? connection
                        : new PhoenixConnection(
                                // Make sure the new table still gets cached
                                // on our connection.
                                new DelegateConnectionQueryServices(connection.getQueryServices()) {
                                    @Override
                                    public void addTable(PTable table, long resolvedTime)
                                            throws SQLException {
                                        connection.addTable(table, resolvedTime);
                                    }
                                }, connection, tableRef.getTimeStamp() + 1);
                viewColumnConstantsToBe = new byte[nColumns][];
                ViewWhereExpressionVisitor visitor =
                        new ViewWhereExpressionVisitor(parentToBe, viewColumnConstantsToBe);
                where.accept(visitor);
                // If the view is not updatable, viewColumnConstants should be empty. We will
                // still inherit our parent's viewConstants, but we have no additional ones.
                viewTypeToBe = visitor.isUpdatable() ? ViewType.UPDATABLE : ViewType.READ_ONLY;
                if (viewTypeToBe != ViewType.UPDATABLE) {
                    viewColumnConstantsToBe = null;
                }
            }
        }
    }
    final ViewType viewType = viewTypeToBe;
    final String viewStatement = viewStatementToBe;
    final byte[][] viewColumnConstants = viewColumnConstantsToBe;
    final BitSet isViewColumnReferenced = isViewColumnReferencedToBe;
    List<ParseNode> splitNodes = create.getSplitNodes();
    final byte[][] splits = new byte[splitNodes.size()][];
    ImmutableBytesWritable ptr = context.getTempPtr();
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    for (int i = 0; i < splits.length; i++) {
        ParseNode node = splitNodes.get(i);
        if (node instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) node, VARBINARY_DATUM);
        }
        if (node.isStateless()) {
            Expression expression = node.accept(expressionCompiler);
            if (expression.evaluate(null, ptr)) {
                splits[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                continue;
            }
        }
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT)
                .setMessage("Node: " + node).build().buildException();
    }
    final MetaDataClient client = new MetaDataClient(connectionToBe);
    final PTable parent = parentToBe;
    return new BaseMutationPlan(context, operation) {
        @Override
        public MutationState execute() throws SQLException {
            try {
                return client.createTable(finalCreate, splits, parent, viewStatement, viewType,
                        viewColumnConstants, isViewColumnReferenced);
            } finally {
                if (client.getConnection() != connection) {
                    client.getConnection().close();
                }
            }
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("CREATE TABLE"));
        }
    };
}
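
The distinctive SQLParser call in compile() is the parent/child combination: the parent view's stored statement is re-parsed and the child view's WHERE clause is folded in with SelectStatement.combine(). A minimal sketch of that step; both statement strings are hypothetical stand-ins for stored view definitions:

import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;

public class CombineViewWhereSketch {
    public static void main(String[] args) throws java.sql.SQLException {
        // The child view's WHERE clause, obtained here by parsing a second query.
        ParseNode childWhere =
                new SQLParser("SELECT * FROM T WHERE B = 2").parseQuery().getWhere();
        // Fold it into the parent's view statement, as compile() does.
        SelectStatement combined =
                new SQLParser("SELECT * FROM T WHERE A = 1").parseQuery().combine(childWhere);
        System.out.println(combined.getWhere());
    }
}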
Use of org.apache.phoenix.parse.SQLParser in project phoenix by apache.
The class HashJoinPlan, method iterator.
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = delegate.getContext().getScan();
    }
    int count = subPlans.length;
    PhoenixConnection connection = getContext().getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    ExecutorService executor = services.getExecutor();
    List<Future<ServerCache>> futures = Lists.newArrayListWithExpectedSize(count);
    if (joinInfo != null) {
        hashClient = hashClient != null
                ? hashClient
                : new HashCacheClient(delegate.getContext().getConnection());
        firstJobEndTime = new AtomicLong(0);
        keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
    }
    for (int i = 0; i < count; i++) {
        final int index = i;
        futures.add(executor.submit(new JobCallable<ServerCache>() {

            @Override
            public ServerCache call() throws Exception {
                ServerCache cache = subPlans[index].execute(HashJoinPlan.this);
                return cache;
            }

            @Override
            public Object getJobId() {
                return HashJoinPlan.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return NO_OP_INSTANCE;
            }
        }));
    }
    SQLException firstException = null;
    for (int i = 0; i < count; i++) {
        try {
            ServerCache result = futures.get(i).get();
            if (result != null) {
                dependencies.add(result);
            }
            subPlans[i].postProcess(result, this);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            if (firstException == null) {
                firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION)
                        .setRootCause(e)
                        .setMessage("Sub plan [" + i + "] execution interrupted.")
                        .build().buildException();
            }
        } catch (ExecutionException e) {
            if (firstException == null) {
                firstException = new SQLException(
                        "Encountered exception in sub plan [" + i + "] execution.", e.getCause());
            }
        }
    }
    if (firstException != null) {
        SQLCloseables.closeAllQuietly(dependencies);
        throw firstException;
    }
    Expression postFilter = null;
    boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
    if (recompileWhereClause || hasKeyRangeExpressions) {
        StatementContext context = delegate.getContext();
        PTable table = context.getCurrentTable().getTable();
        ParseNode viewWhere = table.getViewStatement() == null
                ? null
                : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
        context.setResolver(FromCompiler.getResolverForQuery(
                (SelectStatement) (delegate.getStatement()),
                delegate.getContext().getConnection()));
        if (recompileWhereClause) {
            postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(),
                    viewWhere, null);
        }
        if (hasKeyRangeExpressions) {
            WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere,
                    keyRangeExpressions, true, null);
        }
    }
    if (joinInfo != null) {
        HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
    }
    ResultIterator iterator = joinInfo == null
            ? delegate.iterator(scanGrouper, scan)
            : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan);
    if (statement.getInnerSelectStatement() != null && postFilter != null) {
        iterator = new FilterResultIterator(iterator, postFilter);
    }
    return iterator;
}
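
Across all five call sites on this page, a fresh SQLParser is constructed around a single statement string and exactly one parse method is invoked on it. The two entry points seen here, side by side in one sketch (statement strings hypothetical):

import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;

public class SqlParserEntryPointsSketch {
    public static void main(String[] args) throws java.sql.SQLException {
        // Standalone expression, as in PTableImpl.newKey
        ParseNode expr = new SQLParser("A + 1").parseExpression();
        // Full query with WHERE clause extraction, as in the view call sites
        ParseNode where = new SQLParser("SELECT * FROM T WHERE A = 1")
                .parseQuery().getWhere();
        System.out.println(expr + " / " + where);
    }
}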