
Example 6 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class ProjectionCompiler, method compile:

/**
     * Builds the projection for the scan
     * @param context query context kept between compilation of different query clauses
     * @param statement the select statement being compiled
     * @param groupBy compiled GROUP BY clause
     * @param targetColumns list of columns, parallel to aliasedNodes, that are being set for an
     * UPSERT SELECT statement. Used to coerce expression types to the expected target type.
     * @param where the compiled WHERE clause, used to decide whether the empty key value must be projected
     * @return projector used to access row values during scan
     * @throws SQLException
     */
public static RowProjector compile(StatementContext context, SelectStatement statement, GroupBy groupBy, List<? extends PDatum> targetColumns, Expression where) throws SQLException {
    List<KeyValueColumnExpression> arrayKVRefs = new ArrayList<KeyValueColumnExpression>();
    List<ProjectedColumnExpression> arrayProjectedColumnRefs = new ArrayList<ProjectedColumnExpression>();
    List<Expression> arrayKVFuncs = new ArrayList<Expression>();
    List<Expression> arrayOldFuncs = new ArrayList<Expression>();
    Map<Expression, Integer> arrayExpressionCounts = new HashMap<>();
    List<AliasedNode> aliasedNodes = statement.getSelect();
    // Setup projected columns in Scan
    SelectClauseVisitor selectVisitor = new SelectClauseVisitor(context, groupBy, arrayKVRefs, arrayKVFuncs, arrayExpressionCounts, arrayProjectedColumnRefs, arrayOldFuncs, statement);
    List<ExpressionProjector> projectedColumns = new ArrayList<ExpressionProjector>();
    ColumnResolver resolver = context.getResolver();
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    boolean resolveColumn = !tableRef.equals(resolver.getTables().get(0));
    boolean isWildcard = false;
    Scan scan = context.getScan();
    int index = 0;
    List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
    List<byte[]> projectedFamilies = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
    for (AliasedNode aliasedNode : aliasedNodes) {
        ParseNode node = aliasedNode.getNode();
        // TODO: visitor?
        if (node instanceof WildcardParseNode) {
            if (statement.isAggregate()) {
                ExpressionCompiler.throwNonAggExpressionInAggException(node.toString());
            }
            if (tableRef == TableRef.EMPTY_TABLE_REF) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException();
            }
            isWildcard = true;
            if (tableRef.getTable().getType() == PTableType.INDEX && ((WildcardParseNode) node).isRewrite()) {
                projectAllIndexColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns);
            } else {
                projectAllTableColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns);
            }
        } else if (node instanceof TableWildcardParseNode) {
            TableName tName = ((TableWildcardParseNode) node).getTableName();
            TableRef tRef = resolver.resolveTable(tName.getSchemaName(), tName.getTableName());
            if (tRef.equals(tableRef)) {
                isWildcard = true;
            }
            if (tRef.getTable().getType() == PTableType.INDEX && ((TableWildcardParseNode) node).isRewrite()) {
                projectAllIndexColumns(context, tRef, true, projectedExpressions, projectedColumns, targetColumns);
            } else {
                projectAllTableColumns(context, tRef, true, projectedExpressions, projectedColumns, targetColumns);
            }
        } else if (node instanceof FamilyWildcardParseNode) {
            if (tableRef == TableRef.EMPTY_TABLE_REF) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException();
            }
            // Project everything for SELECT cf.*
            String cfName = ((FamilyWildcardParseNode) node).getName();
            // Delay projecting to scan, as when any other column in the column family gets
            // added to the scan, it overwrites that we want to project the entire column
            // family. Instead, we do the projection at the end.
            // TODO: consider having a ScanUtil.addColumn and ScanUtil.addFamily to work
            // around this, as this code depends on this function being the last place where
            // columns are projected (which is currently true, but could change).
            projectedFamilies.add(Bytes.toBytes(cfName));
            if (tableRef.getTable().getType() == PTableType.INDEX && ((FamilyWildcardParseNode) node).isRewrite()) {
                projectIndexColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns);
            } else {
                projectTableColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns);
            }
        } else {
            Expression expression = node.accept(selectVisitor);
            projectedExpressions.add(expression);
            expression = coerceIfNecessary(index, targetColumns, expression);
            if (node instanceof BindParseNode) {
                context.getBindManager().addParamMetaData((BindParseNode) node, expression);
            }
            if (!node.isStateless()) {
                if (!selectVisitor.isAggregate() && statement.isAggregate()) {
                    ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                }
            }
            String columnAlias = aliasedNode.getAlias() != null ? aliasedNode.getAlias() : SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias());
            boolean isCaseSensitive = aliasedNode.getAlias() != null ? aliasedNode.isCaseSensitve() : (columnAlias != null ? SchemaUtil.isCaseSensitive(aliasedNode.getNode().getAlias()) : selectVisitor.isCaseSensitive);
            String name = columnAlias == null ? expression.toString() : columnAlias;
            projectedColumns.add(new ExpressionProjector(name, tableRef.getTableAlias() == null ? (table.getName() == null ? "" : table.getName().getString()) : tableRef.getTableAlias(), expression, isCaseSensitive));
        }
        selectVisitor.reset();
        index++;
    }
    for (int i = arrayProjectedColumnRefs.size() - 1; i >= 0; i--) {
        Expression expression = arrayProjectedColumnRefs.get(i);
        Integer count = arrayExpressionCounts.get(expression);
        if (count != 0) {
            arrayKVRefs.remove(i);
            arrayKVFuncs.remove(i);
            arrayOldFuncs.remove(i);
        }
    }
    if (arrayKVFuncs.size() > 0 && arrayKVRefs.size() > 0) {
        serailizeArrayIndexInformationAndSetInScan(context, arrayKVFuncs, arrayKVRefs);
        KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
        for (Expression expression : arrayKVRefs) {
            builder.addField(expression);
        }
        KeyValueSchema kvSchema = builder.build();
        ValueBitSet arrayIndexesBitSet = ValueBitSet.newInstance(kvSchema);
        builder = new KeyValueSchemaBuilder(0);
        for (Expression expression : arrayKVFuncs) {
            builder.addField(expression);
        }
        KeyValueSchema arrayIndexesSchema = builder.build();
        Map<Expression, Expression> replacementMap = new HashMap<>();
        for (int i = 0; i < arrayOldFuncs.size(); i++) {
            Expression function = arrayKVFuncs.get(i);
            replacementMap.put(arrayOldFuncs.get(i), new ArrayIndexExpression(i, function.getDataType(), arrayIndexesBitSet, arrayIndexesSchema));
        }
        ReplaceArrayFunctionExpressionVisitor visitor = new ReplaceArrayFunctionExpressionVisitor(replacementMap);
        for (int i = 0; i < projectedColumns.size(); i++) {
            ExpressionProjector projector = projectedColumns.get(i);
            projectedColumns.set(i, new ExpressionProjector(projector.getName(), tableRef.getTableAlias() == null ? (table.getName() == null ? "" : table.getName().getString()) : tableRef.getTableAlias(), projector.getExpression().accept(visitor), projector.isCaseSensitive()));
        }
    }
    // TODO make estimatedByteSize more accurate by counting the joined columns.
    int estimatedKeySize = table.getRowKeySchema().getEstimatedValueLength();
    int estimatedByteSize = 0;
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : scan.getFamilyMap().entrySet()) {
        PColumnFamily family = table.getColumnFamily(entry.getKey());
        if (entry.getValue() == null) {
            for (PColumn column : family.getColumns()) {
                Integer maxLength = column.getMaxLength();
                int byteSize = column.getDataType().isFixedWidth() ? (maxLength == null ? column.getDataType().getByteSize() : maxLength) : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
                estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize;
            }
        } else {
            for (byte[] cq : entry.getValue()) {
                PColumn column = family.getPColumnForColumnQualifier(cq);
                Integer maxLength = column.getMaxLength();
                int byteSize = column.getDataType().isFixedWidth() ? (maxLength == null ? column.getDataType().getByteSize() : maxLength) : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
                estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize;
            }
        }
    }
    boolean isProjectEmptyKeyValue = false;
    if (isWildcard) {
        projectAllColumnFamilies(table, scan);
    } else {
        isProjectEmptyKeyValue = where == null || LiteralExpression.isTrue(where) || where.requiresFinalEvaluation();
        for (byte[] family : projectedFamilies) {
            projectColumnFamily(table, scan, family);
        }
    }
    return new RowProjector(projectedColumns, estimatedByteSize, isProjectEmptyKeyValue, resolver.hasUDFs(), isWildcard);
}
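
The byte-size estimation loop near the end reduces to a small piece of per-cell arithmetic. Below is a minimal, self-contained sketch of that calculation; the class name and the two constants are hypothetical stand-ins for SizedUtil.KEY_VALUE_SIZE and RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE, so treat the numbers as illustrative, not Phoenix's actual values.

public class CellSizeEstimateSketch {

    static final int KEY_VALUE_OVERHEAD = 24;        // hypothetical stand-in for SizedUtil.KEY_VALUE_SIZE
    static final int VARIABLE_LENGTH_ESTIMATE = 10;  // hypothetical stand-in for RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE

    // Fixed-width types contribute their declared byte size unless a max length overrides it;
    // variable-width types fall back to a constant estimate, exactly as in the compile() loop.
    static int estimateCellSize(boolean isFixedWidth, Integer typeByteSize, Integer maxLength, int estimatedKeySize) {
        int valueSize = isFixedWidth
                ? (maxLength == null ? typeByteSize : maxLength)
                : VARIABLE_LENGTH_ESTIMATE;
        return KEY_VALUE_OVERHEAD + estimatedKeySize + valueSize;
    }

    public static void main(String[] args) {
        // A fixed-width 8-byte column (e.g. BIGINT) with a 12-byte row key: 24 + 12 + 8 = 44
        System.out.println(estimateCellSize(true, 8, null, 12));
        // A variable-width column (e.g. VARCHAR) falls back to the estimate: 24 + 12 + 10 = 46
        System.out.println(estimateCellSize(false, null, null, 12));
    }
}
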
Also used: ArrayList(java.util.ArrayList), HashMap(java.util.HashMap), Map(java.util.Map), NavigableSet(java.util.NavigableSet), Scan(org.apache.hadoop.hbase.client.Scan), BaseTerminalExpression(org.apache.phoenix.expression.BaseTerminalExpression), CoerceExpression(org.apache.phoenix.expression.CoerceExpression), Expression(org.apache.phoenix.expression.Expression), KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression), LiteralExpression(org.apache.phoenix.expression.LiteralExpression), ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression), SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression), ReplaceArrayFunctionExpressionVisitor(org.apache.phoenix.expression.visitor.ReplaceArrayFunctionExpressionVisitor), AliasedNode(org.apache.phoenix.parse.AliasedNode), BindParseNode(org.apache.phoenix.parse.BindParseNode), ColumnParseNode(org.apache.phoenix.parse.ColumnParseNode), FamilyWildcardParseNode(org.apache.phoenix.parse.FamilyWildcardParseNode), FunctionParseNode(org.apache.phoenix.parse.FunctionParseNode), ParseNode(org.apache.phoenix.parse.ParseNode), SequenceValueParseNode(org.apache.phoenix.parse.SequenceValueParseNode), TableName(org.apache.phoenix.parse.TableName), TableWildcardParseNode(org.apache.phoenix.parse.TableWildcardParseNode), WildcardParseNode(org.apache.phoenix.parse.WildcardParseNode), KeyValueSchema(org.apache.phoenix.schema.KeyValueSchema), KeyValueSchemaBuilder(org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder), PColumn(org.apache.phoenix.schema.PColumn), PColumnFamily(org.apache.phoenix.schema.PColumnFamily), PTable(org.apache.phoenix.schema.PTable), TableRef(org.apache.phoenix.schema.TableRef), ValueBitSet(org.apache.phoenix.schema.ValueBitSet)

Example 7 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class OrderByCompiler, method compile:

/**
     * Gets a list of columns in the ORDER BY clause
     * @param context the query context for tracking various states
     * associated with the given select statement
     * @param statement the select statement being compiled
     * @param groupBy the list of columns in the GROUP BY clause
     * @param limit the row limit or null if no limit
     * @param offset the row offset or null if no offset
     * @return the compiled ORDER BY clause
     * @throws SQLException
     */
public static OrderBy compile(StatementContext context, SelectStatement statement, GroupBy groupBy, Integer limit, Integer offset, RowProjector rowProjector, TupleProjector tupleProjector, boolean isInRowKeyOrder) throws SQLException {
    List<OrderByNode> orderByNodes = statement.getOrderBy();
    if (orderByNodes.isEmpty()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    // for ungrouped aggregates treated as a GROUP BY expression, compile against an empty GROUP BY
    ExpressionCompiler compiler;
    if (groupBy.isUngroupedAggregate()) {
        compiler = new ExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY) {

            @Override
            protected Expression addExpression(Expression expression) {
                return expression;
            }

            @Override
            protected void addColumn(PColumn column) {
            }
        };
    } else {
        compiler = new ExpressionCompiler(context, groupBy);
    }
    // accumulate columns in ORDER BY
    OrderPreservingTracker tracker = new OrderPreservingTracker(context, groupBy, Ordering.ORDERED, orderByNodes.size(), tupleProjector);
    LinkedHashSet<OrderByExpression> orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
    for (OrderByNode node : orderByNodes) {
        ParseNode parseNode = node.getNode();
        Expression expression = null;
        if (parseNode instanceof LiteralParseNode && ((LiteralParseNode) parseNode).getType() == PInteger.INSTANCE) {
            Integer index = (Integer) ((LiteralParseNode) parseNode).getValue();
            int size = rowProjector.getColumnProjectors().size();
            if (index > size || index <= 0) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND).build().buildException();
            }
            expression = rowProjector.getColumnProjector(index - 1).getExpression();
        } else {
            expression = node.getNode().accept(compiler);
            // Detect a mix of aggregates and non-aggregates (e.g. ORDER BY txns, SUM(txns))
            if (!expression.isStateless() && !compiler.isAggregate()) {
                if (statement.isAggregate() || statement.isDistinct()) {
                    // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x
                    if (statement.isDistinct()) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT).setMessage(expression.toString()).build().buildException();
                    }
                    ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                }
            }
        }
        if (!expression.isStateless()) {
            boolean isAscending = node.isAscending();
            boolean isNullsLast = node.isNullsLast();
            tracker.track(expression, isAscending ? SortOrder.ASC : SortOrder.DESC, isNullsLast);
            // If the expression itself has a DESC sort order (e.g. a DESC row key column),
            // flip the requested direction, since this is the order the values actually are in.
            if (expression.getSortOrder() == SortOrder.DESC) {
                isAscending = !isAscending;
            }
            OrderByExpression orderByExpression = new OrderByExpression(expression, isNullsLast, isAscending);
            orderByExpressions.add(orderByExpression);
        }
        compiler.reset();
    }
    // we can remove ORDER BY clauses in case of only COUNT(DISTINCT...) clauses
    if (orderByExpressions.isEmpty() || groupBy.isUngroupedAggregate()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    // If we're ordering by the order returned by the scan, we don't need an order by
    if (isInRowKeyOrder && tracker.isOrderPreserving()) {
        if (tracker.isReverse()) {
            // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table, so don't return it for such table types.
            if (context.getConnection().getQueryServices().getProps().getBoolean(QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN) && !context.getScanRanges().useSkipScanFilter() && context.getCurrentTable().getTable().getType() != PTableType.PROJECTED && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) {
                return OrderBy.REV_ROW_KEY_ORDER_BY;
            }
        } else {
            return OrderBy.FWD_ROW_KEY_ORDER_BY;
        }
    }
    return new OrderBy(Lists.newArrayList(orderByExpressions.iterator()));
}
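
Two mechanics in this method are easy to miss: an integer literal in ORDER BY is a 1-based position into the projected columns, and a natively DESC expression flips the requested direction. Here is a minimal sketch of the positional lookup, with plain strings standing in for RowProjector's column projectors (the class and names are hypothetical, not Phoenix API):

import java.util.Arrays;
import java.util.List;

public class OrderByOrdinalSketch {

    // Mirrors the bounds check above: positions are 1-based, and anything outside
    // [1, size] is rejected, just as PARAM_INDEX_OUT_OF_BOUND is raised in the compiler.
    static String resolveOrdinal(int index, List<String> projectedColumns) {
        int size = projectedColumns.size();
        if (index > size || index <= 0) {
            throw new IllegalArgumentException("ORDER BY position " + index + " is out of bounds");
        }
        return projectedColumns.get(index - 1);
    }

    public static void main(String[] args) {
        List<String> projected = Arrays.asList("name", "SUM(txns)");
        System.out.println(resolveOrdinal(2, projected)); // prints SUM(txns), i.e. ORDER BY 2
    }
}
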
Also used: SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo), Expression(org.apache.phoenix.expression.Expression), OrderByExpression(org.apache.phoenix.expression.OrderByExpression), LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode), OrderByNode(org.apache.phoenix.parse.OrderByNode), ParseNode(org.apache.phoenix.parse.ParseNode), PColumn(org.apache.phoenix.schema.PColumn), PInteger(org.apache.phoenix.schema.types.PInteger)

Example 8 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class PostDDLCompiler, method compile:

public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, new ColumnResolver() {

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<PFunction> getFunctions() {
            return Collections.<PFunction>emptyList();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new FunctionNotFoundException(functionName);
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            throw new SchemaNotFoundException(schemaName);
        }

        @Override
        public List<PSchema> getSchemas() {
            throw new UnsupportedOperationException();
        }
    }, scan, new SequenceManager(statement));
    return new BaseMutationPlan(context, Operation.UPSERT) {

        /* FIXME */
        @Override
        public MutationState execute() throws SQLException {
            if (tableRefs.isEmpty()) {
                return new MutationState(0, 1000, connection);
            }
            boolean wasAutoCommit = connection.getAutoCommit();
            try {
                connection.setAutoCommit(true);
                SQLException sqlE = null;
                /*
                     * Handles:
                     * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
                     * 2) deletion of all column values for an ALTER TABLE DROP COLUMN
                     * 3) updating the necessary rows to have an empty KV
                     * 4) updating table stats
                     */
                long totalMutationCount = 0;
                for (final TableRef tableRef : tableRefs) {
                    Scan scan = ScanUtil.newScan(context.getScan());
                    SelectStatement select = SelectStatement.COUNT_ONE;
                    // We need to use this tableRef
                    ColumnResolver resolver = new ColumnResolver() {

                        @Override
                        public List<TableRef> getTables() {
                            return Collections.singletonList(tableRef);
                        }

                        @Override
                        public java.util.List<PFunction> getFunctions() {
                            return Collections.emptyList();
                        }

                        @Override
                        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
                            PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
                            return new ColumnRef(tableRef, column.getPosition());
                        }

                        @Override
                        public PFunction resolveFunction(String functionName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean hasUDFs() {
                            return false;
                        }

                        @Override
                        public List<PSchema> getSchemas() {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public PSchema resolveSchema(String schemaName) throws SQLException {
                            throw new SchemaNotFoundException(schemaName);
                        }
                    };
                    PhoenixStatement statement = new PhoenixStatement(connection);
                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
                    long ts = timestamp;
                    // DDL operations aren't transactional, so the time range upper bound is based on a
                    // server-side timestamp; we don't need conflict detection or filtering of invalid
                    // transactions in this case, so maybe this is ok.
                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                        ts = TransactionUtil.convertToNanoseconds(ts);
                    }
                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
                    if (emptyCF != null) {
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
                    }
                    ServerCache cache = null;
                    try {
                        if (deleteList != null) {
                            if (deleteList.isEmpty()) {
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                            // In the case of a row deletion, add index metadata so mutable secondary indexing works
                            /* TODO: we currently manually run a scan to delete the index data here
                                    ImmutableBytesWritable ptr = context.getTempPtr();
                                    tableRef.getTable().getIndexMaintainers(ptr);
                                    if (ptr.getLength() > 0) {
                                        IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                        cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                        byte[] uuidValue = cache.getId();
                                        scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                    }
                                    */
                            } else {
                                // In the case of the empty key value column family changing, do not send the index
                                // metadata, as we're currently managing this from the client. It's possible for the
                                // data empty column family to stay the same, while the index empty column family
                                // changes.
                                PColumn column = deleteList.get(0);
                                byte[] cq = column.getColumnQualifierBytes();
                                if (emptyCF == null) {
                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
                                }
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
                            }
                        }
                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                        if (projectCFs == null) {
                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                columnFamilies.add(family.getName().getBytes());
                            }
                        } else {
                            for (byte[] projectCF : projectCFs) {
                                columnFamilies.add(projectCF);
                            }
                        }
                        // Need to project all column families into the scan, since we haven't yet created our empty key value
                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
                        // Explicitly project these column families and don't project the empty key value,
                        // since at this point we haven't added the empty key value everywhere.
                        if (columnFamilies != null) {
                            scan.getFamilyMap().clear();
                            for (byte[] family : columnFamilies) {
                                scan.addFamily(family);
                            }
                            projector = new RowProjector(projector, false);
                        }
                        // Ignore failures to resolve view columns, as that just means the view is invalid;
                        // continue on and try to perform any other Post DDL operations.
                        try {
                            // Since dropping a VIEW does not affect the underlying data, we do
                            // not need to pass through the view statement here.
                            // Push where clause into scan
                            WhereCompiler.compile(context, select);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            continue;
                        } catch (AmbiguousColumnException e) {
                            continue;
                        }
                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                        try {
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } catch (TableNotFoundException e) {
                        // Ignore and continue, as HBase throws when table hasn't been written to
                        // FIXME: Remove if this is fixed in 0.96
                        }
                    } finally {
                        if (cache != null) {
                            // Remove server cache if there is one
                            cache.close();
                        }
                    }
                }
                final long count = totalMutationCount;
                return new MutationState(1, 1000, connection) {

                    @Override
                    public long getUpdateCount() {
                        return count;
                    }
                };
            } finally {
                if (!wasAutoCommit)
                    connection.setAutoCommit(wasAutoCommit);
            }
        }
    };
}
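
The iterator handling inside execute() follows a pattern worth extracting: the first SQLException wins, a failure in close() is chained via setNextException instead of being swallowed, and whatever was recorded is rethrown at the end. Below is a minimal sketch of that pattern under those assumptions; the interface and names are illustrative, not Phoenix API.

import java.sql.SQLException;

public class FirstExceptionWinsSketch {

    interface SqlWork { void run() throws SQLException; }

    // Run the work, then close the resource; the first SQLException is rethrown, and any
    // exception raised while closing is attached to it via setNextException.
    static void runThenClose(SqlWork work, SqlWork close) throws SQLException {
        SQLException sqlE = null;
        try {
            work.run();
        } catch (SQLException e) {
            sqlE = e;
        } finally {
            try {
                close.run();
            } catch (SQLException e) {
                if (sqlE == null) {
                    sqlE = e;
                } else {
                    sqlE.setNextException(e); // chain, don't swallow
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;
                }
            }
        }
    }

    public static void main(String[] args) {
        try {
            runThenClose(
                () -> { throw new SQLException("from work"); },
                () -> { throw new SQLException("from close"); });
        } catch (SQLException e) {
            System.out.println(e.getMessage() + " / " + e.getNextException().getMessage());
        }
    }
}
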
Also used: ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache), PFunction(org.apache.phoenix.parse.PFunction), SQLException(java.sql.SQLException), PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement), PColumn(org.apache.phoenix.schema.PColumn), SelectStatement(org.apache.phoenix.parse.SelectStatement), TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException), List(java.util.List), AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException), AggregatePlan(org.apache.phoenix.execute.AggregatePlan), ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable), ResultIterator(org.apache.phoenix.iterate.ResultIterator), PSchema(org.apache.phoenix.parse.PSchema), PColumnFamily(org.apache.phoenix.schema.PColumnFamily), ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException), FunctionNotFoundException(org.apache.phoenix.schema.FunctionNotFoundException), ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException), MutationState(org.apache.phoenix.execute.MutationState), Scan(org.apache.hadoop.hbase.client.Scan), ColumnRef(org.apache.phoenix.schema.ColumnRef), SchemaNotFoundException(org.apache.phoenix.schema.SchemaNotFoundException), TableRef(org.apache.phoenix.schema.TableRef), Tuple(org.apache.phoenix.schema.tuple.Tuple)

Example 9 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class PostIndexDDLCompiler, method compile:

public MutationPlan compile(final PTable indexTable) throws SQLException {
    /*
         * Handles:
         * 1) Populating a newly created index table with contents.
         * 2) Activating the index by setting the INDEX_STATE to ACTIVE.
         */
    // NOTE: For the first version, we use an UPSERT SELECT to populate the new index table and
    //   return synchronously. Creating an index on an existing table with a large amount of data
    //   will therefore take a very long time.
    //   In the long term, we should change this to an asynchronous process that populates the index
    //   and allows the user to easily monitor the progress of index creation.
    StringBuilder indexColumns = new StringBuilder();
    StringBuilder dataColumns = new StringBuilder();
    // Add the pk index columns
    List<PColumn> indexPKColumns = indexTable.getPKColumns();
    int nIndexPKColumns = indexTable.getPKColumns().size();
    boolean isSalted = indexTable.getBucketNum() != null;
    boolean isMultiTenant = connection.getTenantId() != null && indexTable.isMultiTenant();
    boolean isViewIndex = indexTable.getViewIndexId() != null;
    int posOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isViewIndex ? 1 : 0);
    for (int i = posOffset; i < nIndexPKColumns; i++) {
        PColumn col = indexPKColumns.get(i);
        String indexColName = col.getName().getString();
        // need to escape backslashes, as this is used in the SELECT statement
        String dataColName = StringUtil.escapeBackslash(col.getExpressionStr());
        dataColumns.append(dataColName).append(",");
        indexColumns.append('"').append(indexColName).append("\",");
        indexColumnNames.add(indexColName);
        dataColumnNames.add(dataColName);
    }
    // Add the covered columns
    for (PColumnFamily family : indexTable.getColumnFamilies()) {
        for (PColumn col : family.getColumns()) {
            if (col.getViewConstant() == null) {
                String indexColName = col.getName().getString();
                String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColName);
                String dataColumnName = IndexUtil.getDataColumnName(indexColName);
                if (!dataFamilyName.equals("")) {
                    dataColumns.append('"').append(dataFamilyName).append("\".");
                }
                dataColumns.append('"').append(dataColumnName).append("\",");
                indexColumns.append('"').append(indexColName).append("\",");
                indexColumnNames.add(indexColName);
                dataColumnNames.add(dataColumnName);
            }
        }
    }
    final PTable dataTable = dataTableRef.getTable();
    dataColumns.setLength(dataColumns.length() - 1);
    indexColumns.setLength(indexColumns.length() - 1);
    String schemaName = dataTable.getSchemaName().getString();
    String tableName = indexTable.getTableName().getString();
    StringBuilder updateStmtStr = new StringBuilder();
    updateStmtStr.append("UPSERT /*+ NO_INDEX */ INTO ").append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"').append(tableName).append("\"(").append(indexColumns).append(") ");
    final StringBuilder selectQueryBuilder = new StringBuilder();
    selectQueryBuilder.append(" SELECT ").append(dataColumns).append(" FROM ").append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"').append(dataTable.getTableName().getString()).append('"');
    this.selectQuery = selectQueryBuilder.toString();
    updateStmtStr.append(this.selectQuery);
    try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
        DelegateMutationPlan delegate = new DelegateMutationPlan(statement.compileMutation(updateStmtStr.toString())) {

            @Override
            public MutationState execute() throws SQLException {
                connection.getMutationState().commitDDLFence(dataTable);
                return super.execute();
            }
        };
        return delegate;
    }
}
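
The string assembly above relies on two conventions: identifiers are wrapped in double quotes to preserve case, and trailing commas are trimmed with StringBuilder.setLength. A minimal sketch of the same assembly with hard-coded illustrative values (the schema, table, and column names are hypothetical):

public class UpsertSelectBuilderSketch {

    public static void main(String[] args) {
        StringBuilder indexColumns = new StringBuilder();
        for (String col : new String[] { ":PK", "0:V1" }) {
            indexColumns.append('"').append(col).append("\",");
        }
        indexColumns.setLength(indexColumns.length() - 1); // trim the trailing comma

        String schemaName = "S";   // an empty string means no schema prefix
        String tableName = "IDX_T";
        StringBuilder upsert = new StringBuilder();
        upsert.append("UPSERT /*+ NO_INDEX */ INTO ")
              .append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".")
              .append('"').append(tableName).append("\"(")
              .append(indexColumns).append(") SELECT ...");
        // prints: UPSERT /*+ NO_INDEX */ INTO "S"."IDX_T"(":PK","0:V1") SELECT ...
        System.out.println(upsert);
    }
}
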
Also used: PColumn(org.apache.phoenix.schema.PColumn), PColumnFamily(org.apache.phoenix.schema.PColumnFamily), PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement), PTable(org.apache.phoenix.schema.PTable)

Example 10 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class DeleteCompiler, method deleteRows:

private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, List<TableRef> indexTableRefs, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
    PTable table = targetTableRef.getTable();
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    Map<ImmutableBytesPtr, RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
    List<Map<ImmutableBytesPtr, RowMutationState>> indexMutations = null;
    // When rows must also be deleted from index tables, their mutations are gathered from
    // the data table through a single query to save executing an additional one.
    if (!indexTableRefs.isEmpty()) {
        indexMutations = Lists.newArrayListWithExpectedSize(indexTableRefs.size());
        for (int i = 0; i < indexTableRefs.size(); i++) {
            indexMutations.add(Maps.<ImmutableBytesPtr, RowMutationState>newHashMapWithExpectedSize(batchSize));
        }
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        int rowCount = 0;
        while (rs.next()) {
            // allocate new as this is a key in a Map
            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
            // If we're deleting from the same table we're selecting from, the row key can be
            // used as-is; there's no translation required.
            if (sourceTableRef.equals(targetTableRef)) {
                rs.getCurrentRow().getKey(ptr);
            } else {
                for (int i = offset; i < values.length; i++) {
                    byte[] byteValue = rs.getBytes(i + 1 - offset);
                    // TODO: consider going under the hood and just getting the bytes
                    if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
                        byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                        byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
                    }
                    values[i] = byteValue;
                }
                table.newKey(ptr, values);
            }
            // When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the
            // row key will already have its value. 
            mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            for (int i = 0; i < indexTableRefs.size(); i++) {
                // allocate new as this is a key in a Map
                ImmutableBytesPtr indexPtr = new ImmutableBytesPtr();
                rs.getCurrentRow().getKey(indexPtr);
                indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                for (int i = 0; i < indexTableRefs.size(); i++) {
                    MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.getMutationState().send();
                mutations.clear();
                if (indexMutations != null) {
                    indexMutations.clear();
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
        MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
        for (int i = 0; i < indexTableRefs.size(); i++) {
            // Pass 0 for the committed row count so these index rows aren't counted toward the total.
            MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
            state.join(indexState);
        }
        return state;
    }
}
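
When a PK column is declared DESC, its stored bytes are inverted, which is why the loop above re-inverts values read back through the ResultSet before rebuilding the key. Below is a minimal sketch of that inversion; invert() mimics the call shape of SortOrder.invert seen above, and the bitwise complement is an assumption about the encoding, not a quote of the Phoenix implementation.

import java.util.Arrays;

public class DescInvertSketch {

    // Same parameter shape as the SortOrder.invert call in deleteRows above.
    static byte[] invert(byte[] src, int srcOffset, byte[] dest, int destOffset, int length) {
        for (int i = 0; i < length; i++) {
            dest[destOffset + i] = (byte) ~src[srcOffset + i];
        }
        return dest;
    }

    public static void main(String[] args) {
        byte[] ascending = { 1, 2, 3 };
        byte[] stored = invert(ascending, 0, new byte[3], 0, 3);
        System.out.println(Arrays.toString(stored));  // [-2, -3, -4]: the DESC representation
        // Inverting twice restores the original bytes, which is what the loop relies on.
        System.out.println(Arrays.toString(invert(stored, 0, new byte[3], 0, 3))); // [1, 2, 3]
    }
}
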
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection), ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement), PTable(org.apache.phoenix.schema.PTable), Hint(org.apache.phoenix.parse.HintNode.Hint), PColumn(org.apache.phoenix.schema.PColumn), MutationState(org.apache.phoenix.execute.MutationState), RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState), PName(org.apache.phoenix.schema.PName), PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet), Map(java.util.Map), HashMap(java.util.HashMap), ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)

Aggregations

PColumn (org.apache.phoenix.schema.PColumn): 87
PTable (org.apache.phoenix.schema.PTable): 47
Expression (org.apache.phoenix.expression.Expression): 20
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 20
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17
ArrayList (java.util.ArrayList): 16
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 16
ColumnRef (org.apache.phoenix.schema.ColumnRef): 16
PName (org.apache.phoenix.schema.PName): 16
TableRef (org.apache.phoenix.schema.TableRef): 16
PTableKey (org.apache.phoenix.schema.PTableKey): 15
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 14
SQLException (java.sql.SQLException): 13
PColumnFamily (org.apache.phoenix.schema.PColumnFamily): 13
PColumnImpl (org.apache.phoenix.schema.PColumnImpl): 12
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 11
Test (org.junit.Test): 11
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 10
Hint (org.apache.phoenix.parse.HintNode.Hint): 10
List (java.util.List): 9