Example 6 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From the class RoundRobinResultIterator, method fetchNextBatch.

private List<RoundRobinIterator> fetchNextBatch() throws SQLException {
    int numExpectedIterators = openIterators.size();
    List<Future<Tuple>> futures = new ArrayList<>(numExpectedIterators);
    List<RoundRobinIterator> results = new ArrayList<>();
    // Randomize the order in which we hit the region servers so as not to overload any particular one.
    Collections.shuffle(openIterators);
    boolean success = false;
    SQLException toThrow = null;
    try {
        StatementContext context = plan.getContext();
        final ConnectionQueryServices services = context.getConnection().getQueryServices();
        ExecutorService executor = services.getExecutor();
        numParallelFetches++;
        if (logger.isDebugEnabled()) {
            logger.debug("Performing parallel fetch for " + openIterators.size() + " iterators. ");
        }
        for (final RoundRobinIterator itr : openIterators) {
            Future<Tuple> future = executor.submit(new Callable<Tuple>() {

                @Override
                public Tuple call() throws Exception {
                    // Read the next record to refill the scanner's cache.
                    return itr.next();
                }
            });
            futures.add(future);
        }
        int i = 0;
        for (Future<Tuple> future : futures) {
            Tuple tuple = future.get();
            if (tuple != null) {
                results.add(new RoundRobinIterator(openIterators.get(i).delegate, tuple));
            } else {
                // The underlying scanner is exhausted, so close it.
                openIterators.get(i).close();
            }
            i++;
        }
        success = true;
        return results;
    } catch (SQLException e) {
        toThrow = e;
    } catch (Exception e) {
        toThrow = ServerUtil.parseServerException(e);
    } finally {
        try {
            if (!success) {
                try {
                    close();
                } catch (Exception e) {
                    if (toThrow == null) {
                        toThrow = ServerUtil.parseServerException(e);
                    } else {
                        toThrow.setNextException(ServerUtil.parseServerException(e));
                    }
                }
            }
        } finally {
            if (toThrow != null) {
                GLOBAL_FAILED_QUERY_COUNTER.increment();
                throw toThrow;
            }
        }
    }
    // Not reachable
    return null;
}
Also used: SQLException (java.sql.SQLException), ArrayList (java.util.ArrayList), StatementContext (org.apache.phoenix.compile.StatementContext), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Tuple (org.apache.phoenix.schema.tuple.Tuple)
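The interesting part of this example is the fan-out/fan-in shape: one Callable per open iterator submitted to the connection's shared ExecutorService, with results collected in submission order so they can be matched back to their iterators by index. A minimal, self-contained sketch of the same shape using only java.util.concurrent (the fetchOne method and the pool size are illustrative stand-ins, not Phoenix API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelFetchSketch {

    // Stand-in for itr.next(): a blocking read against one source.
    static String fetchOne(int source) {
        return "row-from-" + source;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            List<Future<String>> futures = new ArrayList<>();
            // Fan out: one task per source, mirroring one Callable per iterator.
            for (int source = 0; source < 4; source++) {
                final int s = source;
                futures.add(executor.submit(() -> fetchOne(s)));
            }
            // Fan in: collect in submission order, as fetchNextBatch does,
            // so result i corresponds to source i.
            for (Future<String> future : futures) {
                System.out.println(future.get());
            }
        } finally {
            executor.shutdown();
        }
    }
}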

Example 7 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From the class ParallelIteratorsSplitTest, method getSplits.

private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
    ColumnResolver resolver = new ColumnResolver() {

        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }

        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {

        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public long getEstimatedSize() {
            return 0;
        }

        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }

        @Override
        public TableRef getTableRef() {
            return tableRef;
        }

        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }

        @Override
        public Integer getLimit() {
            return null;
        }

        @Override
        public Integer getOffset() {
            return null;
        }

        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }

        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }

        @Override
        public List<KeyRange> getSplits() {
            return null;
        }

        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }

        @Override
        public boolean isDegenerate() {
            return false;
        }

        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }

        @Override
        public List<List<Scan>> getScans() {
            return null;
        }

        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }

        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }

        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }

        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false);
    List<KeyRange> keyRanges = parallelIterators.getSplits();
    return keyRanges;
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), PFunction (org.apache.phoenix.parse.PFunction), SQLException (java.sql.SQLException), Operation (org.apache.phoenix.jdbc.PhoenixStatement.Operation), QueryPlan (org.apache.phoenix.compile.QueryPlan), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), SequenceManager (org.apache.phoenix.compile.SequenceManager), StatementContext (org.apache.phoenix.compile.StatementContext), FilterableStatement (org.apache.phoenix.parse.FilterableStatement), List (java.util.List), ColumnResolver (org.apache.phoenix.compile.ColumnResolver), OrderBy (org.apache.phoenix.compile.OrderByCompiler.OrderBy), ParallelIterators (org.apache.phoenix.iterate.ParallelIterators), GroupBy (org.apache.phoenix.compile.GroupByCompiler.GroupBy), SpoolingResultIterator (org.apache.phoenix.iterate.SpoolingResultIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator), PSchema (org.apache.phoenix.parse.PSchema), ParallelScanGrouper (org.apache.phoenix.iterate.ParallelScanGrouper), RowProjector (org.apache.phoenix.compile.RowProjector), Scan (org.apache.hadoop.hbase.client.Scan), TableRef (org.apache.phoenix.schema.TableRef), ParameterMetaData (java.sql.ParameterMetaData), PhoenixParameterMetaData (org.apache.phoenix.jdbc.PhoenixParameterMetaData), ExplainPlan (org.apache.phoenix.compile.ExplainPlan)
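Stripped of the large stub QueryPlan, the StatementContext setup in this test reduces to a few lines: unwrap a PhoenixConnection, wrap it in a PhoenixStatement, and pass a resolver, a Scan, and a SequenceManager to the four-argument constructor. A condensed, self-contained sketch of that pattern; the JDBC URL is illustrative, and the resolver is passed as null for brevity (the null-resolver form is the one used in the next example):

import java.sql.DriverManager;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.SequenceManager;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;

public class StatementContextSketch {
    public static StatementContext newTestContext() throws Exception {
        // Illustrative URL; any reachable Phoenix JDBC endpoint works.
        PhoenixConnection connection = DriverManager
                .getConnection("jdbc:phoenix:localhost")
                .unwrap(PhoenixConnection.class);
        PhoenixStatement statement = new PhoenixStatement(connection);
        // Constructor arguments mirror the example above: statement,
        // column resolver (may be a stub or null), Scan, SequenceManager.
        return new StatementContext(statement, null, new Scan(), new SequenceManager(statement));
    }
}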

Example 8 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From the class TestUtil, method getSingleSumAggregator.

public static ClientAggregators getSingleSumAggregator(String url, Properties props) throws SQLException {
    try (PhoenixConnection pconn = DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class)) {
        PhoenixStatement statement = new PhoenixStatement(pconn);
        StatementContext context = new StatementContext(statement, null, new Scan(), new SequenceManager(statement));
        AggregationManager aggregationManager = context.getAggregationManager();
        SumAggregateFunction func = new SumAggregateFunction(Arrays.<Expression>asList(new KeyValueColumnExpression(new PLongColumn() {

            @Override
            public PName getName() {
                return SINGLE_COLUMN_NAME;
            }

            @Override
            public PName getFamilyName() {
                return SINGLE_COLUMN_FAMILY_NAME;
            }

            @Override
            public int getPosition() {
                return 0;
            }

            @Override
            public SortOrder getSortOrder() {
                return SortOrder.getDefault();
            }

            @Override
            public Integer getArraySize() {
                return 0;
            }

            @Override
            public byte[] getViewConstant() {
                return null;
            }

            @Override
            public boolean isViewReferenced() {
                return false;
            }

            @Override
            public String getExpressionStr() {
                return null;
            }

            @Override
            public boolean isRowTimestamp() {
                return false;
            }

            @Override
            public boolean isDynamic() {
                return false;
            }

            @Override
            public byte[] getColumnQualifierBytes() {
                return SINGLE_COLUMN_NAME.getBytes();
            }
        })), null);
        aggregationManager.setAggregators(new ClientAggregators(Collections.<SingleAggregateFunction>singletonList(func), 1));
        ClientAggregators aggregators = aggregationManager.getAggregators();
        return aggregators;
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ClientAggregators (org.apache.phoenix.expression.aggregator.ClientAggregators), SortOrder (org.apache.phoenix.schema.SortOrder), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), SequenceManager (org.apache.phoenix.compile.SequenceManager), StatementContext (org.apache.phoenix.compile.StatementContext), AggregationManager (org.apache.phoenix.compile.AggregationManager), PName (org.apache.phoenix.schema.PName), SumAggregateFunction (org.apache.phoenix.expression.function.SumAggregateFunction), Scan (org.apache.hadoop.hbase.client.Scan), PLongColumn (org.apache.phoenix.schema.PLongColumn), SingleAggregateFunction (org.apache.phoenix.expression.function.SingleAggregateFunction), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression)
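Most of the bulk above is the anonymous PLongColumn, which exists only to give the KeyValueColumnExpression a named column. Since SumAggregateFunction takes any Expression as its child, a test that doesn't care about a real column can use a literal instead; whether a literal child makes sense for a given test is an assumption of this sketch, and the shape otherwise reuses exactly the constructors from the example:

import java.util.Arrays;
import java.util.Collections;

import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.expression.aggregator.ClientAggregators;
import org.apache.phoenix.expression.function.SingleAggregateFunction;
import org.apache.phoenix.expression.function.SumAggregateFunction;

public class SumAggregatorSketch {
    public static ClientAggregators singleSumOverLiteral() throws Exception {
        // A literal child replaces the anonymous PLongColumn/KeyValueColumnExpression.
        Expression one = LiteralExpression.newConstant(1L);
        // Same two-argument constructor as the example (null delegate).
        SumAggregateFunction func = new SumAggregateFunction(Arrays.asList(one), null);
        // Same ClientAggregators construction as the example.
        return new ClientAggregators(Collections.<SingleAggregateFunction>singletonList(func), 1);
    }
}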

Example 9 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From the class MetaDataClient, method createIndex.

/**
     * Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling
     * MetaDataClient.createTable. In doing so, we perform the following translations:
     * 1) Change the type of any columns being indexed to types that support null if the column is nullable.
     *    For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type supports null
     *    when it's in the row key while a BIGINT does not.
     * 2) Append any row key column from the data table that is not in the indexed column list. Our indexes
     *    rely on having a 1:1 correspondence between the index and data rows.
     * 3) Change the name of the columns to include the column family. For example, if you have a column
     *    named "B" in a column family named "A", the indexed column name will be "A:B". This makes it easy
     *    to translate the column references in a query to the correct column references in an index table
     *    regardless of whether the column reference is prefixed with the column family name or not. It also
     *    has the side benefit of allowing the same named column in different column families to both be
     *    listed as an index column.
     * @param statement
     * @param splits
     * @return MutationState from population of index table from data table
     * @throws SQLException
     */
public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException {
    IndexKeyConstraint ik = statement.getIndexConstraint();
    TableName indexTableName = statement.getIndexTableName();
    Map<String, Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
    Map<String, Object> commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1);
    populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps);
    List<Pair<ParseNode, SortOrder>> indexParseNodeAndSortOrderList = ik.getParseNodeAndSortOrderList();
    List<ColumnName> includedColumns = statement.getIncludeColumns();
    TableRef tableRef = null;
    PTable table = null;
    int numRetries = 0;
    boolean allocateIndexId = false;
    boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL;
    int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
    if (isLocalIndex) {
        if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
        }
        if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
        }
    }
    while (true) {
        try {
            ColumnResolver resolver = FromCompiler.getResolver(statement, connection, statement.getUdfParseNodes());
            tableRef = resolver.getTables().get(0);
            Date asyncCreatedDate = null;
            if (statement.isAsync()) {
                asyncCreatedDate = new Date(tableRef.getTimeStamp());
            }
            PTable dataTable = tableRef.getTable();
            boolean isTenantConnection = connection.getTenantId() != null;
            if (isTenantConnection) {
                if (dataTable.getType() != PTableType.VIEW) {
                    throw new SQLFeatureNotSupportedException("An index may only be created for a VIEW through a tenant-specific connection");
                }
            }
            if (!dataTable.isImmutableRows()) {
                if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
                }
                if (!connection.getQueryServices().hasIndexWALCodec() && !dataTable.isTransactional()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setTableName(indexTableName.getTableName()).build().buildException();
                }
            }
            int posOffset = 0;
            List<PColumn> pkColumns = dataTable.getPKColumns();
            Set<RowKeyColumnExpression> unusedPkColumns;
            if (dataTable.getBucketNum() != null) {
                // Ignore SALT column
                unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size() - 1);
                posOffset++;
            } else {
                unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
            }
            for (int i = posOffset; i < pkColumns.size(); i++) {
                PColumn column = pkColumns.get(i);
                unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\"" + column.getName().getString() + "\""));
            }
            List<ColumnDefInPkConstraint> allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size());
            List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexParseNodeAndSortOrderList.size());
            /*
                 * Allocate an index ID in two circumstances:
                 * 1) for a local index, as all local indexes will reside in the same HBase table
                 * 2) for a view on an index.
                 */
            if (isLocalIndex || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) {
                allocateIndexId = true;
                PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
                ColumnName colName = ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName());
                allPkColumns.add(new ColumnDefInPkConstraint(colName, SortOrder.getDefault(), false));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault(), null, false));
            }
            if (dataTable.isMultiTenant()) {
                PColumn col = dataTable.getPKColumns().get(posOffset);
                RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString());
                unusedPkColumns.remove(columnExpression);
                PDataType dataType = IndexUtil.getIndexColumnDataType(col);
                ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(), col.getName().getString(), col.isRowTimestamp()));
            }
            PhoenixStatement phoenixStatement = new PhoenixStatement(connection);
            StatementContext context = new StatementContext(phoenixStatement, resolver);
            IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
            Set<ColumnName> indexedColumnNames = Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size());
            for (Pair<ParseNode, SortOrder> pair : indexParseNodeAndSortOrderList) {
                ParseNode parseNode = pair.getFirst();
                // normalize the parse node
                parseNode = StatementNormalizer.normalize(parseNode, resolver);
                // compile the parseNode to get an expression
                expressionIndexCompiler.reset();
                Expression expression = parseNode.accept(expressionIndexCompiler);
                if (expressionIndexCompiler.isAggregate()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                }
                if (expression.getDeterminism() != Determinism.ALWAYS) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                }
                if (expression.isStateless()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                }
                unusedPkColumns.remove(expression);
                // Go through parse node to get string as otherwise we
                // can lose information during compilation
                StringBuilder buf = new StringBuilder();
                parseNode.toSQL(resolver, buf);
                // need to escape backslash as this expression will be re-parsed later
                String expressionStr = StringUtil.escapeBackslash(buf.toString());
                ColumnName colName = null;
                ColumnRef colRef = expressionIndexCompiler.getColumnRef();
                boolean isRowTimestamp = false;
                if (colRef != null) {
                    // if this is a regular column
                    PColumn column = colRef.getColumn();
                    String columnFamilyName = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
                    colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
                    isRowTimestamp = column.isRowTimestamp();
                    if (colRef.getColumn().getExpressionStr() != null) {
                        expressionStr = colRef.getColumn().getExpressionStr();
                    }
                } else {
                    // if this is an expression
                    // TODO: column names cannot contain double quotes; remove this once PHOENIX-1621 is fixed
                    String name = expressionStr.replaceAll("\"", "'");
                    colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
                }
                indexedColumnNames.add(colName);
                PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
                allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expressionStr, isRowTimestamp));
            }
            // Next all the PK columns from the data table that aren't indexed
            if (!unusedPkColumns.isEmpty()) {
                for (RowKeyColumnExpression colExpression : unusedPkColumns) {
                    PColumn col = dataTable.getPKColumns().get(colExpression.getPosition());
                    // we don't need these in the index
                    if (col.getViewConstant() == null) {
                        ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                        allPkColumns.add(new ColumnDefInPkConstraint(colName, colExpression.getSortOrder(), col.isRowTimestamp()));
                        PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(), colExpression.getDataType());
                        columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), colExpression.isNullable(), colExpression.getMaxLength(), colExpression.getScale(), false, colExpression.getSortOrder(), colExpression.toString(), col.isRowTimestamp()));
                    }
                }
            }
            // Last all the included columns (minus any PK columns)
            for (ColumnName colName : includedColumns) {
                PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
                colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                // Check for duplicates between indexed and included columns
                if (indexedColumnNames.contains(colName)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
                }
                if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
                    // Need to re-create ColumnName, since the above one won't have the column family name
                    colName = ColumnName.caseSensitiveColumnName(isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(col.getFamilyName().getString()) : col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col));
                    columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(), col.getExpressionStr(), col.isRowTimestamp()));
                }
            }
            // We need this in the props so that the correct column family is created
            if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW && !allocateIndexId) {
                statement.getProps().put("", new Pair<String, Object>(DEFAULT_COLUMN_FAMILY_NAME, dataTable.getDefaultFamilyName().getString()));
            }
            PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
            tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, dataTable.getName().getString());
            CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, null, statement.getBindCount(), null);
            table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, allocateIndexId, statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
            break;
        } catch (ConcurrentTableMutationException e) {
            // Can happen if the parent data table changes while the above is in progress
            if (numRetries < 5) {
                numRetries++;
                continue;
            }
            throw e;
        }
    }
    if (table == null) {
        return new MutationState(0, 0, connection);
    }
    if (logger.isInfoEnabled())
        logger.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
    boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.INDEX_ASYNC_BUILD_ENABLED, QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED);
    // In the async case, we return immediately, as the MR job needs to be triggered.
    if (statement.isAsync() && asyncIndexBuildEnabled) {
        return new MutationState(0, 0, connection);
    }
    // If our connection is at a fixed point-in-time, we need to open a new
    // connection so that our new index table is visible.
    if (connection.getSCN() != null) {
        return buildIndexAtTimeStamp(table, statement.getTable());
    }
    return buildIndex(table, tableRef);
}
Also used: SQLFeatureNotSupportedException (java.sql.SQLFeatureNotSupportedException), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), StatementContext (org.apache.phoenix.compile.StatementContext), PDataType (org.apache.phoenix.schema.types.PDataType), ParseNode (org.apache.phoenix.parse.ParseNode), SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo), ColumnResolver (org.apache.phoenix.compile.ColumnResolver), Pair (org.apache.hadoop.hbase.util.Pair), CreateTableStatement (org.apache.phoenix.parse.CreateTableStatement), IndexKeyConstraint (org.apache.phoenix.parse.IndexKeyConstraint), ColumnDef (org.apache.phoenix.parse.ColumnDef), ColumnDefInPkConstraint (org.apache.phoenix.parse.ColumnDefInPkConstraint), RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression), PrimaryKeyConstraint (org.apache.phoenix.parse.PrimaryKeyConstraint), PDate (org.apache.phoenix.schema.types.PDate), Date (java.sql.Date), TableName (org.apache.phoenix.parse.TableName), ColumnName (org.apache.phoenix.parse.ColumnName), MutationState (org.apache.phoenix.execute.MutationState), Expression (org.apache.phoenix.expression.Expression), IndexExpressionCompiler (org.apache.phoenix.compile.IndexExpressionCompiler)
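The translations described in the method's javadoc are visible from plain JDBC: indexing a column named B in column family A produces an index column named "A:B", and the data table's PK columns are appended so index and data rows stay 1:1. A hedged end-to-end sketch (table, index names, and URL are illustrative; assumes a reachable Phoenix cluster):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateIndexSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative URL; any Phoenix JDBC endpoint works.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Column B lives in column family A.
            stmt.execute("CREATE TABLE IF NOT EXISTS T (K VARCHAR PRIMARY KEY, A.B BIGINT)");
            // Per the javadoc above: B is indexed under the name "A:B", the
            // nullable BIGINT is coerced to a type that supports null in the
            // row key, and K is appended to the index row key.
            stmt.execute("CREATE INDEX IF NOT EXISTS IDX ON T (A.B)");
            conn.commit();
        }
    }
}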

Example 10 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From the class PTableImpl, method newKey.

@Override
public int newKey(ImmutableBytesWritable key, byte[][] values) {
    List<PColumn> columns = getPKColumns();
    int nValues = values.length;
    while (nValues > 0 && (values[nValues - 1] == null || values[nValues - 1].length == 0)) {
        nValues--;
    }
    for (PColumn column : columns) {
        if (column.getExpressionStr() != null) {
            nValues++;
        }
    }
    int i = 0;
    TrustedByteArrayOutputStream os = new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
    try {
        Integer bucketNum = this.getBucketNum();
        if (bucketNum != null) {
            // Write place holder for salt byte
            i++;
            os.write(QueryConstants.SEPARATOR_BYTE_ARRAY);
        }
        int nColumns = columns.size();
        PDataType type = null;
        SortOrder sortOrder = null;
        boolean wasNull = false;
        while (i < nValues && i < nColumns) {
            // Separate variable length column values in key with zero byte
            if (type != null && !type.isFixedWidth()) {
                os.write(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder));
            }
            PColumn column = columns.get(i);
            sortOrder = column.getSortOrder();
            type = column.getDataType();
            // This will throw if the value is null and the type doesn't allow null
            byte[] byteValue = values[i++];
            if (byteValue == null) {
                if (column.getExpressionStr() != null) {
                    try {
                        String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS;
                        PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
                        StatementContext context = new StatementContext(new PhoenixStatement(conn));
                        ExpressionCompiler compiler = new ExpressionCompiler(context);
                        ParseNode defaultParseNode = new SQLParser(column.getExpressionStr()).parseExpression();
                        Expression defaultExpression = defaultParseNode.accept(compiler);
                        defaultExpression.evaluate(null, key);
                        column.getDataType().coerceBytes(key, null, defaultExpression.getDataType(), defaultExpression.getMaxLength(), defaultExpression.getScale(), defaultExpression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder());
                        byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
                    } catch (SQLException e) {
                        // should not be possible
                        throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " failed to compile default value expression of " + column.getExpressionStr());
                    }
                } else {
                    byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
                }
            }
            wasNull = byteValue.length == 0;
            // An empty byte array means null; reject a null value for a
            // non-nullable column here.
            if (byteValue.length == 0 && !column.isNullable()) {
                throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
            }
            Integer maxLength = column.getMaxLength();
            Integer scale = column.getScale();
            key.set(byteValue);
            if (!type.isSizeCompatible(key, null, type, sortOrder, null, null, maxLength, scale)) {
                throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " (" + SchemaUtil.toString(type, byteValue) + ")");
            }
            key.set(byteValue);
            type.pad(key, maxLength, sortOrder);
            byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
            os.write(byteValue, 0, byteValue.length);
        }
        // Need trailing byte for DESC columns
        if (type != null && !type.isFixedWidth() && SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder) == QueryConstants.DESC_SEPARATOR_BYTE) {
            os.write(QueryConstants.DESC_SEPARATOR_BYTE);
        }
        // If some non-null PK values aren't set, then throw
        if (i < nColumns) {
            PColumn column = columns.get(i);
            if (column.getDataType().isFixedWidth() || !column.isNullable()) {
                throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
            }
        }
        if (nValues == 0) {
            throw new ConstraintViolationException("Primary key may not be null (" + name.getString() + ")");
        }
        byte[] buf = os.getBuffer();
        int size = os.size();
        if (bucketNum != null) {
            buf[0] = SaltingUtil.getSaltingByte(buf, 1, size - 1, bucketNum);
        }
        key.set(buf, 0, size);
        return i;
    } finally {
        try {
            os.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), SQLException (java.sql.SQLException), TrustedByteArrayOutputStream (org.apache.phoenix.util.TrustedByteArrayOutputStream), IOException (java.io.IOException), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), StatementContext (org.apache.phoenix.compile.StatementContext), DataExceedsCapacityException (org.apache.phoenix.exception.DataExceedsCapacityException), PDataType (org.apache.phoenix.schema.types.PDataType), SQLParser (org.apache.phoenix.parse.SQLParser), SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression), Expression (org.apache.phoenix.expression.Expression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), ParseNode (org.apache.phoenix.parse.ParseNode), ExpressionCompiler (org.apache.phoenix.compile.ExpressionCompiler)
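The connectionless-URL trick inside newKey is useful on its own for compiling and evaluating a standalone expression without a cluster. A minimal sketch reusing exactly the calls from the example above; the expression string "1 + 2" is illustrative:

import java.sql.DriverManager;

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.compile.ExpressionCompiler;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.util.PhoenixRuntime;

public class EvaluateExpressionSketch {
    public static void main(String[] args) throws Exception {
        // Connectionless URL: no HBase cluster needed, exactly as in newKey.
        String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS;
        PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
        StatementContext context = new StatementContext(new PhoenixStatement(conn));
        ExpressionCompiler compiler = new ExpressionCompiler(context);
        // Parse and compile the expression, as newKey does for default values.
        ParseNode node = new SQLParser("1 + 2").parseExpression();
        Expression expr = node.accept(compiler);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        if (expr.evaluate(null, ptr)) {
            // Decode the raw bytes with the expression's own type; expect 3.
            Object value = expr.getDataType().toObject(ptr.get(), ptr.getOffset(), ptr.getLength());
            System.out.println(value);
        }
        conn.close();
    }
}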

Aggregations

StatementContext (org.apache.phoenix.compile.StatementContext): 19 usages
PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 10 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 8 usages
ParseNode (org.apache.phoenix.parse.ParseNode): 7 usages
SQLException (java.sql.SQLException): 6 usages
ColumnResolver (org.apache.phoenix.compile.ColumnResolver): 6 usages
Expression (org.apache.phoenix.expression.Expression): 6 usages
ArrayList (java.util.ArrayList): 5 usages
SQLParser (org.apache.phoenix.parse.SQLParser): 5 usages
PTable (org.apache.phoenix.schema.PTable): 5 usages
Scan (org.apache.hadoop.hbase.client.Scan): 4 usages
RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression): 4 usages
ResultIterator (org.apache.phoenix.iterate.ResultIterator): 4 usages
PDataType (org.apache.phoenix.schema.types.PDataType): 4 usages
PreparedStatement (java.sql.PreparedStatement): 3 usages
List (java.util.List): 3 usages
RowProjector (org.apache.phoenix.compile.RowProjector): 3 usages
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 3 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 3 usages
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 3 usages