
Example 11 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class PostDDLCompiler, method compile:

public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, new ColumnResolver() {

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<PFunction> getFunctions() {
            return Collections.<PFunction>emptyList();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new FunctionNotFoundException(functionName);
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            throw new SchemaNotFoundException(schemaName);
        }

        @Override
        public List<PSchema> getSchemas() {
            throw new UnsupportedOperationException();
        }
    }, scan, new SequenceManager(statement));
    return new BaseMutationPlan(context, Operation.UPSERT) {

        /* FIXME */
        @Override
        public MutationState execute() throws SQLException {
            if (tableRefs.isEmpty()) {
                return new MutationState(0, 1000, connection);
            }
            boolean wasAutoCommit = connection.getAutoCommit();
            try {
                connection.setAutoCommit(true);
                SQLException sqlE = null;
                /*
                 * Handles:
                 * 1) deletion of all rows for a DROP TABLE and, subsequently, of all rows for a DROP INDEX;
                 * 2) deletion of all column values for an ALTER TABLE DROP COLUMN;
                 * 3) updating the necessary rows to have an empty KV;
                 * 4) updating table stats.
                 */
                long totalMutationCount = 0;
                for (final TableRef tableRef : tableRefs) {
                    Scan scan = ScanUtil.newScan(context.getScan());
                    SelectStatement select = SelectStatement.COUNT_ONE;
                    // Build a resolver bound to this specific tableRef
                    ColumnResolver resolver = new ColumnResolver() {

                        @Override
                        public List<TableRef> getTables() {
                            return Collections.singletonList(tableRef);
                        }

                        @Override
                        public java.util.List<PFunction> getFunctions() {
                            return Collections.emptyList();
                        }

                        @Override
                        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
                            PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
                            return new ColumnRef(tableRef, column.getPosition());
                        }

                        @Override
                        public PFunction resolveFunction(String functionName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean hasUDFs() {
                            return false;
                        }

                        @Override
                        public List<PSchema> getSchemas() {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public PSchema resolveSchema(String schemaName) throws SQLException {
                            throw new SchemaNotFoundException(schemaName);
                        }
                    };
                    PhoenixStatement statement = new PhoenixStatement(connection);
                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
                    long ts = timestamp;
                    // Transactional tables store cell timestamps in nanoseconds, so convert
                    // the scan's upper time bound before applying it below.
                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                        ts = TransactionUtil.convertToNanoseconds(ts);
                    }
                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
                    if (emptyCF != null) {
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
                    }
                    ServerCache cache = null;
                    try {
                        if (deleteList != null) {
                            if (deleteList.isEmpty()) {
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                                // In the case of a row deletion, add index metadata so mutable secondary indexing works
                                /* TODO: we currently manually run a scan to delete the index data here
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                tableRef.getTable().getIndexMaintainers(ptr);
                                if (ptr.getLength() > 0) {
                                    IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                    cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                    byte[] uuidValue = cache.getId();
                                    scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                }
                                */
                            } else {
                                // In the case of the empty key value column family changing, do not send the index
                                // metadata, as we're currently managing this from the client. It's possible for the
                                // data empty column family to stay the same, while the index empty column family
                                // changes.
                                PColumn column = deleteList.get(0);
                                byte[] cq = column.getColumnQualifierBytes();
                                if (emptyCF == null) {
                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
                                }
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
                            }
                        }
                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                        if (projectCFs == null) {
                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                columnFamilies.add(family.getName().getBytes());
                            }
                        } else {
                            for (byte[] projectCF : projectCFs) {
                                columnFamilies.add(projectCF);
                            }
                        }
                        // Need to project all column families into the scan, since we haven't yet created our empty key value
                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
                        // Explicitly project these column families and don't project the empty key value,
                        // since at this point we haven't added the empty key value everywhere.
                        if (columnFamilies != null) {
                            scan.getFamilyMap().clear();
                            for (byte[] family : columnFamilies) {
                                scan.addFamily(family);
                            }
                            projector = new RowProjector(projector, false);
                        }
                        // Ignore exceptions due to not being able to resolve any view columns,
                        // as this just means the view is invalid. Continue on and try to perform
                        // any other Post DDL operations.
                        try {
                            // Since dropping a VIEW does not affect the underlying data, we do
                            // not need to pass through the view statement here.
                            // Push where clause into scan
                            WhereCompiler.compile(context, select);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            continue;
                        } catch (AmbiguousColumnException e) {
                            continue;
                        }
                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                        try {
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } catch (TableNotFoundException e) {
                        // Ignore and continue, as HBase throws when table hasn't been written to
                        // FIXME: Remove if this is fixed in 0.96
                        }
                    } finally {
                        if (cache != null) {
                            // Remove server cache if there is one
                            cache.close();
                        }
                    }
                }
                final long count = totalMutationCount;
                return new MutationState(1, 1000, connection) {

                    @Override
                    public long getUpdateCount() {
                        return count;
                    }
                };
            } finally {
                if (!wasAutoCommit)
                    connection.setAutoCommit(wasAutoCommit);
            }
        }
    };
}
Also used : ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PFunction(org.apache.phoenix.parse.PFunction) SQLException(java.sql.SQLException) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) List(java.util.List) AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ResultIterator(org.apache.phoenix.iterate.ResultIterator) PSchema(org.apache.phoenix.parse.PSchema) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) FunctionNotFoundException(org.apache.phoenix.schema.FunctionNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) MutationState(org.apache.phoenix.execute.MutationState) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) SchemaNotFoundException(org.apache.phoenix.schema.SchemaNotFoundException) TableRef(org.apache.phoenix.schema.TableRef) Tuple(org.apache.phoenix.schema.tuple.Tuple)
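
For orientation, here is a minimal hedged sketch of how a plan like this is typically obtained and executed, for example after an ALTER TABLE DROP COLUMN. The names connection, tableRef, and droppedColumns are hypothetical stand-ins for values the caller has resolved; the single-argument PostDDLCompiler constructor is assumed from its use elsewhere in Phoenix:

// Hypothetical sketch: driving PostDDLCompiler after a DDL change.
PostDDLCompiler compiler = new PostDDLCompiler(connection); // assumes a live PhoenixConnection
MutationPlan plan = compiler.compile(
        Collections.singletonList(tableRef), // hypothetical TableRef for the altered table
        null,                                // emptyCF: no empty column family change
        null,                                // projectCFs: project all column families
        droppedColumns,                      // deleteList: hypothetical list of dropped PColumns
        HConstants.LATEST_TIMESTAMP);
MutationState state = plan.execute();        // runs the server-side delete/update scans shown above
long rowsTouched = state.getUpdateCount();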

Example 12 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class PostIndexDDLCompiler, method compile:

public MutationPlan compile(final PTable indexTable) throws SQLException {
    /*
     * Handles:
     * 1) Populate a newly created table with contents.
     * 2) Activate the index by setting the INDEX_STATE to ACTIVE.
     */
    // NOTE: For the first version, we use an UPSERT SELECT to populate the new index table and
    //   return synchronously. Creating an index on an existing table with a large amount of data
    //   will therefore take a very long time.
    //   In the long term, this should become an asynchronous process that populates the index,
    //   allowing the user to easily monitor the progress of index creation.
    StringBuilder indexColumns = new StringBuilder();
    StringBuilder dataColumns = new StringBuilder();
    // Add the pk index columns
    List<PColumn> indexPKColumns = indexTable.getPKColumns();
    int nIndexPKColumns = indexTable.getPKColumns().size();
    boolean isSalted = indexTable.getBucketNum() != null;
    boolean isMultiTenant = connection.getTenantId() != null && indexTable.isMultiTenant();
    boolean isViewIndex = indexTable.getViewIndexId() != null;
    int posOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isViewIndex ? 1 : 0);
    for (int i = posOffset; i < nIndexPKColumns; i++) {
        PColumn col = indexPKColumns.get(i);
        String indexColName = col.getName().getString();
        // need to escape backslashes, as this is used in the SELECT statement
        String dataColName = StringUtil.escapeBackslash(col.getExpressionStr());
        dataColumns.append(dataColName).append(",");
        indexColumns.append('"').append(indexColName).append("\",");
        indexColumnNames.add(indexColName);
        dataColumnNames.add(dataColName);
    }
    // Add the covered columns
    for (PColumnFamily family : indexTable.getColumnFamilies()) {
        for (PColumn col : family.getColumns()) {
            if (col.getViewConstant() == null) {
                String indexColName = col.getName().getString();
                String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColName);
                String dataColumnName = IndexUtil.getDataColumnName(indexColName);
                if (!dataFamilyName.equals("")) {
                    dataColumns.append('"').append(dataFamilyName).append("\".");
                }
                dataColumns.append('"').append(dataColumnName).append("\",");
                indexColumns.append('"').append(indexColName).append("\",");
                indexColumnNames.add(indexColName);
                dataColumnNames.add(dataColumnName);
            }
        }
    }
    final PTable dataTable = dataTableRef.getTable();
    dataColumns.setLength(dataColumns.length() - 1);
    indexColumns.setLength(indexColumns.length() - 1);
    String schemaName = dataTable.getSchemaName().getString();
    String tableName = indexTable.getTableName().getString();
    StringBuilder updateStmtStr = new StringBuilder();
    updateStmtStr.append("UPSERT /*+ NO_INDEX */ INTO ").append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"').append(tableName).append("\"(").append(indexColumns).append(") ");
    final StringBuilder selectQueryBuilder = new StringBuilder();
    selectQueryBuilder.append(" SELECT ").append(dataColumns).append(" FROM ").append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"').append(dataTable.getTableName().getString()).append('"');
    this.selectQuery = selectQueryBuilder.toString();
    updateStmtStr.append(this.selectQuery);
    try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
        DelegateMutationPlan delegate = new DelegateMutationPlan(statement.compileMutation(updateStmtStr.toString())) {

            @Override
            public MutationState execute() throws SQLException {
                connection.getMutationState().commitDDLFence(dataTable);
                return super.execute();
            }
        };
        return delegate;
    }
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable)
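
A brief sketch of how this compiler is typically driven during index creation, under the assumption that connection, dataTableRef, and indexTable come from the surrounding CREATE INDEX handling (the two-argument constructor is assumed from the dataTableRef field used above):

// Hypothetical sketch: populating a freshly created index.
PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(connection, dataTableRef);
MutationPlan plan = compiler.compile(indexTable); // builds the UPSERT /*+ NO_INDEX */ ... SELECT shown above
MutationState state = plan.execute();             // populates the index synchronously
connection.commit();                              // persist the mutations if auto-commit is off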

Example 13 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class DeleteCompiler, method deleteRows:

private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, List<TableRef> indexTableRefs, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
    PTable table = targetTableRef.getTable();
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    Map<ImmutableBytesPtr, RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
    List<Map<ImmutableBytesPtr, RowMutationState>> indexMutations = null;
    // When index table refs are passed in, we delete the rows from both the index tables and
    // the data table through a single query to save executing an additional one.
    if (!indexTableRefs.isEmpty()) {
        indexMutations = Lists.newArrayListWithExpectedSize(indexTableRefs.size());
        for (int i = 0; i < indexTableRefs.size(); i++) {
            indexMutations.add(Maps.<ImmutableBytesPtr, RowMutationState>newHashMapWithExpectedSize(batchSize));
        }
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        int rowCount = 0;
        while (rs.next()) {
            // allocate new as this is a key in a Map
            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
            // If the source and target tables are the same, the row key can be used
            // as-is; no translation of the PK values is required.
            if (sourceTableRef.equals(targetTableRef)) {
                rs.getCurrentRow().getKey(ptr);
            } else {
                for (int i = offset; i < values.length; i++) {
                    byte[] byteValue = rs.getBytes(i + 1 - offset);
                    // TODO: consider going under the hood and just getting the bytes
                    if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
                        byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                        byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
                    }
                    values[i] = byteValue;
                }
                table.newKey(ptr, values);
            }
            // When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the
            // row key will already have its value. 
            mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            for (int i = 0; i < indexTableRefs.size(); i++) {
                // allocate new as this is a key in a Map
                ImmutableBytesPtr indexPtr = new ImmutableBytesPtr();
                rs.getCurrentRow().getKey(indexPtr);
                indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                for (int i = 0; i < indexTableRefs.size(); i++) {
                    MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.getMutationState().send();
                mutations.clear();
                if (indexMutations != null) {
                    indexMutations.clear();
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
        MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
        for (int i = 0; i < indexTableRefs.size(); i++) {
            // To prevent the counting of these index rows, we have a negative for remainingRows.
            MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
            state.join(indexState);
        }
        return state;
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) PColumn(org.apache.phoenix.schema.PColumn) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) PName(org.apache.phoenix.schema.PName) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) Map(java.util.Map) HashMap(java.util.HashMap) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
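
From the client's perspective, this code path is reached through an ordinary JDBC DELETE. A minimal sketch; the connection URL and table name are placeholders:

// Hypothetical client-side sketch: a DELETE that exercises deleteRows.
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    conn.setAutoCommit(true); // enables the periodic batch commits shown above
    try (Statement stmt = conn.createStatement()) {
        int deleted = stmt.executeUpdate("DELETE FROM MY_TABLE WHERE CREATED < TO_DATE('2017-01-01')");
        System.out.println("Deleted " + deleted + " rows");
    }
}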

Example 14 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class ColumnRef, method newColumnExpression:

public Expression newColumnExpression(boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) throws SQLException {
    PTable table = tableRef.getTable();
    PColumn column = this.getColumn();
    String displayName = tableRef.getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive);
    if (SchemaUtil.isPKColumn(column)) {
        return new RowKeyColumnExpression(column, new RowKeyValueAccessor(table.getPKColumns(), pkSlotPosition), displayName);
    }
    if (table.getType() == PTableType.PROJECTED || table.getType() == PTableType.SUBQUERY) {
        return new ProjectedColumnExpression(column, table, displayName);
    }
    Expression expression = table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS ? new SingleCellColumnExpression(column, displayName, table.getEncodingScheme()) : new KeyValueColumnExpression(column, displayName);
    if (column.getExpressionStr() != null) {
        String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS;
        PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
        StatementContext context = new StatementContext(new PhoenixStatement(conn));
        ExpressionCompiler compiler = new ExpressionCompiler(context);
        ParseNode defaultParseNode = new SQLParser(column.getExpressionStr()).parseExpression();
        Expression defaultExpression = defaultParseNode.accept(compiler);
        if (!ExpressionUtil.isNull(defaultExpression, new ImmutableBytesWritable())) {
            return new DefaultValueExpression(Arrays.asList(expression, defaultExpression));
        }
    }
    return expression;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) StatementContext(org.apache.phoenix.compile.StatementContext) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) DefaultValueExpression(org.apache.phoenix.expression.function.DefaultValueExpression) SQLParser(org.apache.phoenix.parse.SQLParser) ParseNode(org.apache.phoenix.parse.ParseNode) ExpressionCompiler(org.apache.phoenix.compile.ExpressionCompiler) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) DefaultValueExpression(org.apache.phoenix.expression.function.DefaultValueExpression)
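
The same connectionless-compilation trick can be used on its own to compile an arbitrary expression string; a hedged sketch reusing the classes listed above (the expression is purely illustrative):

// Hypothetical sketch: compiling an expression string against a
// connectionless Phoenix connection, mirroring the DEFAULT handling above.
String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS;
try (PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class)) {
    StatementContext context = new StatementContext(new PhoenixStatement(conn));
    ParseNode node = new SQLParser("TO_NUMBER('42') + 1").parseExpression();
    Expression expr = node.accept(new ExpressionCompiler(context)); // compile to an evaluable Expression
    // expr can now be inspected, e.g. via expr.getDataType()
}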

Example 15 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class IndexUtil, method rewriteViewStatement:

/**
 * Rewrite a view statement to be valid against an index.
 * @param conn the Phoenix connection
 * @param index the index table
 * @param table the data table the view is defined over
 * @param viewStatement the view statement to rewrite, may be null
 * @return the rewritten view statement, or null if viewStatement is null
 * @throws SQLException if the rewritten WHERE clause fails to compile
 */
public static String rewriteViewStatement(PhoenixConnection conn, PTable index, PTable table, String viewStatement) throws SQLException {
    if (viewStatement == null) {
        return null;
    }
    SelectStatement select = new SQLParser(viewStatement).parseQuery();
    ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table));
    SelectStatement translatedSelect = IndexStatementRewriter.translate(select, resolver);
    ParseNode whereNode = translatedSelect.getWhere();
    PhoenixStatement statement = new PhoenixStatement(conn);
    TableRef indexTableRef = new TableRef(index) {

        @Override
        public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
            return '"' + ref.getColumn().getName().getString() + '"';
        }
    };
    ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef);
    StatementContext context = new StatementContext(statement, indexResolver);
    // Compile to ensure validity
    WhereCompiler.compile(context, whereNode);
    StringBuilder buf = new StringBuilder();
    whereNode.toSQL(indexResolver, buf);
    return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), buf.toString());
}
Also used : SelectStatement(org.apache.phoenix.parse.SelectStatement) SQLParser(org.apache.phoenix.parse.SQLParser) ParseNode(org.apache.phoenix.parse.ParseNode) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) TableRef(org.apache.phoenix.schema.TableRef) StatementContext(org.apache.phoenix.compile.StatementContext)
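
A short sketch of calling this helper; pconn, indexPTable, and dataPTable are hypothetical names for values assumed to be resolved elsewhere (e.g. via PhoenixRuntime.getTable):

// Hypothetical sketch: rewriting a view's defining statement against an index.
String viewStatement = dataPTable.getViewStatement(); // may be null for non-views
String rewritten = IndexUtil.rewriteViewStatement(pconn, indexPTable, dataPTable, viewStatement);
if (rewritten != null) {
    // rewritten now references the index's column names instead of the data table's
    System.out.println(rewritten);
}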

Aggregations

PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 64
Connection (java.sql.Connection): 47
Test (org.junit.Test): 42
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest): 34
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 13
ResultSet (java.sql.ResultSet): 10
StatementContext (org.apache.phoenix.compile.StatementContext): 10
QueryPlan (org.apache.phoenix.compile.QueryPlan): 8
PTable (org.apache.phoenix.schema.PTable): 8
SQLException (java.sql.SQLException): 7
Statement (java.sql.Statement): 7
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 7
PColumn (org.apache.phoenix.schema.PColumn): 7
ArrayList (java.util.ArrayList): 6
Properties (java.util.Properties): 6
MutationState (org.apache.phoenix.execute.MutationState): 6
PreparedStatement (java.sql.PreparedStatement): 5
List (java.util.List): 5
Scan (org.apache.hadoop.hbase.client.Scan): 5
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 5