Example 21 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class QueryOptimizerTest, method testChooseIndexOverTable.

@Test
public void testChooseIndexOverTable() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true");
    conn.createStatement().execute("CREATE INDEX idx ON t(v1)");
    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
    QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'bar'");
    assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString());
}
Also used: Connection(java.sql.Connection) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)
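
A complementary check, sketched under the same schema (not part of the original test): Phoenix's NO_INDEX hint pins the plan to the data table, so the mirrored assertion expects the uppercased data-table name T rather than IDX. The test name is illustrative.

@Test
public void testHintForcesDataTable() throws Exception {
    // Sketch only: same schema as testChooseIndexOverTable() above; the
    // NO_INDEX hint tells the optimizer to ignore all indexes on t.
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true");
    conn.createStatement().execute("CREATE INDEX idx ON t(v1)");
    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
    QueryPlan plan = stmt.optimizeQuery("SELECT /*+ NO_INDEX */ k FROM t WHERE v1 = 'bar'");
    assertEquals("T", plan.getTableRef().getTable().getTableName().getString());
}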

Example 22 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class UpsertCompiler, method upsertSelect.

public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector, ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp, boolean prefixSysColValues) throws SQLException {
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    boolean isAutoCommit = connection.getAutoCommit();
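    // Special prefix columns: a tenant ID slot for multi-tenant tables and a
    // view index ID slot for view indexes.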
    int numSplColumns = (tableRef.getTable().isMultiTenant() ? 1 : 0) + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
    byte[][] values = new byte[columnIndexes.length + numSplColumns][];
    if (prefixSysColValues) {
        int i = 0;
        if (tableRef.getTable().isMultiTenant()) {
            values[i++] = connection.getTenantId().getBytes();
        }
        if (tableRef.getTable().getViewIndexId() != null) {
            values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId());
        }
    }
    int rowCount = 0;
    Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(batchSize);
    PTable table = tableRef.getTable();
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
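    // Local indexes need the parent (data) table's IndexMaintainer and view
    // constants; both are passed through to setValues() below.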
    if (table.getIndexType() == IndexType.LOCAL) {
        PTable parentTable = statement.getConnection().getMetaDataCache().getTableRef(new PTableKey(statement.getConnection().getTenantId(), table.getParentName().getString())).getTable();
        indexMaintainer = table.getIndexMaintainer(parentTable, connection);
        viewConstants = IndexUtil.getViewConstants(parentTable);
    }
    try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
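        // One reusable pointer for all column values; bytes are only copied
        // out when necessary (see copyKeyBytesIfNecessary below).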
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // Reject values that exceed the target column's declared max
                // length, precision, or scale.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), SortOrder.getDefault(), precision, scale, column.getMaxLength(), column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString()).setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision, scale, SortOrder.getDefault(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp, indexMaintainer, viewConstants, null, numSplColumns);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                connection.getMutationState().send();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, maxSizeBytes, connection);
    }
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Hint(org.apache.phoenix.parse.HintNode.Hint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) ResultSet(java.sql.ResultSet) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
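
The commit cadence inside the loop is the heart of this method. Below is a minimal sketch of that pattern; rows and flush are hypothetical stand-ins for the PhoenixResultSet iteration and the connection.getMutationState().join(state)/send() plumbing, not Phoenix APIs (uses java.util.ArrayList, java.util.Iterator, java.util.List, and java.util.function.Consumer).

// Sketch of the auto-commit batching in upsertSelect(); rows and flush are
// hypothetical stand-ins, not Phoenix APIs.
static int batchedUpsert(Iterator<byte[]> rows, Consumer<List<byte[]>> flush, int batchSize, boolean isAutoCommit) {
    List<byte[]> pending = new ArrayList<>();
    int rowCount = 0;
    while (rows.hasNext()) {
        pending.add(rows.next());
        rowCount++;
        if (isAutoCommit && rowCount % batchSize == 0) {
            // Commit a full batch immediately, exactly as the loop above does.
            flush.accept(new ArrayList<>(pending));
            pending.clear();
        }
    }
    // Rows still in pending are left uncommitted for the caller, mirroring
    // the trailing MutationState returned above.
    return rowCount;
}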

Example 23 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class JoinQueryCompilerTest, method getJoinTable.

private static JoinTable getJoinTable(String query, PhoenixConnection connection) throws SQLException {
    SQLParser parser = new SQLParser(query);
    SelectStatement select = SubselectRewriter.flatten(parser.parseQuery(), connection);
    ColumnResolver resolver = FromCompiler.getResolverForQuery(select, connection);
    select = StatementNormalizer.normalize(select, resolver);
    SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolver, connection);
    if (transformedSelect != select) {
        resolver = FromCompiler.getResolverForQuery(transformedSelect, connection);
        select = StatementNormalizer.normalize(transformedSelect, resolver);
    }
    PhoenixStatement stmt = connection.createStatement().unwrap(PhoenixStatement.class);
    return JoinCompiler.compile(stmt, select, resolver);
}
Also used: SelectStatement(org.apache.phoenix.parse.SelectStatement) SQLParser(org.apache.phoenix.parse.SQLParser) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement)
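
A hedged usage sketch for this helper; the tables and the join query are illustrative, and the connection setup follows the pattern used elsewhere on this page:

// Illustrative only: t1/t2 and the join query are made up for this sketch.
PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
JoinTable joinTable = getJoinTable("SELECT a.k, b.v FROM t1 a JOIN t2 b ON a.k = b.k", pconn);
// JoinCompiler.compile() has normalized the statement, so joinTable exposes
// the join structure the test can assert against.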

Example 24 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class ParallelIteratorsSplitTest, method getSplits.

private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
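    // Stub resolver: getTables() supplies the single table under test; the
    // remaining lookups are unused here and throw or return defaults.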
    ColumnResolver resolver = new ColumnResolver() {

        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }

        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
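    // Anonymous QueryPlan stub: only the statement context, table ref, and
    // scan matter for split computation; every other method returns an empty
    // or neutral value.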
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {

        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public long getEstimatedSize() {
            return 0;
        }

        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }

        @Override
        public TableRef getTableRef() {
            return tableRef;
        }

        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }

        @Override
        public Integer getLimit() {
            return null;
        }

        @Override
        public Integer getOffset() {
            return null;
        }

        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }

        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }

        @Override
        public List<KeyRange> getSplits() {
            return null;
        }

        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }

        @Override
        public boolean isDegenerate() {
            return false;
        }

        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }

        @Override
        public List<List<Scan>> getScans() {
            return null;
        }

        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }

        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }

        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }

        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false);
    return parallelIterators.getSplits();
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) PFunction(org.apache.phoenix.parse.PFunction) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) QueryPlan(org.apache.phoenix.compile.QueryPlan) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) SequenceManager(org.apache.phoenix.compile.SequenceManager) StatementContext(org.apache.phoenix.compile.StatementContext) FilterableStatement(org.apache.phoenix.parse.FilterableStatement) List(java.util.List) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) OrderBy(org.apache.phoenix.compile.OrderByCompiler.OrderBy) ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) GroupBy(org.apache.phoenix.compile.GroupByCompiler.GroupBy) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) PSchema(org.apache.phoenix.parse.PSchema) ParallelScanGrouper(org.apache.phoenix.iterate.ParallelScanGrouper) RowProjector(org.apache.phoenix.compile.RowProjector) Scan(org.apache.hadoop.hbase.client.Scan) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) PhoenixParameterMetaData(org.apache.phoenix.jdbc.PhoenixParameterMetaData) ExplainPlan(org.apache.phoenix.compile.ExplainPlan)
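
Design note: rather than pulling in a mocking framework, the test implements QueryPlan inline and returns an empty or neutral value from every method that split computation does not exercise; only the statement context, the table ref, and the injected ScanRanges drive the key ranges that getSplits() returns.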

Example 25 with PhoenixStatement

Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.

The class SequenceIT, method testCompilerOptimization.

@Test
public void testCompilerOptimization() throws Exception {
    nextConnection();
    conn.createStatement().execute("CREATE SEQUENCE seq.perf START WITH 3 INCREMENT BY 2");
    conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true");
    nextConnection();
    conn.createStatement().execute("CREATE INDEX idx ON t(v1) INCLUDE (v2)");
    nextConnection();
    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
    stmt.optimizeQuery("SELECT k, NEXT VALUE FOR seq.perf FROM t WHERE v1 = 'bar'");
}
Also used: PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Test(org.junit.Test)
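
To see which plan the optimizer settled on, a hedged follow-up under the same fixture; Phoenix returns EXPLAIN output as rows of text, one plan step per row:

// Sketch: inspect the optimized plan for the query compiled above.
try (ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT k, NEXT VALUE FOR seq.perf FROM t WHERE v1 = 'bar'")) {
    while (rs.next()) {
        System.out.println(rs.getString(1)); // one plan step per row
    }
}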

Aggregations

PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 64 uses
Connection (java.sql.Connection): 47 uses
Test (org.junit.Test): 42 uses
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest): 34 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 13 uses
ResultSet (java.sql.ResultSet): 10 uses
StatementContext (org.apache.phoenix.compile.StatementContext): 10 uses
QueryPlan (org.apache.phoenix.compile.QueryPlan): 8 uses
PTable (org.apache.phoenix.schema.PTable): 8 uses
SQLException (java.sql.SQLException): 7 uses
Statement (java.sql.Statement): 7 uses
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 7 uses
PColumn (org.apache.phoenix.schema.PColumn): 7 uses
ArrayList (java.util.ArrayList): 6 uses
Properties (java.util.Properties): 6 uses
MutationState (org.apache.phoenix.execute.MutationState): 6 uses
PreparedStatement (java.sql.PreparedStatement): 5 uses
List (java.util.List): 5 uses
Scan (org.apache.hadoop.hbase.client.Scan): 5 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 5 uses