Example 21 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class BaseResultIterators, the method initializeScan:

private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
    StatementContext context = plan.getContext();
    TableRef tableRef = plan.getTableRef();
    PTable table = tableRef.getTable();
    Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
    // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
    if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
        // We project *all* KeyValues across all column families as we make a pass over
        // a physical table and we want to make sure we catch all KeyValues that may be
        // dynamic or part of an updatable view.
        familyMap.clear();
        scan.setMaxVersions();
        // Remove any filter
        scan.setFilter(null);
        // Traverse (and subsequently clone) all KeyValues
        scan.setRaw(true);
        // Pass over PTable so we can re-write rows according to the row key schema
        scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserver.serialize(table));
    } else {
        FilterableStatement statement = plan.getStatement();
        RowProjector projector = plan.getProjector();
        boolean optimizeProjection = false;
        boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
        if (!projector.projectEverything()) {
            // FIXME: we only enter this branch when the number of column families is 1, because
            // otherwise local indexes break: the column families declared in the PTable may
            // not match the actual column families of the table (which is bad).
            if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other non declared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
            } else {
                optimizeProjection = true;
                if (projector.projectEveryRow()) {
                    if (table.getViewType() == ViewType.MAPPED) {
                        // Since we don't have the empty key value in MAPPED tables, 
                        // we must project all CFs in HRS. However, only the
                        // selected column values are returned back to client.
                        context.getWhereConditionColumns().clear();
                        for (PColumnFamily family : table.getColumnFamilies()) {
                            context.addWhereConditionColumn(family.getName().getBytes(), null);
                        }
                    } else {
                        byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
                        // Project the empty key value unless the column family containing it has
                        // been projected in its entirety.
                        if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                            scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
                        }
                    }
                }
            }
        }
        // Add FirstKeyOnlyFilter if there are no references to key value columns
        if (keyOnlyFilter) {
            ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
        }
        if (perScanLimit != null) {
            ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
        }
        if (offset != null) {
            ScanUtil.addOffsetAttribute(scan, offset);
        }
        int cols = plan.getGroupBy().getOrderPreservingColumnCount();
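        // A DISTINCT or order-preserving GROUP BY over a proper prefix of the row key can be
        // evaluated by skipping to each distinct prefix on the server instead of scanning every row.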
        if (cols > 0 && keyOnlyFilter
                && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN)
                && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount()
                && plan.getGroupBy().isOrderPreserving()
                && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
            ScanUtil.andFilterAtEnd(scan, new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
            if (plan.getLimit() != null) {
                // We can push the limit to the server
                ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
            }
        }
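        // Record the table's qualifier and storage encoding schemes on the scan so
        // server-side code can decode the cells it reads.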
        scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
        scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
        // we use this flag on the server side to determine which value column qualifier to use in the key value we return from server.
        scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
        // When analyzing the table, no key value look-ups are done,
        // so there is no point setting the range.
        if (!ScanUtil.isAnalyzeTable(scan)) {
            setQualifierRanges(keyOnlyFilter, table, scan, context);
        }
        if (optimizeProjection) {
            optimizeProjection(context, scan, table, statement);
        }
    }
}
Also used : NavigableSet(java.util.NavigableSet) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DistinctPrefixFilter(org.apache.phoenix.filter.DistinctPrefixFilter) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) StatementContext(org.apache.phoenix.compile.StatementContext) RowProjector(org.apache.phoenix.compile.RowProjector) FilterableStatement(org.apache.phoenix.parse.FilterableStatement) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) TableRef(org.apache.phoenix.schema.TableRef)
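For orientation, a minimal sketch (not Phoenix source; the query string is a placeholder) of how the QueryPlan that feeds initializeScan carries both the resolved TableRef and the Scan being configured:

private static void inspectPlan(PhoenixConnection conn, String query) throws SQLException {
    // The optimizer resolves the statement against table metadata and picks a plan
    PhoenixPreparedStatement pstmt = (PhoenixPreparedStatement) conn.prepareStatement(query);
    QueryPlan plan = pstmt.optimizeQuery();
    TableRef tableRef = plan.getTableRef();  // resolved table (or index) the plan scans
    PTable table = tableRef.getTable();      // schema metadata used to shape the Scan
    Scan scan = plan.getContext().getScan(); // the Scan that initializeScan configures
}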

Example 22 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class IndexUtil, the method rewriteViewStatement:

/**
     * Rewrites a view statement so that it is valid against an index table
     * @param conn connection used to compile the rewritten statement
     * @param index the index table to rewrite against
     * @param table the data table the view is defined over
     * @param viewStatement the view statement to rewrite, or null
     * @return the rewritten view statement, or null if viewStatement is null
     * @throws SQLException if the rewritten WHERE clause fails to compile
     */
public static String rewriteViewStatement(PhoenixConnection conn, PTable index, PTable table, String viewStatement) throws SQLException {
    if (viewStatement == null) {
        return null;
    }
    SelectStatement select = new SQLParser(viewStatement).parseQuery();
    ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table));
    SelectStatement translatedSelect = IndexStatementRewriter.translate(select, resolver);
    ParseNode whereNode = translatedSelect.getWhere();
    PhoenixStatement statement = new PhoenixStatement(conn);
    TableRef indexTableRef = new TableRef(index) {

        @Override
        public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
            return '"' + ref.getColumn().getName().getString() + '"';
        }
    };
    ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef);
    StatementContext context = new StatementContext(statement, indexResolver);
    // Compile to ensure validity
    WhereCompiler.compile(context, whereNode);
    StringBuilder buf = new StringBuilder();
    whereNode.toSQL(indexResolver, buf);
    return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), buf.toString());
}
Also used : SelectStatement(org.apache.phoenix.parse.SelectStatement) SQLParser(org.apache.phoenix.parse.SQLParser) ParseNode(org.apache.phoenix.parse.ParseNode) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) TableRef(org.apache.phoenix.schema.TableRef) StatementContext(org.apache.phoenix.compile.StatementContext)
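A hedged usage sketch (dataTable, indexTable, and the statement text are hypothetical): given a data table, one of its indexes, and the view's original statement, produce the equivalent statement against the index.

// 'dataTable' and 'indexTable' are PTables previously resolved from metadata.
String viewStmt = "SELECT * FROM MY_SCHEMA.BASE WHERE KIND = 'x'";
String rewritten = IndexUtil.rewriteViewStatement(conn, indexTable, dataTable, viewStmt);
// 'rewritten' references index columns in its WHERE clause; the method returns
// null only when the input view statement is null.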

Example 23 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class KeyValueUtil, the method getEstimatedRowSize:

/**
     * Estimates the total storage size of the given mutations
     * @param mutations map from table to row to RowMutationState
     * @return estimated size in bytes, summed over all rows of all tables
     */
public static long getEstimatedRowSize(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations) {
    long size = 0;
    // iterate over tables
    for (Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableEntry : mutations.entrySet()) {
        PTable table = tableEntry.getKey().getTable();
        // iterate over rows
        for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : tableEntry.getValue().entrySet()) {
            int rowLength = rowEntry.getKey().getLength();
            Map<PColumn, byte[]> colValueMap = rowEntry.getValue().getColumnValues();
            switch(table.getImmutableStorageScheme()) {
                case ONE_CELL_PER_COLUMN:
                    // iterate over columns
                    for (Entry<PColumn, byte[]> colValueEntry : colValueMap.entrySet()) {
                        PColumn pColumn = colValueEntry.getKey();
                        size += KeyValue.getKeyValueDataStructureSize(rowLength, pColumn.getFamilyName().getBytes().length, pColumn.getColumnQualifierBytes().length, colValueEntry.getValue().length);
                    }
                    break;
                case SINGLE_CELL_ARRAY_WITH_OFFSETS:
                    // we store all the column values in a single key value that contains all the
                    // column values followed by an offset array
                    size += PArrayDataTypeEncoder.getEstimatedByteSize(table, rowLength, colValueMap);
                    break;
            }
            // count the empty key value
            Pair<byte[], byte[]> emptyKeyValueInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(table);
            size += KeyValue.getKeyValueDataStructureSize(rowLength, SchemaUtil.getEmptyColumnFamilyPtr(table).getLength(), emptyKeyValueInfo.getFirst().length, emptyKeyValueInfo.getSecond().length);
        }
    }
    return size;
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) Map(java.util.Map) TableRef(org.apache.phoenix.schema.TableRef) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
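To make the per-cell arithmetic concrete, a small sketch (the lengths are made-up values) of the cost the ONE_CELL_PER_COLUMN branch accumulates for each cell:

// One cell with a 4-byte row key, 1-byte column family, 1-byte qualifier, 4-byte value.
long oneCell = KeyValue.getKeyValueDataStructureSize(4, 1, 1, 4);
// getEstimatedRowSize sums this shape over every column value of the row, then adds
// one more cell of the same shape for the empty key value that marks row presence.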

Example 24 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class WhereCompilerTest, the method testTenantConstraintsAddedToScan:

@Test
public void testTenantConstraintsAddedToScan() throws SQLException {
    String tenantTypeId = "5678";
    String tenantId = "000000000000123";
    String url = getUrl(tenantId);
    createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, type_id char(4) not null, " + "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, type_id, id)) multi_tenant=true");
    createTestTable(url, "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST WHERE type_id= '" + tenantTypeId + "'");
    String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
    PhoenixConnection pconn = DriverManager.getConnection(url, PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    PTable table = plan.getTableRef().getTable();
    Expression aInteger = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression();
    Expression aString = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression();
    assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOp.EQUAL, aInteger, 0), constantComparison(CompareOp.EQUAL, aString, "foo")), TWO_BYTE_QUALIFIERS), filter);
    byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + tenantTypeId);
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = startRow;
    assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) RowKeyComparisonFilter(org.apache.phoenix.filter.RowKeyComparisonFilter) TestUtil.multiEncodedKVFilter(org.apache.phoenix.util.TestUtil.multiEncodedKVFilter) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) Filter(org.apache.hadoop.hbase.filter.Filter) TestUtil.singleKVFilter(org.apache.phoenix.util.TestUtil.singleKVFilter) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixPreparedStatement(org.apache.phoenix.jdbc.PhoenixPreparedStatement) PTable(org.apache.phoenix.schema.PTable) TableRef(org.apache.phoenix.schema.TableRef) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)
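The final assertions verify that the tenant and view constraints were compiled into a row key range; a minimal sketch of the same computation (literals copied from the test above):

byte[] startRow = PVarchar.INSTANCE.toBytes("000000000000123" + "5678"); // tenant_id + type_id prefix
byte[] stopRow = ByteUtil.nextKey(startRow); // smallest row key strictly greater than the prefix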

Example 25 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class BaseConnectionlessQueryTest, the method doSetup:

@BeforeClass
public static void doSetup() throws Exception {
    startServer(getUrl());
    ensureTableCreated(getUrl(), ATABLE_NAME);
    ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME);
    ensureTableCreated(getUrl(), FUNKY_NAME);
    ensureTableCreated(getUrl(), PTSDB_NAME);
    ensureTableCreated(getUrl(), PTSDB2_NAME);
    ensureTableCreated(getUrl(), PTSDB3_NAME);
    ensureTableCreated(getUrl(), MULTI_CF_NAME);
    ensureTableCreated(getUrl(), TABLE_WITH_ARRAY);
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
    PhoenixConnection conn = DriverManager.getConnection(PHOENIX_CONNECTIONLESS_JDBC_URL, props).unwrap(PhoenixConnection.class);
    try {
        PTable table = conn.getTable(new PTableKey(null, ATABLE_NAME));
        ATABLE = table;
        ORGANIZATION_ID = new ColumnRef(new TableRef(table), table.getColumnForColumnName("ORGANIZATION_ID").getPosition()).newColumnExpression();
        ENTITY_ID = new ColumnRef(new TableRef(table), table.getColumnForColumnName("ENTITY_ID").getPosition()).newColumnExpression();
        A_INTEGER = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression();
        A_STRING = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression();
        B_STRING = new ColumnRef(new TableRef(table), table.getColumnForColumnName("B_STRING").getPosition()).newColumnExpression();
        A_DATE = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_DATE").getPosition()).newColumnExpression();
        A_TIME = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_TIME").getPosition()).newColumnExpression();
        A_TIMESTAMP = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_TIMESTAMP").getPosition()).newColumnExpression();
        X_DECIMAL = new ColumnRef(new TableRef(table), table.getColumnForColumnName("X_DECIMAL").getPosition()).newColumnExpression();
    } finally {
        conn.close();
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ColumnRef(org.apache.phoenix.schema.ColumnRef) Properties(java.util.Properties) PTableKey(org.apache.phoenix.schema.PTableKey) PTable(org.apache.phoenix.schema.PTable) TableRef(org.apache.phoenix.schema.TableRef) BeforeClass(org.junit.BeforeClass)
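The nine near-identical expression assignments above could be factored through a small helper; a sketch (the helper is hypothetical, not part of the actual test base):

private static Expression columnExpr(PTable table, String columnName) throws SQLException {
    // Look the column up by name, bind it to the table through a ColumnRef,
    // and materialize the column expression the tests compare against.
    return new ColumnRef(new TableRef(table),
            table.getColumnForColumnName(columnName).getPosition()).newColumnExpression();
}
// e.g. ORGANIZATION_ID = columnExpr(table, "ORGANIZATION_ID");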

Aggregations

TableRef (org.apache.phoenix.schema.TableRef): 43 uses
PTable (org.apache.phoenix.schema.PTable): 30 uses
PColumn (org.apache.phoenix.schema.PColumn): 16 uses
Expression (org.apache.phoenix.expression.Expression): 14 uses
SQLException (java.sql.SQLException): 13 uses
ColumnRef (org.apache.phoenix.schema.ColumnRef): 13 uses
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 12 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 12 uses
Scan (org.apache.hadoop.hbase.client.Scan): 11 uses
ParseNode (org.apache.phoenix.parse.ParseNode): 11 uses
SelectStatement (org.apache.phoenix.parse.SelectStatement): 10 uses
ArrayList (java.util.ArrayList): 9 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 9 uses
PTableRef (org.apache.phoenix.schema.PTableRef): 8 uses
List (java.util.List): 7 uses
Map (java.util.Map): 7 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 7 uses
Hint (org.apache.phoenix.parse.HintNode.Hint): 7 uses
Tuple (org.apache.phoenix.schema.tuple.Tuple): 6 uses
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 5 uses