
Example 41 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class ScanPlan, method newIterator:

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    // Set any scan attributes before creating the scanner, as it will be too late afterwards
    scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
    ResultIterator scanner;
    TableRef tableRef = this.getTableRef();
    PTable table = tableRef.getTable();
    boolean isSalted = table.getBucketNum() != null;
    /*
     * If no limit or topN, use parallel iterator so that we get results faster. Otherwise, if
     * limit is provided, run query serially.
     */
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
    boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
    /*
     * For queries that are doing a row key order by and are not possibly querying more than a
     * threshold worth of data, then we only need to initialize scanners corresponding to the
     * first (or last, if reverse) scan per region.
     */
    boolean initFirstScanOnly = (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) && isDataToScanWithinThreshold;
    BaseResultIterators iterators;
    if (isOffsetOnServer) {
        iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan);
    } else if (isSerial) {
        iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan);
    } else {
        iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly);
    }
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    if (isOffsetOnServer) {
        scanner = new ConcatResultIterator(iterators);
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    } else if (isOrdered) {
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
            /*
             * For salted tables or local index, a merge sort is needed if:
             * 1) The config phoenix.query.force.rowkeyorder is set to true
             * 2) Or if the query has an order by that wants to sort
             * the results by the row key (forward or reverse ordering)
             */
            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
        } else if (useRoundRobinIterator()) {
            /*
             * For any kind of table, round robin is possible if there is
             * no ordering of rows needed.
             */
            scanner = new RoundRobinResultIterator(iterators, this);
        } else {
            scanner = new ConcatResultIterator(iterators);
        }
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Also used: ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) MergeSortRowKeyResultIterator(org.apache.phoenix.iterate.MergeSortRowKeyResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) SerialIterators(org.apache.phoenix.iterate.SerialIterators) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) RoundRobinResultIterator(org.apache.phoenix.iterate.RoundRobinResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) ChunkedResultIterator(org.apache.phoenix.iterate.ChunkedResultIterator) BaseResultIterators(org.apache.phoenix.iterate.BaseResultIterators) PTable(org.apache.phoenix.schema.PTable) TableRef(org.apache.phoenix.schema.TableRef)
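
The part of newIterator that actually depends on the TableRef is the salted/local-index check: a salt byte or a local index prefix stores rows in an order that differs from the logical row key order, so a MergeSortRowKeyResultIterator is layered on top whenever row key order must be preserved. Below is a minimal sketch of that check, using only accessors that appear in the snippet; the helper class and method names are illustrative, not Phoenix API.

import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.TableRef;

// Illustrative helper (not part of Phoenix): condenses the merge-sort decision
// that newIterator derives from the TableRef's resolved PTable.
final class MergeSortCheck {
    static boolean physicalOrderDiffersFromLogical(TableRef tableRef) {
        PTable table = tableRef.getTable();
        // Salted tables prepend a salt byte to every row key.
        boolean isSalted = table.getBucketNum() != null;
        // Local index rows live in the data table's regions under a region-specific prefix.
        boolean isLocalIndex = table.getIndexType() == IndexType.LOCAL;
        // Either prefix breaks global row key order across parallel scans, so a merge sort
        // is required when the query (or phoenix.query.force.rowkeyorder) demands that order.
        return isSalted || isLocalIndex;
    }
}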

Example 42 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class MutationState, method joinMutationState:

private void joinMutationState(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> srcMutations, Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> dstMutations) {
    // Merge newMutation with this one, keeping state from newMutation for any overlaps
    for (Map.Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> entry : srcMutations.entrySet()) {
        // Replace existing entries for the table with new entries
        TableRef tableRef = entry.getKey();
        Map<ImmutableBytesPtr, RowMutationState> srcRows = entry.getValue();
        joinMutationState(tableRef, srcRows, dstMutations);
    }
}
Also used: ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Map(java.util.Map) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef)
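
joinMutationState is a fold of one table-keyed map into another; the per-table overload it delegates to decides what happens when both sides hold state for the same row (per the comments, the incoming mutation's entries replace existing ones). Below is a rough generic sketch of the same pattern in plain Java, independent of Phoenix's ImmutableBytesPtr and RowMutationState types; the class and method names are illustrative only.

import java.util.Map;

// Illustrative sketch (not Phoenix code): fold src into dst, letting incoming
// per-row state overwrite existing state on overlap, roughly mirroring what
// joinMutationState(tableRef, srcRows, dstMutations) does per table.
final class MapJoinSketch {
    static <T, R, S> void join(Map<T, Map<R, S>> src, Map<T, Map<R, S>> dst) {
        for (Map.Entry<T, Map<R, S>> entry : src.entrySet()) {
            dst.merge(entry.getKey(), entry.getValue(), (existing, incoming) -> {
                // On a table overlap, keep existing rows but let incoming rows win key conflicts.
                existing.putAll(incoming);
                return existing;
            });
        }
    }
}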

Example 43 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class WhereCompilerTest, method testTenantConstraintsAddedToScanWithNullTenantTypeId:

@Test
public void testTenantConstraintsAddedToScanWithNullTenantTypeId() throws SQLException {
    String tenantId = "000000000000123";
    createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, " + "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, id)) multi_tenant=true");
    createTestTable(getUrl(tenantId), "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST");
    String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(tenantId), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    PTable table = plan.getTableRef().getTable();
    Expression aInteger = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression();
    Expression aString = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression();
    assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOp.EQUAL, aInteger, 0), constantComparison(CompareOp.EQUAL, aString, "foo")), TWO_BYTE_QUALIFIERS), filter);
    byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId);
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = startRow;
    assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) RowKeyComparisonFilter(org.apache.phoenix.filter.RowKeyComparisonFilter) TestUtil.multiEncodedKVFilter(org.apache.phoenix.util.TestUtil.multiEncodedKVFilter) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) Filter(org.apache.hadoop.hbase.filter.Filter) TestUtil.singleKVFilter(org.apache.phoenix.util.TestUtil.singleKVFilter) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixPreparedStatement(org.apache.phoenix.jdbc.PhoenixPreparedStatement) PTable(org.apache.phoenix.schema.PTable) TableRef(org.apache.phoenix.schema.TableRef) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)
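
The expected filter in this test is built from expressions that are anchored to the plan's table: a TableRef wraps the resolved PTable, and a ColumnRef addresses a column by its position within that TableRef before being turned into an expression. The small helper below pulls that construction pattern out on its own; it is illustrative only and assumes the PTable comes from plan.getTableRef().getTable(), as in the test.

import java.sql.SQLException;

import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;

// Illustrative helper (not part of the test): build a column expression by name
// against a resolved PTable, using the same TableRef + ColumnRef pattern shown above.
final class ColumnExpressions {
    static Expression byName(PTable table, String columnName) throws SQLException {
        ColumnRef ref = new ColumnRef(new TableRef(table),
                table.getColumnForColumnName(columnName).getPosition());
        return ref.newColumnExpression();
    }
}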

Aggregations

TableRef (org.apache.phoenix.schema.TableRef): 43 usages
PTable (org.apache.phoenix.schema.PTable): 30 usages
PColumn (org.apache.phoenix.schema.PColumn): 16 usages
Expression (org.apache.phoenix.expression.Expression): 14 usages
SQLException (java.sql.SQLException): 13 usages
ColumnRef (org.apache.phoenix.schema.ColumnRef): 13 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 12 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 12 usages
Scan (org.apache.hadoop.hbase.client.Scan): 11 usages
ParseNode (org.apache.phoenix.parse.ParseNode): 11 usages
SelectStatement (org.apache.phoenix.parse.SelectStatement): 10 usages
ArrayList (java.util.ArrayList): 9 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 9 usages
PTableRef (org.apache.phoenix.schema.PTableRef): 8 usages
List (java.util.List): 7 usages
Map (java.util.Map): 7 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 7 usages
Hint (org.apache.phoenix.parse.HintNode.Hint): 7 usages
Tuple (org.apache.phoenix.schema.tuple.Tuple): 6 usages
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 5 usages