Example 56 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

The class WhereCompilerTest, method testOrPKWithAndPKAndNotPK.

@Test
public void testOrPKWithAndPKAndNotPK() throws SQLException {
    String query = "select * from bugTable where ID = 'i1' or (ID = 'i2' and company = 'c3')";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    pconn.createStatement().execute("create table bugTable(ID varchar primary key,company varchar)");
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
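    // Build the expressions the compiled filter is expected to reference:
    // ID is a PK column (read through the row key accessor), COMPANY is a
    // regular KeyValue column.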
    Expression idExpression = new ColumnRef(plan.getTableRef(), plan.getTableRef().getTable().getColumnForColumnName("ID").getPosition()).newColumnExpression();
    Expression id = new RowKeyColumnExpression(idExpression, new RowKeyValueAccessor(plan.getTableRef().getTable().getPKColumns(), 0));
    Expression company = new KeyValueColumnExpression(plan.getTableRef().getTable().getColumnForColumnName("COMPANY"));
    // FilterList has no equals implementation
    assertTrue(filter instanceof FilterList);
    FilterList filterList = (FilterList) filter;
    assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator());
    assertEquals(
        Arrays.asList(
            new SkipScanFilter(
                ImmutableList.of(Arrays.asList(pointRange("i1"), pointRange("i2"))),
                SchemaUtil.VAR_BINARY_SCHEMA),
            singleKVFilter(
                or(constantComparison(CompareOp.EQUAL, id, "i1"),
                   and(constantComparison(CompareOp.EQUAL, id, "i2"),
                       constantComparison(CompareOp.EQUAL, company, "c3"))))),
        filterList.getFilters());
}
Also used : PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), RowKeyValueAccessor (org.apache.phoenix.schema.RowKeyValueAccessor), FilterList (org.apache.hadoop.hbase.filter.FilterList), RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression), RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter), TestUtil.multiEncodedKVFilter (org.apache.phoenix.util.TestUtil.multiEncodedKVFilter), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), Filter (org.apache.hadoop.hbase.filter.Filter), TestUtil.singleKVFilter (org.apache.phoenix.util.TestUtil.singleKVFilter), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), Expression (org.apache.phoenix.expression.Expression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), Scan (org.apache.hadoop.hbase.client.Scan), ColumnRef (org.apache.phoenix.schema.ColumnRef), PhoenixPreparedStatement (org.apache.phoenix.jdbc.PhoenixPreparedStatement), Test (org.junit.Test), BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest)
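
Because FilterList inherits Object.equals (as the comment in the test notes), the assertions above compare the operator and the child filters separately, relying on the Phoenix filter classes' own equals implementations. A minimal sketch of that idea as a reusable helper; the helper name is hypothetical, not part of Phoenix or HBase:

// Hedged sketch: structural comparison of two FilterLists, relying on the
// element filters implementing equals(). Helper name is illustrative only.
static void assertFilterListEquals(FilterList expected, FilterList actual) {
    assertEquals(expected.getOperator(), actual.getOperator());
    assertEquals(expected.getFilters(), actual.getFilters());
}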

Example 57 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

The class ScannerBuilder, method buildIndexedColumnScanner.

public Scanner buildIndexedColumnScanner(Collection<? extends ColumnReference> indexedColumns, ColumnTracker tracker, long ts, boolean returnNullIfRowNotFound) {
    Filter columnFilters = getColumnFilters(indexedColumns);
    FilterList filters = new FilterList(Lists.newArrayList(columnFilters));
    // skip to the right TS. This needs to come before the deletes since the deletes will hide any
    // state that comes before the actual kvs, so we need to capture those TS as they change the row
    // state.
    filters.addFilter(new ColumnTrackingNextLargestTimestampFilter(ts, tracker));
    // filter out kvs based on deletes
    filters.addFilter(new ApplyAndFilterDeletesFilter(getAllFamilies(indexedColumns)));
    // combine the family filters and the rest of the filters into a single filtered scanner
    return getFilteredScanner(filters, returnNullIfRowNotFound);
}
Also used : ColumnTrackingNextLargestTimestampFilter (org.apache.phoenix.hbase.index.covered.filter.ColumnTrackingNextLargestTimestampFilter), FamilyFilter (org.apache.hadoop.hbase.filter.FamilyFilter), QualifierFilter (org.apache.hadoop.hbase.filter.QualifierFilter), Filter (org.apache.hadoop.hbase.filter.Filter), ApplyAndFilterDeletesFilter (org.apache.phoenix.hbase.index.covered.filter.ApplyAndFilterDeletesFilter), FilterList (org.apache.hadoop.hbase.filter.FilterList)
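
The single-argument FilterList constructor used here defaults to Operator.MUST_PASS_ALL (AND semantics): a KeyValue survives only if every child filter accepts it, and child filters are consulted in insertion order. A minimal standalone sketch of the same composition pattern, assuming the plain HBase 1.x client API with placeholder family and qualifier names:

// Hedged sketch (not Phoenix code): AND-composition with FilterList.
// Assumes imports of Scan, FilterList, FamilyFilter, QualifierFilter,
// BinaryComparator, CompareFilter.CompareOp, and Bytes.
Scan scan = new Scan();
FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
filters.addFilter(new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))));
filters.addFilter(new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("qual"))));
scan.setFilter(filters);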

Example 58 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

The class WhereOptimizerTest, method testForceSkipScanOnSaltedTable.

@Test
public void testForceSkipScanOnSaltedTable() throws SQLException {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE IF NOT EXISTS user_messages (\n" + "        SENDER_ID UNSIGNED_LONG NOT NULL,\n" + "        RECIPIENT_ID UNSIGNED_LONG NOT NULL,\n" + "        SENDER_IP VARCHAR,\n" + "        IS_READ VARCHAR,\n" + "        IS_DELETED VARCHAR,\n" + "        M_TEXT VARCHAR,\n" + "        M_TIMESTAMP timestamp  NOT NULL,\n" + "        ROW_ID UNSIGNED_LONG NOT NULL\n" + "        constraint rowkey primary key (SENDER_ID,RECIPIENT_ID,M_TIMESTAMP DESC,ROW_ID))\n" + "SALT_BUCKETS=12\n");
    String query = "select /*+ SKIP_SCAN */ count(*) from user_messages where is_read='N' and recipient_id=5399179882";
    StatementContext context = compileStatement(query);
    Scan scan = context.getScan();
    Filter filter = scan.getFilter();
    assertNotNull(filter);
    assertTrue(filter instanceof FilterList);
    FilterList filterList = (FilterList) filter;
    assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator());
    assertEquals(2, filterList.getFilters().size());
    assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter);
    assertTrue(filterList.getFilters().get(1) instanceof SingleKeyValueComparisonFilter);
    ScanRanges scanRanges = context.getScanRanges();
    assertNotNull(scanRanges);
    assertEquals(3, scanRanges.getRanges().size());
    assertEquals(1, scanRanges.getRanges().get(1).size());
    assertEquals(KeyRange.EVERYTHING_RANGE, scanRanges.getRanges().get(1).get(0));
    assertEquals(1, scanRanges.getRanges().get(2).size());
    assertTrue(scanRanges.getRanges().get(2).get(0).isSingleKey());
    assertEquals(Long.valueOf(5399179882L), PUnsignedLong.INSTANCE.toObject(scanRanges.getRanges().get(2).get(0).getLowerRange()));
}
Also used : RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), Filter (org.apache.hadoop.hbase.filter.Filter), SingleKeyValueComparisonFilter (org.apache.phoenix.filter.SingleKeyValueComparisonFilter), TestUtil.rowKeyFilter (org.apache.phoenix.util.TestUtil.rowKeyFilter), Connection (java.sql.Connection), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), Scan (org.apache.hadoop.hbase.client.Scan), FilterList (org.apache.hadoop.hbase.filter.FilterList), Test (org.junit.Test), BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest)
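
The assertions above reach into the FilterList by position, which works here but is brittle if the compiler ever reorders its output. A hedged alternative is to search the (possibly nested) FilterList for a filter of a given type; the helper below is illustrative, not Phoenix test code:

// Hedged sketch: depth-first search for a filter of the given type inside
// a (possibly nested) FilterList; returns null if no such filter exists.
static <T extends Filter> T findFilter(Filter root, Class<T> type) {
    if (type.isInstance(root)) {
        return type.cast(root);
    }
    if (root instanceof FilterList) {
        for (Filter child : ((FilterList) root).getFilters()) {
            T found = findFilter(child, type);
            if (found != null) {
                return found;
            }
        }
    }
    return null;
}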

Example 59 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

The class MetaDataEndpointImpl, method findChildViews_deprecated.

// TODO remove this in 4.13 release 
@Deprecated
private TableViewFinder findChildViews_deprecated(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
    byte[] schemaName = table.getSchemaName().getBytes();
    byte[] tableName = table.getTableName().getBytes();
    boolean isMultiTenant = table.isMultiTenant();
    Scan scan = new Scan();
    // For a non-multi-tenant table, all of its child views are stored under
    // the same tenantId, so we can restrict the scan to that tenant's row range.
    if (!isMultiTenant) {
        byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
        byte[] stopRow = ByteUtil.nextKey(startRow);
        scan.setStartRow(startRow);
        scan.setStopRow(stopRow);
    }
    SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, linkTypeBytes);
    SingleColumnValueFilter tableTypeFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, CompareOp.EQUAL, PTableType.VIEW.getSerializedValue().getBytes());
    tableTypeFilter.setFilterIfMissing(false);
    linkFilter.setFilterIfMissing(true);
    byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, SchemaUtil.getPhysicalTableName(SchemaUtil.getTableNameAsBytes(schemaName, tableName), table.isNamespaceMapped()).getName());
    SuffixFilter rowFilter = new SuffixFilter(suffix);
    FilterList filter = new FilterList(linkFilter, tableTypeFilter, rowFilter);
    scan.setFilter(filter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
    // Original region-only scanner modified due to PHOENIX-1208
    // RegionScanner scanner = region.getScanner(scan);
    // The following *should* work, but doesn't due to HBASE-11837
    // TableName systemCatalogTableName = region.getTableDesc().getTableName();
    // HTableInterface hTable = env.getTable(systemCatalogTableName);
    // These deprecated calls work around the issue
    HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, region.getTableDesc().getTableName().getName());
    try {
        boolean allViewsInCurrentRegion = true;
        int numOfChildViews = 0;
        List<ViewInfo> viewInfoList = Lists.newArrayList();
        ResultScanner scanner = hTable.getScanner(scan);
        try {
            for (Result result = scanner.next(); (result != null); result = scanner.next()) {
                numOfChildViews++;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ResultTuple resultTuple = new ResultTuple(result);
                resultTuple.getKey(ptr);
                byte[] key = ptr.copyBytes();
                if (checkTableKeyInRegion(key, region) != null) {
                    allViewsInCurrentRegion = false;
                }
                byte[][] rowKeyMetaData = new byte[3][];
                getVarChars(result.getRow(), 3, rowKeyMetaData);
                byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
                byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
                byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
                viewInfoList.add(new ViewInfo(viewTenantId, viewSchemaName, viewName));
            }
            TableViewFinder tableViewFinderResult = new TableViewFinder(viewInfoList);
            if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                tableViewFinderResult.setAllViewsNotInSingleRegion();
            }
            return tableViewFinderResult;
        } finally {
            scanner.close();
        }
    } finally {
        hTable.close();
    }
}
Also used : SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), FilterList (org.apache.hadoop.hbase.filter.FilterList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), Result (org.apache.hadoop.hbase.client.Result), Scan (org.apache.hadoop.hbase.client.Scan)
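
The two SingleColumnValueFilters above behave differently for rows that lack the tested column because of setFilterIfMissing: the link filter drops such rows, while the table-type filter keeps them. A minimal hedged sketch of the flag in isolation, with placeholder family, qualifier, and value bytes rather than the real SYSTEM.CATALOG constants:

// Hedged sketch: setFilterIfMissing controls rows that lack the column.
// true  => drop rows that do not contain the column (the link filter above)
// false => keep such rows (the default, used by the table-type filter above)
SingleColumnValueFilter linkTypeFilter = new SingleColumnValueFilter(
        Bytes.toBytes("0"), Bytes.toBytes("LINK_TYPE"),
        CompareOp.EQUAL, Bytes.toBytes("4"));
linkTypeFilter.setFilterIfMissing(true);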

Example 60 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

The class ScanRanges, method intersectScan.

public Scan intersectScan(Scan scan, final byte[] originalStartKey, final byte[] originalStopKey, final int keyOffset, boolean crossesRegionBoundary) {
    byte[] startKey = originalStartKey;
    byte[] stopKey = originalStopKey;
    if (stopKey.length > 0 && Bytes.compareTo(startKey, stopKey) >= 0) {
        return null;
    }
    // Keep the keys as they are if we have a point lookup, as we've already resolved the
    // salt bytes in that case.
    final int scanKeyOffset = this.isSalted && !this.isPointLookup ? SaltingUtil.NUM_SALTING_BYTES : 0;
    assert (scanKeyOffset == 0 || keyOffset == 0);
    // Total offset for startKey/stopKey. Either 1 for salted tables or the prefix length
    // of the current region for local indexes. We'll never have a case where a table is
    // both salted and local.
    final int totalKeyOffset = scanKeyOffset + keyOffset;
    byte[] prefixBytes = ByteUtil.EMPTY_BYTE_ARRAY;
    if (totalKeyOffset > 0) {
        prefixBytes = ScanUtil.getPrefix(startKey, totalKeyOffset);
        /*
             * If our startKey to stopKey crosses a region boundary consider everything after the startKey as our scan
             * is always done within a single region. This prevents us from having to prefix the key prior to knowing
             * whether or not there may be an intersection. We can't calculate whether or not we've crossed a region
             * boundary for local indexes, because we don't know the key offset of the next region, but only for the
             * current one (which is the one passed in). If the next prefix happened to be a subset of the previous
             * prefix, then this wouldn't detect that we crossed a region boundary.
             */
        if (crossesRegionBoundary) {
            stopKey = ByteUtil.EMPTY_BYTE_ARRAY;
        }
    }
    int scanStartKeyOffset = scanKeyOffset;
    byte[] scanStartKey = scan == null ? this.scanRange.getLowerRange() : scan.getStartRow();
    // Compare ignoring key prefix and salt byte
    if (scanStartKey.length - scanKeyOffset > 0) {
        if (startKey.length - totalKeyOffset > 0) {
            if (Bytes.compareTo(scanStartKey, scanKeyOffset, scanStartKey.length - scanKeyOffset, startKey, totalKeyOffset, startKey.length - totalKeyOffset) < 0) {
                scanStartKey = startKey;
                scanStartKeyOffset = totalKeyOffset;
            }
        }
    } else {
        scanStartKey = startKey;
        scanStartKeyOffset = totalKeyOffset;
    }
    int scanStopKeyOffset = scanKeyOffset;
    byte[] scanStopKey = scan == null ? this.scanRange.getUpperRange() : scan.getStopRow();
    if (scanStopKey.length - scanKeyOffset > 0) {
        if (stopKey.length - totalKeyOffset > 0) {
            if (Bytes.compareTo(scanStopKey, scanKeyOffset, scanStopKey.length - scanKeyOffset, stopKey, totalKeyOffset, stopKey.length - totalKeyOffset) > 0) {
                scanStopKey = stopKey;
                scanStopKeyOffset = totalKeyOffset;
            }
        }
    } else {
        scanStopKey = stopKey;
        scanStopKeyOffset = totalKeyOffset;
    }
    // If not scanning anything, return null
    if (scanStopKey.length - scanStopKeyOffset > 0 && Bytes.compareTo(scanStartKey, scanStartKeyOffset, scanStartKey.length - scanStartKeyOffset, scanStopKey, scanStopKeyOffset, scanStopKey.length - scanStopKeyOffset) >= 0) {
        return null;
    }
    if (originalStopKey.length != 0 && scanStopKey.length == 0) {
        scanStopKey = originalStopKey;
    }
    Filter newFilter = null;
    // If we're using the skip scan filter, intersect it with the narrower of the incoming scan's
    // start/stop or the scanRanges start/stop.
    if (this.useSkipScanFilter()) {
        byte[] skipScanStartKey = scanStartKey;
        byte[] skipScanStopKey = scanStopKey;
        // The skip scan filter operates on row keys without the salt byte or region prefix, so
        // we need to remove the prefix before running our intersect method.
        if (scanKeyOffset > 0) {
            if (skipScanStartKey != originalStartKey) {
                // original already has correct salt byte
                skipScanStartKey = replaceSaltByte(skipScanStartKey, prefixBytes);
            }
            if (skipScanStopKey != originalStopKey) {
                skipScanStopKey = replaceSaltByte(skipScanStopKey, prefixBytes);
            }
        } else if (keyOffset > 0) {
            if (skipScanStartKey == originalStartKey) {
                skipScanStartKey = stripPrefix(skipScanStartKey, keyOffset);
            }
            if (skipScanStopKey == originalStopKey) {
                skipScanStopKey = stripPrefix(skipScanStopKey, keyOffset);
            }
        }
        if (scan == null) {
            return filter.hasIntersect(skipScanStartKey, skipScanStopKey) ? HAS_INTERSECTION : null;
        }
        Filter filter = scan.getFilter();
        SkipScanFilter newSkipScanFilter = null;
        if (filter instanceof SkipScanFilter) {
            SkipScanFilter oldSkipScanFilter = (SkipScanFilter) filter;
            newFilter = newSkipScanFilter = oldSkipScanFilter.intersect(skipScanStartKey, skipScanStopKey);
            if (newFilter == null) {
                return null;
            }
        } else if (filter instanceof FilterList) {
            FilterList oldList = (FilterList) filter;
            FilterList newList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
            newFilter = newList;
            for (Filter f : oldList.getFilters()) {
                if (f instanceof SkipScanFilter) {
                    newSkipScanFilter = ((SkipScanFilter) f).intersect(skipScanStartKey, skipScanStopKey);
                    if (newSkipScanFilter == null) {
                        return null;
                    }
                    newList.addFilter(newSkipScanFilter);
                } else {
                    newList.addFilter(f);
                }
            }
        }
    // Recompute the start/stop keys from the intersected filter's slots, since we don't necessarily
    // have an enclosing range when we do a point lookup.
        if (isPointLookup) {
            scanStartKey = ScanUtil.getMinKey(schema, newSkipScanFilter.getSlots(), slotSpan);
            scanStopKey = ScanUtil.getMaxKey(schema, newSkipScanFilter.getSlots(), slotSpan);
        }
    }
    // If we've got this far, we know we have an intersection
    if (scan == null) {
        return HAS_INTERSECTION;
    }
    if (newFilter == null) {
        newFilter = scan.getFilter();
    }
    Scan newScan = ScanUtil.newScan(scan);
    newScan.setFilter(newFilter);
    // If we stripped a salt byte or region prefix from the keys above, add it back now so that
    // the scan's start/stop rows account for it.
    if (totalKeyOffset > 0) {
        if (scanStartKey != originalStartKey) {
            scanStartKey = prefixKey(scanStartKey, scanKeyOffset, prefixBytes, keyOffset);
        }
        if (scanStopKey != originalStopKey) {
            scanStopKey = prefixKey(scanStopKey, scanKeyOffset, prefixBytes, keyOffset);
        }
    }
    // Don't let the stopRow of the scan go beyond the originalStopKey
    if (originalStopKey.length > 0 && Bytes.compareTo(scanStopKey, originalStopKey) > 0) {
        scanStopKey = originalStopKey;
    }
    if (scanStopKey.length > 0 && Bytes.compareTo(scanStartKey, scanStopKey) >= 0) {
        return null;
    }
    newScan.setAttribute(SCAN_ACTUAL_START_ROW, scanStartKey);
    newScan.setStartRow(scanStartKey);
    newScan.setStopRow(scanStopKey);
    return newScan;
}
Also used : SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), Filter (org.apache.hadoop.hbase.filter.Filter), FilterList (org.apache.hadoop.hbase.filter.FilterList), Scan (org.apache.hadoop.hbase.client.Scan)
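
Note that when intersectScan narrows an existing FilterList it does not mutate the list attached to the incoming scan: it builds a fresh MUST_PASS_ALL list, swaps in the intersected SkipScanFilter, and copies every other filter across. A hedged sketch of that copy-and-swap pattern in isolation; oldList, target, and replacement are placeholder variables:

// Hedged sketch: rebuild a FilterList with a single entry replaced,
// leaving the original list untouched, as intersectScan does above.
FilterList rebuilt = new FilterList(FilterList.Operator.MUST_PASS_ALL);
for (Filter f : oldList.getFilters()) {
    rebuilt.addFilter(f == target ? replacement : f);
}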

Aggregations

FilterList (org.apache.hadoop.hbase.filter.FilterList): 68
Filter (org.apache.hadoop.hbase.filter.Filter): 36
Scan (org.apache.hadoop.hbase.client.Scan): 16
QualifierFilter (org.apache.hadoop.hbase.filter.QualifierFilter): 10
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 10
TimelineFilterList (org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList): 10
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 9
Test (org.junit.Test): 8
ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 7
ArrayList (java.util.ArrayList): 7
FamilyFilter (org.apache.hadoop.hbase.filter.FamilyFilter): 7
Transaction (org.apache.tephra.Transaction): 7
IOException (java.io.IOException): 6
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter): 6
Result (org.apache.hadoop.hbase.client.Result): 5
PageFilter (org.apache.hadoop.hbase.filter.PageFilter): 5
Cell (org.apache.hadoop.hbase.Cell): 4
TableName (org.apache.hadoop.hbase.TableName): 4
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 4
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 4