
Example 6 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in project phoenix by apache.

From the class MetaDataEndpointImpl, method buildSchemas.

private List<PSchema> buildSchemas(List<byte[]> keys, Region region, long clientTimeStamp, ImmutableBytesPtr cacheKey) throws IOException, SQLException {
    List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
    for (byte[] key : keys) {
        byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY);
        ByteUtil.nextKey(stopKey, stopKey.length);
        keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false));
    }
    Scan scan = new Scan();
    scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
    ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    RegionScanner scanner = region.getScanner(scan);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    List<PSchema> schemas = new ArrayList<PSchema>();
    PSchema schema = null;
    try {
        for (int i = 0; i < keys.size(); i++) {
            schema = getSchema(scanner, clientTimeStamp);
            if (schema == null) {
                return null;
            }
            metaDataCache.put(cacheKey, schema);
            schemas.add(schema);
        }
        return schemas;
    } finally {
        scanner.close();
    }
}
Also used : KeyRange(org.apache.phoenix.query.KeyRange) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) PSchema(org.apache.phoenix.parse.PSchema) ScanRanges(org.apache.phoenix.compile.ScanRanges) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Scan(org.apache.hadoop.hbase.client.Scan)
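
The pattern above generalizes beyond the metadata endpoint: build one point KeyRange per requested row key, wrap them in a point-lookup ScanRanges, and let the skip-scan filter seek the scanner between exactly those keys. A minimal sketch of the same idea, assuming the Phoenix version this snippet comes from (the class and method names here are hypothetical):

import java.util.List;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.schema.types.PVarbinary;

import com.google.common.collect.Lists;

public class PointLookupSketch {

    // Build a Scan that visits exactly the given row keys via a skip-scan filter.
    static Scan pointLookupScan(List<byte[]> keys) {
        List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
        for (byte[] key : keys) {
            // A single-key range: [key, key], inclusive on both ends.
            keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, key, true));
        }
        ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
        Scan scan = new Scan();
        // Set the scan's overall start/stop rows from the ranges...
        scanRanges.initializeScan(scan);
        // ...and install the skip-scan filter that jumps between the keys.
        scan.setFilter(scanRanges.getSkipScanFilter());
        return scan;
    }
}

buildSchemas instead widens each key into a small range (the key up to the key plus an incremented separator byte) so that rows prefixed by the key are covered as well, but the ScanRanges plumbing is the same.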

Example 7 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in project phoenix by apache.

From the class BaseResultIterators, method getParallelScans.

/**
     * Compute the list of parallel scans to run for a given query. Scans within
     * an inner list are contiguous and may be concatenated together directly, while
     * results across the outer lists may need to be merge sorted, depending on the query.
     * @return list of parallel scans to run for a given query.
     * @throws SQLException
     */
private List<List<Scan>> getParallelScans(byte[] startKey, byte[] stopKey) throws SQLException {
    List<HRegionLocation> regionLocations = getRegionBoundaries(scanGrouper);
    List<byte[]> regionBoundaries = toBoundaries(regionLocations);
    ScanRanges scanRanges = context.getScanRanges();
    PTable table = getTable();
    boolean isSalted = table.getBucketNum() != null;
    boolean isLocalIndex = table.getIndexType() == IndexType.LOCAL;
    GuidePostsInfo gps = getGuidePosts();
    hasGuidePosts = gps != GuidePostsInfo.NO_GUIDEPOST;
    boolean traverseAllRegions = isSalted || isLocalIndex;
    if (!traverseAllRegions) {
        byte[] scanStartRow = scan.getStartRow();
        if (scanStartRow.length != 0 && Bytes.compareTo(scanStartRow, startKey) > 0) {
            startKey = scanStartRow;
        }
        byte[] scanStopRow = scan.getStopRow();
        if (stopKey.length == 0 || (scanStopRow.length != 0 && Bytes.compareTo(scanStopRow, stopKey) < 0)) {
            stopKey = scanStopRow;
        }
    }
    int regionIndex = 0;
    int stopIndex = regionBoundaries.size();
    if (startKey.length > 0) {
        regionIndex = getIndexContainingInclusive(regionBoundaries, startKey);
    }
    if (stopKey.length > 0) {
        stopIndex = Math.min(stopIndex, regionIndex + getIndexContainingExclusive(regionBoundaries.subList(regionIndex, stopIndex), stopKey));
        if (isLocalIndex) {
            stopKey = regionLocations.get(stopIndex).getRegionInfo().getEndKey();
        }
    }
    List<List<Scan>> parallelScans = Lists.newArrayListWithExpectedSize(stopIndex - regionIndex + 1);
    ImmutableBytesWritable currentKey = new ImmutableBytesWritable(startKey);
    int gpsSize = gps.getGuidePostsCount();
    int estGuidepostsPerRegion = gpsSize == 0 ? 1 : gpsSize / regionLocations.size() + 1;
    int keyOffset = 0;
    ImmutableBytesWritable currentGuidePost = ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY;
    List<Scan> scans = Lists.newArrayListWithExpectedSize(estGuidepostsPerRegion);
    ImmutableBytesWritable guidePosts = gps.getGuidePosts();
    ByteArrayInputStream stream = null;
    DataInput input = null;
    PrefixByteDecoder decoder = null;
    int guideIndex = 0;
    long estimatedRows = 0;
    long estimatedSize = 0;
    try {
        if (gpsSize > 0) {
            stream = new ByteArrayInputStream(guidePosts.get(), guidePosts.getOffset(), guidePosts.getLength());
            input = new DataInputStream(stream);
            decoder = new PrefixByteDecoder(gps.getMaxLength());
            try {
                while (currentKey.compareTo(currentGuidePost = PrefixByteCodec.decode(decoder, input)) >= 0 && currentKey.getLength() != 0) {
                    guideIndex++;
                }
            } catch (EOFException e) {
                // Reached the end of the encoded guideposts; keep guideIndex as-is.
            }
        }
        byte[] currentKeyBytes = currentKey.copyBytes();
        // Merge bisect with guideposts for all but the last region
        while (regionIndex <= stopIndex) {
            HRegionLocation regionLocation = regionLocations.get(regionIndex);
            HRegionInfo regionInfo = regionLocation.getRegionInfo();
            byte[] currentGuidePostBytes = currentGuidePost.copyBytes();
            byte[] endKey, endRegionKey = EMPTY_BYTE_ARRAY;
            if (regionIndex == stopIndex) {
                endKey = stopKey;
            } else {
                endKey = regionBoundaries.get(regionIndex);
            }
            if (isLocalIndex) {
                endRegionKey = regionInfo.getEndKey();
                keyOffset = ScanUtil.getRowKeyOffset(regionInfo.getStartKey(), endRegionKey);
            }
            try {
                while (guideIndex < gpsSize && (endKey.length == 0 || currentGuidePost.compareTo(endKey) <= 0)) {
                    Scan newScan = scanRanges.intersectScan(scan, currentKeyBytes, currentGuidePostBytes, keyOffset, false);
                    if (newScan != null) {
                        ScanUtil.setLocalIndexAttributes(newScan, keyOffset, regionInfo.getStartKey(), regionInfo.getEndKey(), newScan.getStartRow(), newScan.getStopRow());
                        estimatedRows += gps.getRowCounts()[guideIndex];
                        estimatedSize += gps.getByteCounts()[guideIndex];
                    }
                    if (useStatsForParallelization) {
                        scans = addNewScan(parallelScans, scans, newScan, currentGuidePostBytes, false, regionLocation);
                    }
                    currentKeyBytes = currentGuidePostBytes;
                    currentGuidePost = PrefixByteCodec.decode(decoder, input);
                    currentGuidePostBytes = currentGuidePost.copyBytes();
                    guideIndex++;
                }
            } catch (EOFException e) {
                // No more guideposts for this region; fall through to the end key.
            }
            Scan newScan = scanRanges.intersectScan(scan, currentKeyBytes, endKey, keyOffset, true);
            if (newScan != null) {
                ScanUtil.setLocalIndexAttributes(newScan, keyOffset, regionInfo.getStartKey(), regionInfo.getEndKey(), newScan.getStartRow(), newScan.getStopRow());
            }
            scans = addNewScan(parallelScans, scans, newScan, endKey, true, regionLocation);
            currentKeyBytes = endKey;
            regionIndex++;
        }
        if (scanRanges.isPointLookup()) {
            this.estimatedRows = Long.valueOf(scanRanges.getPointLookupCount());
            this.estimatedSize = this.estimatedRows * SchemaUtil.estimateRowSize(table);
        } else if (hasGuidePosts) {
            this.estimatedRows = estimatedRows;
            this.estimatedSize = estimatedSize;
        } else {
            this.estimatedRows = null;
            this.estimatedSize = null;
        }
        if (!scans.isEmpty()) {
            // Add any remaining scans
            parallelScans.add(scans);
        }
    } finally {
        if (stream != null)
            Closeables.closeQuietly(stream);
    }
    return parallelScans;
}
Also used : PrefixByteDecoder(org.apache.phoenix.util.PrefixByteDecoder) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) GuidePostsInfo(org.apache.phoenix.schema.stats.GuidePostsInfo) DataInputStream(java.io.DataInputStream) ScanRanges(org.apache.phoenix.compile.ScanRanges) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) DataInput(java.io.DataInput) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) ByteArrayInputStream(java.io.ByteArrayInputStream) EOFException(java.io.EOFException) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) Scan(org.apache.hadoop.hbase.client.Scan)
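
Stripped of the local-index and statistics bookkeeping, the merge above does one essential thing: it splits the query's overall key range at every region boundary and at every guidepost that falls inside it, and each resulting chunk becomes one parallel scan. A simplified, self-contained illustration of that splitting step (this is not the Phoenix implementation; the sorted split points stand in for the merged boundaries and guideposts, and the class name is hypothetical):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;

public class SplitSketch {

    // Split [startKey, stopKey) at every split point that falls strictly inside
    // the range. A zero-length stopKey means "unbounded above", as in HBase scans.
    static List<byte[][]> split(byte[] startKey, byte[] stopKey, List<byte[]> sortedSplitPoints) {
        List<byte[][]> chunks = new ArrayList<byte[][]>();
        byte[] current = startKey;
        for (byte[] point : sortedSplitPoints) {
            boolean afterStart = Bytes.compareTo(point, current) > 0;
            boolean beforeStop = stopKey.length == 0 || Bytes.compareTo(point, stopKey) < 0;
            if (afterStart && beforeStop) {
                chunks.add(new byte[][] { current, point });
                current = point;
            }
        }
        // The final chunk runs from the last split point up to stopKey.
        chunks.add(new byte[][] { current, stopKey });
        return chunks;
    }
}

Each chunk is then intersected with the query's ScanRanges via intersectScan, which returns null for chunks the query's ranges cannot touch; that is why the method above checks each newScan for null before using it.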

Example 8 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in project phoenix by apache.

From the class ExplainTable, method explain.

protected void explain(String prefix, List<String> planSteps) {
    StringBuilder buf = new StringBuilder(prefix);
    ScanRanges scanRanges = context.getScanRanges();
    Scan scan = context.getScan();
    if (scan.getConsistency() != Consistency.STRONG) {
        buf.append("TIMELINE-CONSISTENCY ");
    }
    if (hint.hasHint(Hint.SMALL)) {
        buf.append(Hint.SMALL).append(" ");
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        buf.append("REVERSE ");
    }
    if (scanRanges.isEverything()) {
        buf.append("FULL SCAN ");
    } else {
        explainSkipScan(buf);
    }
    buf.append("OVER ").append(tableRef.getTable().getPhysicalName().getString());
    if (!scanRanges.isPointLookup()) {
        appendKeyRanges(buf);
    }
    planSteps.add(buf.toString());
    if (context.getScan() != null && tableRef.getTable().getRowTimestampColPos() != -1) {
        TimeRange range = context.getScan().getTimeRange();
        planSteps.add("    ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")");
    }
    PageFilter pageFilter = null;
    FirstKeyOnlyFilter firstKeyOnlyFilter = null;
    BooleanExpressionFilter whereFilter = null;
    DistinctPrefixFilter distinctFilter = null;
    Iterator<Filter> filterIterator = ScanUtil.getFilterIterator(scan);
    if (filterIterator.hasNext()) {
        do {
            Filter filter = filterIterator.next();
            if (filter instanceof FirstKeyOnlyFilter) {
                firstKeyOnlyFilter = (FirstKeyOnlyFilter) filter;
            } else if (filter instanceof PageFilter) {
                pageFilter = (PageFilter) filter;
            } else if (filter instanceof BooleanExpressionFilter) {
                whereFilter = (BooleanExpressionFilter) filter;
            } else if (filter instanceof DistinctPrefixFilter) {
                distinctFilter = (DistinctPrefixFilter) filter;
            }
        } while (filterIterator.hasNext());
    }
    if (whereFilter != null) {
        planSteps.add("    SERVER FILTER BY " + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") + whereFilter.toString());
    } else if (firstKeyOnlyFilter != null) {
        planSteps.add("    SERVER FILTER BY FIRST KEY ONLY");
    }
    if (distinctFilter != null) {
        planSteps.add("    SERVER DISTINCT PREFIX FILTER OVER " + groupBy.getExpressions().toString());
    }
    if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) {
        // with GROUP BY, sort happens client-side
        planSteps.add("    SERVER" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString());
    } else {
        if (offset != null) {
            planSteps.add("    SERVER OFFSET " + offset);
        }
        if (pageFilter != null) {
            planSteps.add("    SERVER " + pageFilter.getPageSize() + " ROW LIMIT");
        }
    }
    Integer groupByLimit = null;
    byte[] groupByLimitBytes = scan.getAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT);
    if (groupByLimitBytes != null) {
        groupByLimit = (Integer) PInteger.INSTANCE.toObject(groupByLimitBytes);
    }
    groupBy.explain(planSteps, groupByLimit);
    if (scan.getAttribute(BaseScannerRegionObserver.SPECIFIC_ARRAY_INDEX) != null) {
        planSteps.add("    SERVER ARRAY ELEMENT PROJECTION");
    }
}
Also used : PInteger(org.apache.phoenix.schema.types.PInteger) TimeRange(org.apache.hadoop.hbase.io.TimeRange) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) BooleanExpressionFilter(org.apache.phoenix.filter.BooleanExpressionFilter) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DistinctPrefixFilter(org.apache.phoenix.filter.DistinctPrefixFilter) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) Filter(org.apache.hadoop.hbase.filter.Filter) Scan(org.apache.hadoop.hbase.client.Scan) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) DistinctPrefixFilter(org.apache.phoenix.filter.DistinctPrefixFilter) ScanRanges(org.apache.phoenix.compile.ScanRanges) BooleanExpressionFilter(org.apache.phoenix.filter.BooleanExpressionFilter)
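
Put together, the steps emitted here read as the familiar indented EXPLAIN output. Purely as an illustration (not captured from a real run), a query with a WHERE filter and a GROUP BY over a hypothetical table T might produce steps shaped like:

CLIENT PARALLEL 1-WAY FULL SCAN OVER T
    SERVER FILTER BY FIRST KEY ONLY AND A > 10
    SERVER AGGREGATE INTO DISTINCT ROWS BY [B]

The leading "CLIENT PARALLEL 1-WAY" portion is supplied by the caller through the prefix argument, which is why it is not constructed inside this method; the aggregate line comes from the groupBy.explain call at the end.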

Example 9 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in project phoenix by apache.

From the class ExplainTable, method appendPKColumnValue.

private void appendPKColumnValue(StringBuilder buf, byte[] range, Boolean isNull, int slotIndex, boolean changeViewIndexId) {
    if (Boolean.TRUE.equals(isNull)) {
        buf.append("null");
        return;
    }
    if (Boolean.FALSE.equals(isNull)) {
        buf.append("not null");
        return;
    }
    if (range.length == 0) {
        buf.append('*');
        return;
    }
    ScanRanges scanRanges = context.getScanRanges();
    PDataType type = scanRanges.getSchema().getField(slotIndex).getDataType();
    SortOrder sortOrder = tableRef.getTable().getPKColumns().get(slotIndex).getSortOrder();
    if (sortOrder == SortOrder.DESC) {
        buf.append('~');
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(range);
        type.coerceBytes(ptr, type, sortOrder, SortOrder.getDefault());
        range = ptr.get();
    }
    if (changeViewIndexId) {
        // Undo the offset applied when the view index id was encoded into the key.
        Short s = (Short) type.toObject(range);
        s = (short) (s + (-Short.MAX_VALUE));
        buf.append(s.toString());
    } else {
        Format formatter = context.getConnection().getFormatter(type);
        buf.append(type.toStringLiteral(range, formatter));
    }
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) Format(java.text.Format) PDataType(org.apache.phoenix.schema.types.PDataType) SortOrder(org.apache.phoenix.schema.SortOrder) ScanRanges(org.apache.phoenix.compile.ScanRanges)
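
The '~' prefix flags a slot whose PK column is DESC-ordered: Phoenix stores descending columns with every value byte inverted, so that HBase's ascending byte comparisons yield descending value order, and the coerceBytes call above undoes that before formatting. A minimal sketch of the inversion itself, assuming the one's-complement encoding Phoenix uses for DESC columns (the class name is hypothetical):

public class DescInvertSketch {

    // Invert each byte so that ascending byte order over the stored form
    // corresponds to descending order over the original values.
    static byte[] invert(byte[] b) {
        byte[] inverted = new byte[b.length];
        for (int i = 0; i < b.length; i++) {
            inverted[i] = (byte) (b[i] ^ 0xff);
        }
        return inverted;
    }
}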

Example 10 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in project phoenix by apache.

From the class ExplainTable, method appendScanRow.

private void appendScanRow(StringBuilder buf, Bound bound) {
    ScanRanges scanRanges = context.getScanRanges();
    // TODO: review this and potentially intersect the scan ranges
    // with the minMaxRange in ScanRanges to prevent having to do all this.
    KeyRange minMaxRange = scanRanges.getMinMaxRange();
    Iterator<byte[]> minMaxIterator = Iterators.emptyIterator();
    if (minMaxRange != KeyRange.EVERYTHING_RANGE) {
        RowKeySchema schema = tableRef.getTable().getRowKeySchema();
        if (!minMaxRange.isUnbound(bound)) {
            minMaxIterator = new RowKeyValueIterator(schema, minMaxRange.getRange(bound));
        }
    }
    boolean isLocalIndex = ScanUtil.isLocalIndex(context.getScan());
    boolean forceSkipScan = this.hint.hasHint(Hint.SKIP_SCAN);
    int nRanges = forceSkipScan ? scanRanges.getRanges().size() : scanRanges.getBoundSlotCount();
    for (int i = 0, minPos = 0; minPos < nRanges || minMaxIterator.hasNext(); i++) {
        List<KeyRange> ranges = minPos >= nRanges ? EVERYTHING : scanRanges.getRanges().get(minPos++);
        KeyRange range = bound == Bound.LOWER ? ranges.get(0) : ranges.get(ranges.size() - 1);
        byte[] b = range.getRange(bound);
        Boolean isNull = KeyRange.IS_NULL_RANGE == range ? Boolean.TRUE : KeyRange.IS_NOT_NULL_RANGE == range ? Boolean.FALSE : null;
        if (minMaxIterator.hasNext()) {
            byte[] bMinMax = minMaxIterator.next();
            int cmp = Bytes.compareTo(bMinMax, b) * (bound == Bound.LOWER ? 1 : -1);
            if (cmp > 0) {
                minPos = nRanges;
                b = bMinMax;
                isNull = null;
            } else if (cmp < 0) {
                minMaxIterator = Iterators.emptyIterator();
            }
        }
        if (isLocalIndex && i == 0) {
            appendPKColumnValue(buf, b, isNull, i, true);
        } else {
            appendPKColumnValue(buf, b, isNull, i, false);
        }
        buf.append(',');
    }
}
Also used : KeyRange(org.apache.phoenix.query.KeyRange) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) ScanRanges(org.apache.phoenix.compile.ScanRanges) Hint(org.apache.phoenix.parse.HintNode.Hint)
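
Each call renders one bound of the scan as a comma-separated list of per-slot values, and appendKeyRanges (not shown here) joins the lower and upper rows into the bracketed range seen in EXPLAIN output. Purely as an illustration, a two-slot range over a hypothetical table T could render as:

CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['acme','2017-01-01'] - ['acme','2017-02-01']

The '*' printed for a zero-length value (see appendPKColumnValue above) marks a slot that is unbounded on that side.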

Aggregations

ScanRanges (org.apache.phoenix.compile.ScanRanges): 11
Scan (org.apache.hadoop.hbase.client.Scan): 6
KeyRange (org.apache.phoenix.query.KeyRange): 6
ArrayList (java.util.ArrayList): 4
List (java.util.List): 3
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 3
Hint (org.apache.phoenix.parse.HintNode.Hint): 3
Mutation (org.apache.hadoop.hbase.client.Mutation): 2
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 2
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 2
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter): 2
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 2
RowKeySchema (org.apache.phoenix.schema.RowKeySchema): 2
PSmallint (org.apache.phoenix.schema.types.PSmallint): 2
PTinyint (org.apache.phoenix.schema.types.PTinyint): 2
ImmutableList (com.google.common.collect.ImmutableList): 1
ByteArrayInputStream (java.io.ByteArrayInputStream): 1
DataInput (java.io.DataInput): 1
DataInputStream (java.io.DataInputStream): 1
EOFException (java.io.EOFException): 1