Example 81 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class NonAggregateRegionScannerFactory, method getRegionScanner.

@Override
public RegionScanner getRegionScanner(final Scan scan, final RegionScanner s) throws Throwable {
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        Region region = getRegion();
        offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    byte[] scanOffsetBytes = scan.getAttribute(BaseScannerRegionObserver.SCAN_OFFSET);
    Integer scanOffset = null;
    if (scanOffsetBytes != null) {
        scanOffset = (Integer) PInteger.INSTANCE.toObject(scanOffsetBytes);
    }
    RegionScanner innerScanner = s;
    Set<KeyValueColumnExpression> arrayKVRefs = Sets.newHashSet();
    Expression[] arrayFuncRefs = deserializeArrayPostionalExpressionInfoFromScan(scan, innerScanner, arrayKVRefs);
    TupleProjector tupleProjector = null;
    Region dataRegion = null;
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    Transaction tx = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    if (dataColumns != null) {
        tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
        dataRegion = env.getRegion();
        boolean useProto = false;
        byte[] localIndexBytes = scan.getAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO);
        useProto = localIndexBytes != null;
        if (localIndexBytes == null) {
            localIndexBytes = scan.getAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD);
        }
        List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
        indexMaintainer = indexMaintainers.get(0);
        viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
        tx = MutationState.decodeTransaction(txState);
    }
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(getMinMaxQualifiersFromScan(scan)) && scan.getAttribute(BaseScannerRegionObserver.TOPN) != null;
    // setting dataRegion in case of a non-coprocessor environment
    if (dataRegion == null && env.getConfiguration().get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY) != null) {
        dataRegion = env.getRegion();
    }
    innerScanner = getWrappedScanner(env, innerScanner, arrayKVRefs, arrayFuncRefs, offset, scan, dataColumns, tupleProjector, dataRegion, indexMaintainer, tx, viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr, useQualifierAsIndex);
    final ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan);
    if (j != null) {
        innerScanner = new HashJoinRegionScanner(innerScanner, p, j, tenantId, env, useQualifierAsIndex, useNewValueColumnQualifier);
    }
    if (scanOffset != null) {
        innerScanner = getOffsetScanner(innerScanner, new OffsetResultIterator(new RegionScannerResultIterator(innerScanner, getMinMaxQualifiersFromScan(scan), encodingScheme), scanOffset), scan.getAttribute(QueryConstants.LAST_SCAN) != null);
    }
    final OrderedResultIterator iterator = deserializeFromScan(scan, innerScanner);
    if (iterator == null) {
        return innerScanner;
    }
    // TODO: the above wrapped scanner should be used here also
    return getTopNScanner(env, innerScanner, iterator, tenantId);
}
Also used: TupleProjector (org.apache.phoenix.execute.TupleProjector), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PInteger (org.apache.phoenix.schema.types.PInteger), BaseRegionScanner (org.apache.phoenix.coprocessor.BaseRegionScanner), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), HashJoinRegionScanner (org.apache.phoenix.coprocessor.HashJoinRegionScanner), IndexMaintainer (org.apache.phoenix.index.IndexMaintainer), Transaction (org.apache.tephra.Transaction), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), Expression (org.apache.phoenix.expression.Expression), SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression), OrderByExpression (org.apache.phoenix.expression.OrderByExpression), HashJoinInfo (org.apache.phoenix.join.HashJoinInfo), Region (org.apache.hadoop.hbase.regionserver.Region), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
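
A note on the key type: the tenant id that ScanUtil.getTenantId(scan) returns above is an ImmutableBytesPtr, Phoenix's wrapper around a byte slice that caches its hash code. The standalone sketch below (the class name, cache, and method are hypothetical, not part of the factory above) shows why that makes it a convenient map key: a byte[] identifier can be looked up repeatedly without recomputing its hash.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class TenantCacheSketch {
    // Hypothetical per-tenant counter, keyed the same way the tenantId above is
    // passed to HashJoinRegionScanner and getTopNScanner.
    private final Map<ImmutableBytesPtr, Long> scanCountByTenant = new HashMap<>();

    public void recordScan(byte[] tenantIdBytes) {
        // ImmutableBytesPtr computes its hash code once and caches it, so repeated
        // lookups with the same tenant id do not rehash the underlying bytes.
        ImmutableBytesPtr tenantId = new ImmutableBytesPtr(tenantIdBytes);
        scanCountByTenant.merge(tenantId, 1L, Long::sum);
    }

    public static void main(String[] args) {
        TenantCacheSketch sketch = new TenantCacheSketch();
        sketch.recordScan(Bytes.toBytes("tenant-a"));
        sketch.recordScan(Bytes.toBytes("tenant-a"));
        System.out.println(sketch.scanCountByTenant);   // one key, count 2
    }
}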

Example 82 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class DefaultStatisticsCollector, method collectStatistics.

/**
 * Update the current statistics based on the latest batch of cells from the underlying scanner.
 *
 * @param results next batch of {@link Cell}s
 */
@Override
public void collectStatistics(final List<Cell> results) {
    // A guide posts depth of zero disables the collection of stats
    if (guidePostDepth == 0) {
        return;
    }
    Map<ImmutableBytesPtr, Boolean> famMap = Maps.newHashMap();
    boolean incrementRow = true;
    for (Cell cell : results) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        maxTimeStamp = Math.max(maxTimeStamp, kv.getTimestamp());
        Pair<Long, GuidePostsInfoBuilder> gps;
        if (cachedGuidePosts == null) {
            ImmutableBytesPtr cfKey = new ImmutableBytesPtr(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength());
            gps = guidePostsInfoWriterMap.get(cfKey);
            if (gps == null) {
                gps = new Pair<Long, GuidePostsInfoBuilder>(0L, new GuidePostsInfoBuilder());
                guidePostsInfoWriterMap.put(cfKey, gps);
            }
            if (famMap.get(cfKey) == null) {
                famMap.put(cfKey, true);
                gps.getSecond().incrementRowCount();
            }
        } else {
            gps = cachedGuidePosts;
            if (incrementRow) {
                cachedGuidePosts.getSecond().incrementRowCount();
                incrementRow = false;
            }
        }
        int kvLength = kv.getLength();
        long byteCount = gps.getFirst() + kvLength;
        gps.setFirst(byteCount);
        if (byteCount >= guidePostDepth) {
            ImmutableBytesWritable row = new ImmutableBytesWritable(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
            if (gps.getSecond().addGuidePosts(row, byteCount, gps.getSecond().getRowCount())) {
                gps.setFirst(0L);
                gps.getSecond().resetRowCount();
            }
        }
    }
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PLong (org.apache.phoenix.schema.types.PLong), Cell (org.apache.hadoop.hbase.Cell)
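
The collector above keys famMap and guidePostsInfoWriterMap by an ImmutableBytesPtr built straight from the cell's family array, offset, and length, so no family bytes are copied per cell. A minimal sketch of the same keying pattern follows; the helper class and method are hypothetical, and it assumes Guava's Maps is available, as it is in Phoenix.

import java.util.List;
import java.util.Map;

import com.google.common.collect.Maps;
import org.apache.hadoop.hbase.Cell;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class FamilyCountSketch {
    // Count cells per column family, wrapping each family slice in an
    // ImmutableBytesPtr so the map key points into the cell's existing buffer.
    public static Map<ImmutableBytesPtr, Integer> countCellsPerFamily(List<Cell> results) {
        Map<ImmutableBytesPtr, Integer> counts = Maps.newHashMap();
        for (Cell cell : results) {
            ImmutableBytesPtr cfKey = new ImmutableBytesPtr(
                    cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
            Integer current = counts.get(cfKey);
            counts.put(cfKey, current == null ? 1 : current + 1);
        }
        return counts;
    }
}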

Example 83 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class DistinctValueClientAggregator, method evaluate.

@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    if (cachedResult == null) {
        Object[] values = new Object[valueVsCount.size()];
        int i = 0;
        for (ImmutableBytesPtr key : valueVsCount.keySet()) {
            values[i++] = valueType.toObject(key, sortOrder);
        }
        cachedResult = PArrayDataType.instantiatePhoenixArray(valueType, values);
    }
    buffer = resultType.toBytes(cachedResult, sortOrder);
    ptr.set(buffer);
    return true;
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)
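
evaluate() walks the keys of valueVsCount, which hold the distinct values in their encoded byte form, and decodes each one through the aggregator's PDataType. The standalone sketch below shows the same encode, dedupe, decode round trip; it substitutes PVarchar and copyBytes() for the aggregator's valueType and sort order, so it is an illustration rather than the aggregator's own code path.

import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.schema.types.PVarchar;

public class DistinctKeyDecodeSketch {
    public static void main(String[] args) {
        // Distinct values kept in encoded byte form, wrapped in ImmutableBytesPtr so
        // equality and hashing work on the byte content.
        Set<ImmutableBytesPtr> distinctKeys = new LinkedHashSet<>();
        distinctKeys.add(new ImmutableBytesPtr(PVarchar.INSTANCE.toBytes("apple")));
        distinctKeys.add(new ImmutableBytesPtr(PVarchar.INSTANCE.toBytes("banana")));
        distinctKeys.add(new ImmutableBytesPtr(PVarchar.INSTANCE.toBytes("apple"))); // duplicate, ignored

        // Decode each key back to a Java object, as evaluate() does with valueType.toObject(...).
        for (ImmutableBytesPtr key : distinctKeys) {
            Object value = PVarchar.INSTANCE.toObject(key.copyBytes());
            System.out.println(value);
        }
    }
}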

Example 84 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class DistinctValueWithCountClientAggregator, method aggregate.

@Override
public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) {
    if (tuple instanceof SingleKeyValueTuple) {
        // Case when scanners look ahead and re-aggregate the result row. The result is already available in the ptr.
        PDataType resultDataType = getResultDataType();
        cachedResult = resultDataType.toObject(ptr, resultDataType, sortOrder);
    } else {
        InputStream is;
        try {
            if (Bytes.equals(ptr.get(), ptr.getOffset(), 1, DistinctValueWithCountServerAggregator.COMPRESS_MARKER, 0, 1)) {
                // This reads the uncompressed length from the front of the compressed input
                int uncompressedLength = Snappy.getUncompressedLength(ptr.get(), ptr.getOffset() + 1);
                byte[] uncompressed = new byte[uncompressedLength];
                // This will throw CorruptionException, a RuntimeException if the snappy data is invalid.
                // We're making a RuntimeException out of a checked IOException below so assume it's ok
                // to let any CorruptionException escape.
                Snappy.uncompress(ptr.get(), ptr.getOffset() + 1, ptr.getLength() - 1, uncompressed, 0);
                is = new ByteArrayInputStream(uncompressed, 0, uncompressedLength);
            } else {
                is = new ByteArrayInputStream(ptr.get(), ptr.getOffset() + 1, ptr.getLength() - 1);
            }
            DataInputStream in = new DataInputStream(is);
            int mapSize = WritableUtils.readVInt(in);
            for (int i = 0; i < mapSize; i++) {
                int keyLen = WritableUtils.readVInt(in);
                byte[] keyBytes = new byte[keyLen];
                // readFully: the key bytes must be fully consumed before the count vint
                in.readFully(keyBytes, 0, keyLen);
                ImmutableBytesPtr key = new ImmutableBytesPtr(keyBytes);
                int value = WritableUtils.readVInt(in);
                Integer curCount = valueVsCount.get(key);
                if (curCount == null) {
                    valueVsCount.put(key, value);
                } else {
                    valueVsCount.put(key, curCount + value);
                }
                totalCount += value;
            }
        } catch (IOException ioe) {
            // Impossible as we're using a ByteArrayInputStream
            throw new RuntimeException(ioe);
        }
    }
    if (buffer == null) {
        initBuffer();
    }
}
Also used: PDataType (org.apache.phoenix.schema.types.PDataType), ByteArrayInputStream (java.io.ByteArrayInputStream), DataInputStream (java.io.DataInputStream), InputStream (java.io.InputStream), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), SingleKeyValueTuple (org.apache.phoenix.schema.tuple.SingleKeyValueTuple), IOException (java.io.IOException)
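
The loop above parses a payload laid out as one marker byte, a vint map size, and then one (vint key length, key bytes, vint count) triple per entry; the marker byte tells the client whether the remainder is Snappy-compressed. The sketch below writes and reads back that uncompressed layout with WritableUtils. The marker value 0 and the sample entries are assumptions for illustration only; Phoenix's actual COMPRESS_MARKER constant lives in DistinctValueWithCountServerAggregator.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;

public class VIntMapFormatSketch {
    public static void main(String[] args) throws IOException {
        // Write: [1 marker byte][vint mapSize]([vint keyLen][key bytes][vint count])*
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        out.writeByte(0);                        // marker byte (assumed "not compressed")
        WritableUtils.writeVInt(out, 2);         // map size
        byte[] k1 = Bytes.toBytes("a");
        WritableUtils.writeVInt(out, k1.length);
        out.write(k1);
        WritableUtils.writeVInt(out, 3);         // count for "a"
        byte[] k2 = Bytes.toBytes("bc");
        WritableUtils.writeVInt(out, k2.length);
        out.write(k2);
        WritableUtils.writeVInt(out, 1);         // count for "bc"
        byte[] payload = bos.toByteArray();

        // Read it back the way aggregate() does, skipping the marker byte.
        DataInputStream in = new DataInputStream(
                new ByteArrayInputStream(payload, 1, payload.length - 1));
        int mapSize = WritableUtils.readVInt(in);
        for (int i = 0; i < mapSize; i++) {
            byte[] key = new byte[WritableUtils.readVInt(in)];
            in.readFully(key);
            int count = WritableUtils.readVInt(in);
            System.out.println(Bytes.toString(key) + " -> " + count);
        }
    }
}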

Example 85 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class DistinctValueWithCountServerAggregator, method aggregate.

@Override
public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) {
    // Copy the key bytes only when they are a small slice of a large backing array
    // (per FIXED_COPY_THRESHOLD and COPY_THRESHOLD); otherwise wrap the existing bytes.
    ImmutableBytesPtr key = ptr.get().length > FIXED_COPY_THRESHOLD && ptr.get().length > ptr.getLength() * COPY_THRESHOLD
            ? new ImmutableBytesPtr(ptr.copyBytes())
            : new ImmutableBytesPtr(ptr);
    Integer count = this.valueVsCount.get(key);
    if (count == null) {
        this.valueVsCount.put(key, 1);
    } else {
        this.valueVsCount.put(key, ++count);
    }
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)
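
The server aggregator only copies the key bytes when the pointer's backing array is both large in absolute terms (FIXED_COPY_THRESHOLD) and much larger than the slice actually referenced (COPY_THRESHOLD); otherwise it wraps the existing bytes. Below is a standalone sketch of that heuristic; the threshold values and the helper are assumptions, since the real constants are internal to the aggregator.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class CopyOrWrapSketch {
    // Illustrative thresholds only; Phoenix's FIXED_COPY_THRESHOLD and COPY_THRESHOLD
    // may use different values.
    private static final int ASSUMED_FIXED_COPY_THRESHOLD = 1024;
    private static final int ASSUMED_COPY_THRESHOLD = 3;

    static ImmutableBytesPtr copyOrWrap(ImmutableBytesWritable ptr) {
        // Copy the slice out of a large backing array so a long-lived map key does not
        // keep a much bigger buffer reachable; otherwise just point at the existing bytes.
        boolean sliceOfMuchLargerArray =
                ptr.get().length > ASSUMED_FIXED_COPY_THRESHOLD
                        && ptr.get().length > ptr.getLength() * ASSUMED_COPY_THRESHOLD;
        return sliceOfMuchLargerArray
                ? new ImmutableBytesPtr(ptr.copyBytes())
                : new ImmutableBytesPtr(ptr);
    }

    public static void main(String[] args) {
        byte[] block = new byte[4096];                               // stand-in for a shared buffer
        ImmutableBytesWritable slice = new ImmutableBytesWritable(block, 100, 8);
        System.out.println(copyOrWrap(slice).getLength());           // 8: copied out of the 4096-byte array
        ImmutableBytesWritable small = new ImmutableBytesWritable(Bytes.toBytes("tiny"));
        System.out.println(copyOrWrap(small).getLength());           // 4: wrapped in place
    }
}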

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 120
Mutation (org.apache.hadoop.hbase.client.Mutation): 31
PTable (org.apache.phoenix.schema.PTable): 28
ArrayList (java.util.ArrayList): 27
Region (org.apache.hadoop.hbase.regionserver.Region): 22
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 22
Test (org.junit.Test): 21
Cell (org.apache.hadoop.hbase.Cell): 20
Put (org.apache.hadoop.hbase.client.Put): 18
List (java.util.List): 15
Scan (org.apache.hadoop.hbase.client.Scan): 15
Pair (org.apache.hadoop.hbase.util.Pair): 15
IOException (java.io.IOException): 14
Expression (org.apache.phoenix.expression.Expression): 14
PColumn (org.apache.phoenix.schema.PColumn): 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 13
PSmallint (org.apache.phoenix.schema.types.PSmallint): 12
HashMap (java.util.HashMap): 11
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 11
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 11