Search in sources :

Example 26 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

Class GroupedAggregateRegionObserver, method doPostScannerOpen.

/**
 * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the list
 * of expressions from the scan and returns the aggregated rows of each group. For example,
 * given the following original rows in the RegionScanner: KEY COL1 row1 a row2 b row3 a row4 a
 * the following rows will be returned for COUNT(*): KEY COUNT a 3 b 1 The client is required to
 * do a sort and a final aggregation, since multiple rows with the same key may be returned from
 * different regions.
 */
@Override
protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
    boolean keyOrdered = false;
    // The group-by expressions are serialized under one of two scan attributes; which one
    // is present tells us whether rows already arrive in group-by key order.
    byte[] expressionBytes = scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS);
    if (expressionBytes == null) {
        expressionBytes = scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS);
        keyOrdered = true;
    }
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        Region region = c.getEnvironment().getRegion();
        // First region has an empty start key, so fall back to the end key's length there.
        offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
    ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), c.getEnvironment().getConfiguration());
    RegionScanner innerScanner = s;
    // Local index maintainers may be serialized in either protobuf or the legacy format;
    // the protobuf attribute takes precedence when both could exist.
    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    // Wrap the raw scanner when we must project local-index/data-table columns
    // (the hash-join case without a projector is handled separately below).
    if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
        innerScanner = getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        // Layer the server-side hash join on top of whatever scanner we have so far.
        innerScanner = new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier);
    }
    long limit = Long.MAX_VALUE;
    byte[] limitBytes = scan.getAttribute(GROUP_BY_LIMIT);
    if (limitBytes != null) {
        limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault());
    }
    if (keyOrdered) {
        // already in the required group by key order
        return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
    } else {
        // Otherwise, collect them all up in an in memory map
        return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
    }
}
Also used : TupleProjector(org.apache.phoenix.execute.TupleProjector) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) Expression(org.apache.phoenix.expression.Expression) HashJoinInfo(org.apache.phoenix.join.HashJoinInfo) Region(org.apache.hadoop.hbase.regionserver.Region) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)

Example 27 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project atlas by apache.

the class HBaseAtlasCoprocessor method preScannerOpen.

@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> HBaseAtlasCoprocessor.preScannerOpen()");
    }

    // Delegate to the plugin implementation, making sure its class loader is
    // active only for the duration of the call.
    activatePluginClassLoader();
    RegionScanner scannerFromImpl;
    try {
        scannerFromImpl = implRegionObserver.preScannerOpen(c, scan, s);
    } finally {
        deactivatePluginClassLoader();
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("<== HBaseAtlasCoprocessor.preScannerOpen()");
    }
    return scannerFromImpl;
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner)

Example 28 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project atlas by apache.

the class HBaseAtlasCoprocessor method postScannerOpen.

@Override
public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> HBaseAtlasCoprocessor.postScannerOpen()");
    }

    // Delegate to the plugin implementation, making sure its class loader is
    // active only for the duration of the call.
    activatePluginClassLoader();
    RegionScanner scannerFromImpl;
    try {
        scannerFromImpl = implRegionObserver.postScannerOpen(c, scan, s);
    } finally {
        deactivatePluginClassLoader();
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("<== HBaseAtlasCoprocessor.postScannerOpen()");
    }
    return scannerFromImpl;
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner)

Example 29 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by apache.

the class RegionProcedureStore method load.

@Override
public void load(ProcedureLoader loader) throws IOException {
    // Scan every procedure row, tracking the highest procedure id seen and
    // collecting the serialized procedures (a zero-length value marks a
    // deleted procedure and is skipped).
    List<ProcedureProtos.Procedure> deserialized = new ArrayList<>();
    long highestProcId = 0;
    Scan allProcedures = new Scan().addColumn(PROC_FAMILY, PROC_QUALIFIER);
    try (RegionScanner scanner = region.getRegionScanner(allProcedures)) {
        List<Cell> rowCells = new ArrayList<>();
        boolean hasMore = true;
        while (hasMore) {
            hasMore = scanner.next(rowCells);
            if (!rowCells.isEmpty()) {
                Cell first = rowCells.get(0);
                rowCells.clear();
                // The row key is the procedure id encoded as a long.
                long procId = Bytes.toLong(first.getRowArray(), first.getRowOffset(), first.getRowLength());
                highestProcId = Math.max(highestProcId, procId);
                if (first.getValueLength() > 0) {
                    deserialized.add(ProcedureProtos.Procedure.parser().parseFrom(first.getValueArray(), first.getValueOffset(), first.getValueLength()));
                }
            }
        }
    }
    loader.setMaxProcId(highestProcId);
    // Rebuild the parent/child tree and hand valid vs. corrupted procedures
    // to the loader separately.
    ProcedureTree tree = ProcedureTree.build(deserialized);
    loader.load(tree.getValidProcs());
    loader.handleCorrupted(tree.getCorruptedProcs());
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) ArrayList(java.util.ArrayList) UnassignProcedure(org.apache.hadoop.hbase.master.assignment.UnassignProcedure) RecoverMetaProcedure(org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) AssignProcedure(org.apache.hadoop.hbase.master.assignment.AssignProcedure) MoveRegionProcedure(org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure) ServerCrashProcedure(org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure) Scan(org.apache.hadoop.hbase.client.Scan) ProcedureTree(org.apache.hadoop.hbase.procedure2.store.ProcedureTree) ProcedureProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos) Cell(org.apache.hadoop.hbase.Cell)

Example 30 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by apache.

Class MultiRowMutationEndpoint, method matches.

/**
 * Evaluates a check-and-mutate {@code Condition} against the given region's current
 * contents. The condition carries either a filter OR a (family, qualifier, op,
 * comparator) tuple — the two forms are mutually exclusive per the protobuf message.
 *
 * @param region    region to read the current row state from
 * @param condition the serialized condition from the client request
 * @return true if the condition holds for the row
 * @throws IOException if the read or protobuf conversion fails
 */
private boolean matches(Region region, ClientProtos.Condition condition) throws IOException {
    byte[] row = condition.getRow().toByteArray();
    Filter filter = null;
    byte[] family = null;
    byte[] qualifier = null;
    CompareOperator op = null;
    ByteArrayComparable comparator = null;
    if (condition.hasFilter()) {
        filter = ProtobufUtil.toFilter(condition.getFilter());
    } else {
        // Comparator form: only populated when no filter is supplied, so in the
        // non-filter branches below, comparator/op are guaranteed non-null.
        family = condition.getFamily().toByteArray();
        qualifier = condition.getQualifier().toByteArray();
        op = CompareOperator.valueOf(condition.getCompareType().name());
        comparator = ProtobufUtil.toComparator(condition.getComparator());
    }
    TimeRange timeRange = condition.hasTimeRange() ? ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime();
    Get get = new Get(row);
    if (family != null) {
        checkFamily(region, family);
        get.addColumn(family, qualifier);
    }
    if (filter != null) {
        get.setFilter(filter);
    }
    if (timeRange != null) {
        get.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    boolean matches = false;
    try (RegionScanner scanner = region.getScanner(new Scan(get))) {
        // NOTE: Please don't use HRegion.get() instead,
        // because it will copy cells to heap. See HBASE-26036
        List<Cell> result = new ArrayList<>();
        scanner.next(result);
        if (filter != null) {
            // Filter form: the condition matches iff any cell survives the filter.
            if (!result.isEmpty()) {
                matches = true;
            }
        } else {
            // Comparator form. A null/empty expected value matches a missing cell
            // or a cell with an empty value; otherwise compare the single cell's
            // value with the requested operator.
            boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0;
            if (result.isEmpty() && valueIsNull) {
                matches = true;
            } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && valueIsNull) {
                matches = true;
            } else if (result.size() == 1 && !valueIsNull) {
                Cell kv = result.get(0);
                int compareResult = PrivateCellUtil.compareValue(kv, comparator);
                // Delegates to the overload that interprets compareResult under op.
                matches = matches(op, compareResult);
            }
        }
    }
    return matches;
}
Also used : ArrayList(java.util.ArrayList) CompareOperator(org.apache.hadoop.hbase.CompareOperator) TimeRange(org.apache.hadoop.hbase.io.TimeRange) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Filter(org.apache.hadoop.hbase.filter.Filter) Get(org.apache.hadoop.hbase.client.Get) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell)

Aggregations

RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner)97 Scan (org.apache.hadoop.hbase.client.Scan)75 Cell (org.apache.hadoop.hbase.Cell)59 ArrayList (java.util.ArrayList)35 Test (org.junit.Test)35 Put (org.apache.hadoop.hbase.client.Put)33 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)25 Region (org.apache.hadoop.hbase.regionserver.Region)20 List (java.util.List)18 TableId (co.cask.cdap.data2.util.TableId)17 IOException (java.io.IOException)14 Delete (org.apache.hadoop.hbase.client.Delete)14 RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment)12 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)12 KeyValue (org.apache.hadoop.hbase.KeyValue)11 Configuration (org.apache.hadoop.conf.Configuration)9 ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)9 PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity)9 InvocationOnMock (org.mockito.invocation.InvocationOnMock)8 Result (org.apache.hadoop.hbase.client.Result)6