Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by Apache.
From the class GroupedAggregateRegionObserver, method doPostScannerOpen.
/**
 * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the
 * list of expressions from the scan and returns the aggregated rows of each group. For
 * example, given the following original rows in the RegionScanner:
 *
 *   KEY   COL1
 *   row1  a
 *   row2  b
 *   row3  a
 *   row4  a
 *
 * the following rows will be returned for COUNT(*):
 *
 *   KEY  COUNT
 *   a    3
 *   b    1
 *
 * The client is required to do a sort and a final aggregation, since multiple rows with
 * the same key may be returned from different regions.
 */
@Override
protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Scan scan, RegionScanner s) throws IOException {
    boolean keyOrdered = false;
    byte[] expressionBytes =
            scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS);
    if (expressionBytes == null) {
        expressionBytes =
                scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS);
        keyOrdered = true;
    }
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        Region region = c.getEnvironment().getRegion();
        offset = region.getRegionInfo().getStartKey().length != 0
                ? region.getRegionInfo().getStartKey().length
                : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
    ServerAggregators aggregators = ServerAggregators.deserialize(
            scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS),
            c.getEnvironment().getConfiguration());
    RegionScanner innerScanner = s;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    boolean useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null
            ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(
            EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
        innerScanner = getWrappedScanner(c, innerScanner, offset, scan, dataColumns,
                tupleProjector, c.getEnvironment().getRegion(),
                indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p,
                tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        innerScanner = new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan),
                c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier);
    }
    long limit = Long.MAX_VALUE;
    byte[] limitBytes = scan.getAttribute(GROUP_BY_LIMIT);
    if (limitBytes != null) {
        limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault());
    }
    if (keyOrdered) {
        // The rows are already in the required group-by key order.
        return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
    } else {
        // Otherwise, collect them all up in an in-memory map.
        return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
    }
}
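As the javadoc above notes, each region returns only partial aggregates, so the client must re-sort and re-aggregate rows that share a group key. Below is a minimal sketch of that final merge for the COUNT(*) example; Map.Entry<String, Long> is a hypothetical stand-in for Phoenix's actual (key, count) tuples, not its real client API.

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Sums the partial COUNT(*) rows for each key, as they arrive from different regions.
static Map<String, Long> mergePartialCounts(List<Map.Entry<String, Long>> partials) {
    // TreeMap keeps keys sorted, mirroring the sort the client is required to do.
    Map<String, Long> merged = new TreeMap<>();
    for (Map.Entry<String, Long> partial : partials) {
        merged.merge(partial.getKey(), partial.getValue(), Long::sum);
    }
    return merged;
}

Given partial rows (a, 2), (b, 1), (a, 1) from two regions, the merge yields (a, 3) and (b, 1), matching the javadoc's expected output.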
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project atlas by Apache.
From the class HBaseAtlasCoprocessor, method preScannerOpen.
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
        RegionScanner s) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> HBaseAtlasCoprocessor.preScannerOpen()");
    }
    final RegionScanner ret;
    try {
        activatePluginClassLoader();
        ret = implRegionObserver.preScannerOpen(c, scan, s);
    } finally {
        deactivatePluginClassLoader();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("<== HBaseAtlasCoprocessor.preScannerOpen()");
    }
    return ret;
}
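The activate/deactivate pair is not part of this snippet. The usual shape of such bracketing is a thread-context-classloader swap around the delegated call; the sketch below illustrates that idea, with both fields being assumptions rather than the actual Atlas implementation.

// Assumed fields: pluginClassLoader loads the real observer implementation;
// savedClassLoader remembers the host classloader across the bracketed call.
private ClassLoader pluginClassLoader;
private ClassLoader savedClassLoader;

private void activatePluginClassLoader() {
    savedClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(pluginClassLoader);
}

private void deactivatePluginClassLoader() {
    Thread.currentThread().setContextClassLoader(savedClassLoader);
}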
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project atlas by Apache.
From the class HBaseAtlasCoprocessor, method postScannerOpen.
@Override
public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
        RegionScanner s) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> HBaseAtlasCoprocessor.postScannerOpen()");
    }
    final RegionScanner ret;
    try {
        activatePluginClassLoader();
        ret = implRegionObserver.postScannerOpen(c, scan, s);
    } finally {
        deactivatePluginClassLoader();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("<== HBaseAtlasCoprocessor.postScannerOpen()");
    }
    return ret;
}
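For completeness, the implRegionObserver delegate used by both hooks is typically instantiated through the same plugin classloader, so its dependencies never leak into the host HBase classpath. A hypothetical sketch; the class name and reflection details are assumptions, not the actual Atlas wiring:

// Load the concrete observer through the plugin classloader to keep its
// dependencies isolated from the HBase server classpath.
activatePluginClassLoader();
try {
    Class<?> cls = Class.forName(
        "org.apache.atlas.hbase.hook.HBaseAtlasCoprocessorImpl", // assumed class name
        true, pluginClassLoader);
    implRegionObserver = (RegionObserver) cls.getDeclaredConstructor().newInstance();
} catch (Exception e) {
    throw new IOException("Failed to load plugin observer", e);
} finally {
    deactivatePluginClassLoader();
}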
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by Apache.
From the class RegionProcedureStore, method load.
@Override
public void load(ProcedureLoader loader) throws IOException {
    List<ProcedureProtos.Procedure> procs = new ArrayList<>();
    long maxProcId = 0;
    try (RegionScanner scanner =
            region.getRegionScanner(new Scan().addColumn(PROC_FAMILY, PROC_QUALIFIER))) {
        List<Cell> cells = new ArrayList<>();
        boolean moreRows;
        do {
            moreRows = scanner.next(cells);
            if (cells.isEmpty()) {
                continue;
            }
            Cell cell = cells.get(0);
            cells.clear();
            maxProcId = Math.max(maxProcId,
                Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
            if (cell.getValueLength() > 0) {
                ProcedureProtos.Procedure proto = ProcedureProtos.Procedure.parser()
                    .parseFrom(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                procs.add(proto);
            }
        } while (moreRows);
    }
    loader.setMaxProcId(maxProcId);
    ProcedureTree tree = ProcedureTree.build(procs);
    loader.load(tree.getValidProcs());
    loader.handleCorrupted(tree.getCorruptedProcs());
}
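This loader implies a simple row layout: the row key is the big-endian long procId (hence the Bytes.toLong over the row bytes) and the cell value is the serialized procedure proto, with a zero-length value acting as a tombstone that load skips. A hedged sketch of the corresponding write, assuming protoBytes holds the serialized ProcedureProtos.Procedure:

// Row key: big-endian procId; value: serialized procedure proto.
// An empty value would mark a deleted procedure, which load() skips.
Put put = new Put(Bytes.toBytes(procId))
    .addColumn(PROC_FAMILY, PROC_QUALIFIER, protoBytes);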
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by Apache.
From the class MultiRowMutationEndpoint, method matches.
private boolean matches(Region region, ClientProtos.Condition condition) throws IOException {
    byte[] row = condition.getRow().toByteArray();
    Filter filter = null;
    byte[] family = null;
    byte[] qualifier = null;
    CompareOperator op = null;
    ByteArrayComparable comparator = null;
    if (condition.hasFilter()) {
        filter = ProtobufUtil.toFilter(condition.getFilter());
    } else {
        family = condition.getFamily().toByteArray();
        qualifier = condition.getQualifier().toByteArray();
        op = CompareOperator.valueOf(condition.getCompareType().name());
        comparator = ProtobufUtil.toComparator(condition.getComparator());
    }
    TimeRange timeRange = condition.hasTimeRange()
        ? ProtobufUtil.toTimeRange(condition.getTimeRange())
        : TimeRange.allTime();
    Get get = new Get(row);
    if (family != null) {
        checkFamily(region, family);
        get.addColumn(family, qualifier);
    }
    if (filter != null) {
        get.setFilter(filter);
    }
    if (timeRange != null) {
        get.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    boolean matches = false;
    try (RegionScanner scanner = region.getScanner(new Scan(get))) {
        // NOTE: Please don't use HRegion.get() instead,
        // because it will copy cells to heap. See HBASE-26036.
        List<Cell> result = new ArrayList<>();
        scanner.next(result);
        if (filter != null) {
            if (!result.isEmpty()) {
                matches = true;
            }
        } else {
            boolean valueIsNull =
                comparator.getValue() == null || comparator.getValue().length == 0;
            if (result.isEmpty() && valueIsNull) {
                matches = true;
            } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && valueIsNull) {
                matches = true;
            } else if (result.size() == 1 && !valueIsNull) {
                Cell kv = result.get(0);
                int compareResult = PrivateCellUtil.compareValue(kv, comparator);
                matches = matches(op, compareResult);
            }
        }
    }
    return matches;
}
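The two-argument matches overload invoked above is not part of this snippet. A minimal sketch of what it needs to do is below: map the comparator's integer result to a boolean per the requested operator, with a sign convention that must agree with PrivateCellUtil.compareValue.

// Sketch of the matches(CompareOperator, int) overload used above.
private static boolean matches(CompareOperator op, int compareResult) {
    switch (op) {
        case LESS:             return compareResult < 0;
        case LESS_OR_EQUAL:    return compareResult <= 0;
        case EQUAL:            return compareResult == 0;
        case NOT_EQUAL:        return compareResult != 0;
        case GREATER_OR_EQUAL: return compareResult >= 0;
        case GREATER:          return compareResult > 0;
        default:
            throw new IllegalArgumentException("Unknown compare op: " + op);
    }
}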