Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateResponse in project hbase by apache.
The class AggregateImplementation, method getRowNum.
/**
* Gives the row count for the given column family and column qualifier, in
* the given row range as defined in the Scan object.
*/
@Override
public void getRowNum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
  AggregateResponse response = null;
  long counter = 0L;
  List<Cell> results = new ArrayList<>();
  InternalScanner scanner = null;
  try {
    Scan scan = ProtobufUtil.toScan(request.getScan());
    byte[][] colFamilies = scan.getFamilies();
    byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
    NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
    byte[] qualifier = null;
    if (qualifiers != null && !qualifiers.isEmpty()) {
      qualifier = qualifiers.pollFirst();
    }
    if (scan.getFilter() == null && qualifier == null) {
      scan.setFilter(new FirstKeyOnlyFilter());
    }
    scanner = env.getRegion().getScanner(scan);
    boolean hasMoreRows = false;
    do {
      hasMoreRows = scanner.next(results);
      if (results.size() > 0) {
        counter++;
      }
      results.clear();
    } while (hasMoreRows);
    ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
    bb.rewind();
    response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
  } catch (IOException e) {
    CoprocessorRpcUtils.setControllerException(controller, e);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ignored) {
      }
    }
  }
  log.info("Row counter from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + counter);
  done.run(response);
}
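For orientation, here is a minimal client-side sketch of how this endpoint is usually reached through AggregationClient.rowCount. It is a sketch, not part of the snippet above: it assumes an HBase 2.x-style client (where AggregationClient is Closeable), that the AggregateImplementation coprocessor is deployed on the table, and it uses the placeholder table name "t1" and family "cf".

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class RowCountClientSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1")); // placeholder table name
         AggregationClient aggregationClient = new AggregationClient(conf)) {
      Scan scan = new Scan();
      // Family only, no qualifier: getRowNum then installs a FirstKeyOnlyFilter server-side.
      scan.addFamily(Bytes.toBytes("cf")); // placeholder family name
      long rows = aggregationClient.rowCount(table, new LongColumnInterpreter(), scan);
      System.out.println("row count = " + rows);
    }
  }
}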
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateResponse in project hbase by apache.
The class AggregateImplementation, method getMedian.
/**
* Gives a List containing sum of values and sum of weights.
* It is computed for the combination of column
* family and column qualifier(s) in the given row range as defined in the
* Scan object. In its current implementation, it takes one column family and
* two column qualifiers. The first qualifier is for values column and
* the second qualifier (optional) is for weight column.
*/
@Override
public void getMedian(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
  AggregateResponse response = null;
  InternalScanner scanner = null;
  try {
    ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
    S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
    Scan scan = ProtobufUtil.toScan(request.getScan());
    scanner = env.getRegion().getScanner(scan);
    byte[] colFamily = scan.getFamilies()[0];
    NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
    byte[] valQualifier = null, weightQualifier = null;
    if (qualifiers != null && !qualifiers.isEmpty()) {
      valQualifier = qualifiers.pollFirst();
      // if weighted median is requested, get qualifier for the weight column
      weightQualifier = qualifiers.pollLast();
    }
    List<Cell> results = new ArrayList<>();
    boolean hasMoreRows = false;
    do {
      tempVal = null;
      tempWeight = null;
      hasMoreRows = scanner.next(results);
      int listSize = results.size();
      for (int i = 0; i < listSize; i++) {
        Cell kv = results.get(i);
        tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
        if (weightQualifier != null) {
          tempWeight = ci.add(tempWeight, ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
        }
      }
      results.clear();
      sumVal = ci.add(sumVal, tempVal);
      sumWeights = ci.add(sumWeights, tempWeight);
    } while (hasMoreRows);
    ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
    S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
    ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
    AggregateResponse.Builder pair = AggregateResponse.newBuilder();
    pair.addFirstPart(first_sumVal);
    pair.addFirstPart(first_sumWeights);
    response = pair.build();
  } catch (IOException e) {
    CoprocessorRpcUtils.setControllerException(controller, e);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ignored) {
      }
    }
  }
  done.run(response);
}
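A hedged sketch of the matching client call: AggregationClient.median sends this request, reads the two firstPart entries back (sum of values, sum of weights) and uses them to locate the median with a follow-up scan. The connection, table and imports are assumed to be set up as in the rowCount sketch above; "cf", "value" and "weight" are placeholder names, and the cell values are assumed to be 8-byte longs readable by LongColumnInterpreter.

Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("value"));  // first qualifier: value column
scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("weight")); // second, optional qualifier: weight column
try (AggregationClient aggregationClient = new AggregationClient(HBaseConfiguration.create())) {
  Long weightedMedian = aggregationClient.median(table, new LongColumnInterpreter(), scan);
  System.out.println("weighted median = " + weightedMedian);
}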
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateResponse in project hbase by apache.
The class AggregationClient, method sum.
/**
* It sums up the value returned from various regions. In case qualifier is
* null, summation of all the column qualifiers in the given family is done.
* @param table table to scan.
* @param ci the user's ColumnInterpreter implementation
* @param scan the HBase scan object to use to read data from HBase
* @return sum <S>
* @throws Throwable The caller is supposed to handle the exception as they are thrown
*         & propagated to it.
*/
public <R, S, P extends Message, Q extends Message, T extends Message> S sum(final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
  final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false);
  class SumCallBack implements Batch.Callback<S> {
    S sumVal = null;
    public S getSumResult() {
      return sumVal;
    }
    @Override
    public synchronized void update(byte[] region, byte[] row, S result) {
      sumVal = ci.add(sumVal, result);
    }
  }
  SumCallBack sumCallBack = new SumCallBack();
  table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), new Batch.Call<AggregateService, S>() {
    @Override
    public S call(AggregateService instance) throws IOException {
      RpcController controller = new AggregationClientRpcController();
      // Not sure what is going on here why I have to do these casts. TODO.
      CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
      instance.getSum(controller, requestArg, rpcCallback);
      AggregateResponse response = rpcCallback.get();
      if (controller.failed()) {
        throw new IOException(controller.errorText());
      }
      if (response.getFirstPartCount() == 0) {
        return null;
      }
      ByteString b = response.getFirstPart(0);
      T t = getParsedGenericInstance(ci.getClass(), 4, b);
      S s = ci.getPromotedValueFromProto(t);
      return s;
    }
  }, sumCallBack);
  return sumCallBack.getSumResult();
}
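A short usage sketch for this method, reusing the connection, table and AggregationClient setup from the rowCount sketch above; "cf" and "q1" are placeholder names for a column whose cells hold 8-byte longs.

Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1")); // placeholder family/qualifier
Long total = aggregationClient.sum(table, new LongColumnInterpreter(), scan);
System.out.println("sum = " + total);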
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateResponse in project hbase by apache.
The class AggregationClient, method min.
/**
* It gives the minimum value of a column for a given column family for the
* given range. In case qualifier is null, a min of all values for the given
* family is returned.
* @param table table to scan.
* @param ci the user's ColumnInterpreter implementation
* @param scan the HBase scan object to use to read data from HBase
* @return min val <R>
* @throws Throwable The caller is supposed to handle the exception as they are thrown
*         & propagated to it.
*/
public <R, S, P extends Message, Q extends Message, T extends Message> R min(final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
  final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false);
  class MinCallBack implements Batch.Callback<R> {
    private R min = null;
    public R getMinimum() {
      return min;
    }
    @Override
    public synchronized void update(byte[] region, byte[] row, R result) {
      min = (min == null || (result != null && ci.compare(result, min) < 0)) ? result : min;
    }
  }
  MinCallBack minCallBack = new MinCallBack();
  table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), new Batch.Call<AggregateService, R>() {
    @Override
    public R call(AggregateService instance) throws IOException {
      RpcController controller = new AggregationClientRpcController();
      CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
      instance.getMin(controller, requestArg, rpcCallback);
      AggregateResponse response = rpcCallback.get();
      if (controller.failed()) {
        throw new IOException(controller.errorText());
      }
      if (response.getFirstPartCount() > 0) {
        ByteString b = response.getFirstPart(0);
        Q q = getParsedGenericInstance(ci.getClass(), 3, b);
        return ci.getCellValueFromProto(q);
      }
      return null;
    }
  }, minCallBack);
  log.debug("Min from all regions is: " + minCallBack.getMinimum());
  return minCallBack.getMinimum();
}
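A usage sketch for min, this time with the BigDecimalColumnInterpreter that ships alongside LongColumnInterpreter, to show that the R return type follows the chosen interpreter (imports assumed: java.math.BigDecimal and org.apache.hadoop.hbase.client.coprocessor.BigDecimalColumnInterpreter). It assumes the cells under the placeholder column "cf:q1" were written with Bytes.toBytes(BigDecimal), and reuses the table and client setup from the rowCount sketch.

Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1")); // placeholder family/qualifier
BigDecimal smallest = aggregationClient.min(table, new BigDecimalColumnInterpreter(), scan);
System.out.println("min = " + smallest);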
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateResponse in project hbase by apache.
The class AggregationClient, method getAvgArgs.
/**
* It computes average while fetching sum and row count from all the
* corresponding regions. Approach is to compute a global sum of region level
* sum and rowcount and then compute the average.
* @param table table to scan.
* @param ci the user's ColumnInterpreter implementation
* @param scan the HBase scan object to use to read data from HBase
* @return pair holding the promoted sum and the row count across the scanned regions
* @throws Throwable The caller is supposed to handle the exception as they are thrown
*         & propagated to it.
*/
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
  final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false);
  class AvgCallBack implements Batch.Callback<Pair<S, Long>> {
    S sum = null;
    Long rowCount = 0L;
    public synchronized Pair<S, Long> getAvgArgs() {
      return new Pair<>(sum, rowCount);
    }
    @Override
    public synchronized void update(byte[] region, byte[] row, Pair<S, Long> result) {
      sum = ci.add(sum, result.getFirst());
      rowCount += result.getSecond();
    }
  }
  AvgCallBack avgCallBack = new AvgCallBack();
  table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), new Batch.Call<AggregateService, Pair<S, Long>>() {
    @Override
    public Pair<S, Long> call(AggregateService instance) throws IOException {
      RpcController controller = new AggregationClientRpcController();
      CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
      instance.getAvg(controller, requestArg, rpcCallback);
      AggregateResponse response = rpcCallback.get();
      if (controller.failed()) {
        throw new IOException(controller.errorText());
      }
      Pair<S, Long> pair = new Pair<>(null, 0L);
      if (response.getFirstPartCount() == 0) {
        return pair;
      }
      ByteString b = response.getFirstPart(0);
      T t = getParsedGenericInstance(ci.getClass(), 4, b);
      S s = ci.getPromotedValueFromProto(t);
      pair.setFirst(s);
      ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
      bb.rewind();
      pair.setSecond(bb.getLong());
      return pair;
    }
  }, avgCallBack);
  return avgCallBack.getAvgArgs();
}
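For reference, the pair produced here is consumed by dividing the promoted sum by the row count through the interpreter's divideForAvg. A minimal sketch of such a public avg wrapper around getAvgArgs (the generic bounds mirror the signatures above; this is illustrative, not necessarily the exact upstream method):

public <R, S, P extends Message, Q extends Message, T extends Message> double avg(
    final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
  // Global sum and global row count gathered from all regions.
  Pair<S, Long> pair = getAvgArgs(table, ci, scan);
  // divideForAvg is part of the ColumnInterpreter contract.
  return ci.divideForAvg(pair.getFirst(), pair.getSecond());
}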