Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.
The class AggregateImplementation, method getMedian:
/**
 * Gives a List containing the sum of values and the sum of weights.
 * It is computed for the combination of column family and column qualifier(s)
 * in the given row range as defined in the Scan object. In its current
 * implementation, it takes one column family and two column qualifiers: the
 * first qualifier is for the value column and the second (optional) qualifier
 * is for the weight column.
 */
@Override
public void getMedian(RpcController controller, AggregateRequest request,
    RpcCallback<AggregateResponse> done) {
  AggregateResponse response = null;
  InternalScanner scanner = null;
  try {
    ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
    S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
    Scan scan = ProtobufUtil.toScan(request.getScan());
    scanner = env.getRegion().getScanner(scan);
    byte[] colFamily = scan.getFamilies()[0];
    NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
    byte[] valQualifier = null, weightQualifier = null;
    if (qualifiers != null && !qualifiers.isEmpty()) {
      valQualifier = qualifiers.pollFirst();
      // if weighted median is requested, get qualifier for the weight column
      weightQualifier = qualifiers.pollLast();
    }
    List<Cell> results = new ArrayList<>();
    boolean hasMoreRows = false;
    do {
      tempVal = null;
      tempWeight = null;
      hasMoreRows = scanner.next(results);
      int listSize = results.size();
      for (int i = 0; i < listSize; i++) {
        Cell kv = results.get(i);
        tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
        if (weightQualifier != null) {
          tempWeight =
              ci.add(tempWeight, ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
        }
      }
      results.clear();
      sumVal = ci.add(sumVal, tempVal);
      sumWeights = ci.add(sumWeights, tempWeight);
    } while (hasMoreRows);
    ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
    S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
    ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
    AggregateResponse.Builder pair = AggregateResponse.newBuilder();
    pair.addFirstPart(first_sumVal);
    pair.addFirstPart(first_sumWeights);
    response = pair.build();
  } catch (IOException e) {
    CoprocessorRpcUtils.setControllerException(controller, e);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ignored) {
      }
    }
  }
  done.run(response);
}
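For context, this endpoint only returns the per-region sums of values and weights; the weighted median itself is assembled on the client. Below is a minimal client-side sketch, assuming the AggregationClient and LongColumnInterpreter classes from the same module; the table name demo_table and the cf:value column are placeholders, AggregateImplementation must be loaded as a coprocessor on that table, and minor API details vary between HBase versions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class MedianClientSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    // One column family; the first qualifier holds the values, and a second
    // (optional) qualifier would hold the weights for a weighted median.
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("value"));
    try (AggregationClient aggregationClient = new AggregationClient(conf)) {
      // Drives getMedian() on every region and finishes the computation client-side.
      Long median = aggregationClient.median(TableName.valueOf("demo_table"),
          new LongColumnInterpreter(), scan);
      System.out.println("median = " + median);
    }
  }
}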
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.
The class ColumnAggregationEndpoint, method sum:
@Override
public void sum(RpcController controller, SumRequest request, RpcCallback<SumResponse> done) {
  // aggregate at each region
  Scan scan = new Scan();
  // Family is required in pb. Qualifier is not.
  byte[] family = request.getFamily().toByteArray();
  byte[] qualifier = request.hasQualifier() ? request.getQualifier().toByteArray() : null;
  if (request.hasQualifier()) {
    scan.addColumn(family, qualifier);
  } else {
    scan.addFamily(family);
  }
  int sumResult = 0;
  InternalScanner scanner = null;
  try {
    scanner = this.env.getRegion().getScanner(scan);
    List<Cell> curVals = new ArrayList<>();
    boolean hasMore = false;
    do {
      curVals.clear();
      hasMore = scanner.next(curVals);
      for (Cell kv : curVals) {
        if (CellUtil.matchingQualifier(kv, qualifier)) {
          sumResult += Bytes.toInt(kv.getValueArray(), kv.getValueOffset());
        }
      }
    } while (hasMore);
  } catch (IOException e) {
    CoprocessorRpcUtils.setControllerException(controller, e);
    // Set result to -1 to indicate error.
    sumResult = -1;
    LOG.info("Setting sum result to -1 to indicate error", e);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException e) {
        CoprocessorRpcUtils.setControllerException(controller, e);
        sumResult = -1;
        LOG.info("Setting sum result to -1 to indicate error", e);
      }
    }
  }
  LOG.info("Returning result " + sumResult);
  done.run(SumResponse.newBuilder().setSum(sumResult).build());
}
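The heart of this endpoint is the drain loop over the InternalScanner. Pulled out on its own, the pattern looks roughly like the sketch below; the helper and class names are illustrative only, and the code uses nothing beyond the APIs already visible in the snippet above:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;

final class ScannerDrainSketch {
  // Sum the int-encoded values of all cells whose qualifier matches `qualifier`,
  // draining the scanner one row batch at a time, as the endpoint above does.
  static long sumIntValues(InternalScanner scanner, byte[] qualifier) throws IOException {
    long sum = 0;
    List<Cell> curVals = new ArrayList<>();
    boolean hasMore;
    do {
      curVals.clear();
      // next() fills the list with the cells of the next row and returns true while rows remain.
      hasMore = scanner.next(curVals);
      for (Cell kv : curVals) {
        if (CellUtil.matchingQualifier(kv, qualifier)) {
          sum += Bytes.toInt(kv.getValueArray(), kv.getValueOffset());
        }
      }
    } while (hasMore);
    return sum;
  }
}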
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.
The class TestStripeCompactor, method createCompactor:
private StripeCompactor createCompactor(StoreFileWritersCapture writers, KeyValue[] input)
    throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
  final Scanner scanner = new Scanner(input);
  // Create store mock that is satisfactory for compactor.
  HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
  ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
  Store store = mock(Store.class);
  when(store.getFamily()).thenReturn(col);
  when(store.getScanInfo()).thenReturn(si);
  when(store.areWritesEnabled()).thenReturn(true);
  when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
  when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
  when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
      anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
  when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
  return new StripeCompactor(conf, store) {
    @Override
    protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
        long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow,
        byte[] dropDeletesToRow) throws IOException {
      return scanner;
    }

    @Override
    protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
        ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
      return scanner;
    }
  };
}
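The Scanner returned by both createScanner overrides is a small test stub that serves a fixed array of KeyValues through the InternalScanner interface. A rough sketch of such a stub follows; it is not the actual helper from the HBase test code, and whether the ScannerContext overload is required depends on the HBase branch:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;

// Sketch of an in-memory InternalScanner stub for compaction tests.
final class ArrayBackedScanner implements InternalScanner {
  private final KeyValue[] cells;
  private int pos = 0;

  ArrayBackedScanner(KeyValue[] cells) {
    this.cells = cells;
  }

  @Override
  public boolean next(List<Cell> results) throws IOException {
    // Hand all remaining cells to the caller in one batch.
    while (pos < cells.length) {
      results.add(cells[pos++]);
    }
    return false; // nothing more to scan
  }

  @Override
  public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
    return next(results);
  }

  @Override
  public void close() throws IOException {
    // nothing to release for an in-memory stub
  }
}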
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.
The class RowCountEndpoint, method getKeyValueCount:
/**
 * Returns a count of all KeyValues in the region where this coprocessor is loaded.
 */
@Override
public void getKeyValueCount(RpcController controller, ExampleProtos.CountRequest request,
    RpcCallback<ExampleProtos.CountResponse> done) {
  ExampleProtos.CountResponse response = null;
  InternalScanner scanner = null;
  try {
    scanner = env.getRegion().getScanner(new Scan());
    List<Cell> results = new ArrayList<>();
    boolean hasMore = false;
    long count = 0;
    do {
      hasMore = scanner.next(results);
      for (Cell kv : results) {
        count++;
      }
      results.clear();
    } while (hasMore);
    response = ExampleProtos.CountResponse.newBuilder().setCount(count).build();
  } catch (IOException ioe) {
    CoprocessorRpcUtils.setControllerException(controller, ioe);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ignored) {
      }
    }
  }
  done.run(response);
}
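From the client side this endpoint is reached through Table.coprocessorService and the generated ExampleProtos.RowCountService stub. The following is a hedged sketch of such a call that sums the per-region counts; connection, tableName, and the helper name are placeholders, and the blocking-callback class differs slightly between HBase versions:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;

final class RowCountClientSketch {
  // Sum the per-region KeyValue counts returned by RowCountService.getKeyValueCount.
  static long countKeyValues(Connection connection, TableName tableName) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
      final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
      Map<byte[], Long> results = table.coprocessorService(ExampleProtos.RowCountService.class,
          // null start/end rows mean "every region of the table"
          null, null,
          new Batch.Call<ExampleProtos.RowCountService, Long>() {
            @Override
            public Long call(ExampleProtos.RowCountService counter) throws IOException {
              CoprocessorRpcUtils.BlockingRpcCallback<ExampleProtos.CountResponse> callback =
                  new CoprocessorRpcUtils.BlockingRpcCallback<>();
              counter.getKeyValueCount(request, callback);
              ExampleProtos.CountResponse response = callback.get();
              return response.hasCount() ? response.getCount() : 0L;
            }
          });
      long total = 0;
      for (Long regionCount : results.values()) {
        total += regionCount;
      }
      return total;
    }
  }
}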
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.
The class Compactor, method compact:
protected List<Path> compact(final CompactionRequest request,
    InternalScannerFactory scannerFactory, CellSinkFactory<T> sinkFactory,
    ThroughputController throughputController, User user) throws IOException {
  FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles());
  this.progress = new CompactionProgress(fd.maxKeyCount);
  // Find the smallest read point across all the Scanners.
  long smallestReadPoint = getSmallestReadPoint();
  List<StoreFileScanner> scanners;
  Collection<StoreFile> readersToClose;
  T writer = null;
  boolean dropCache;
  if (request.isMajor() || request.isAllFiles()) {
    dropCache = this.dropCacheMajor;
  } else {
    dropCache = this.dropCacheMinor;
  }
  if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) {
    // clone all StoreFiles, so we'll do the compaction on an independent copy of StoreFiles,
    // HFiles, and their readers
    readersToClose = new ArrayList<>(request.getFiles().size());
    for (StoreFile f : request.getFiles()) {
      StoreFile clonedStoreFile = f.cloneForReader();
      // create the reader after the store file is cloned in case
      // the sequence id is used for sorting in scanners
      clonedStoreFile.createReader();
      readersToClose.add(clonedStoreFile);
    }
    scanners = createFileScanners(readersToClose, smallestReadPoint, dropCache);
  } else {
    readersToClose = Collections.emptyList();
    scanners = createFileScanners(request.getFiles(), smallestReadPoint, dropCache);
  }
  InternalScanner scanner = null;
  boolean finished = false;
  try {
    /* Include deletes, unless we are doing a major compaction */
    ScanType scanType = scannerFactory.getScanType(request);
    scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners, user,
        smallestReadPoint);
    if (scanner == null) {
      scanner = scannerFactory.createScanner(scanners, scanType, fd, smallestReadPoint);
    }
    scanner = postCreateCoprocScanner(request, scanType, scanner, user);
    if (scanner == null) {
      // NULL scanner returned from coprocessor hooks means skip normal processing.
      return new ArrayList<>();
    }
    boolean cleanSeqId = false;
    if (fd.minSeqIdToKeep > 0) {
      smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
      cleanSeqId = true;
    }
    writer = sinkFactory.createWriter(scanner, fd, dropCache);
    finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId,
        throughputController, request.isAllFiles(), request.getFiles().size());
    if (!finished) {
      throw new InterruptedIOException("Aborting compaction of store " + store + " in region "
          + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");
    }
  } finally {
    Closeables.close(scanner, true);
    for (StoreFile f : readersToClose) {
      try {
        f.closeReader(true);
      } catch (IOException e) {
        LOG.warn("Exception closing " + f, e);
      }
    }
    if (!finished && writer != null) {
      abortWriter(writer);
    }
  }
  assert finished : "We should have exited the method on all error paths";
  assert writer != null : "Writer should be non-null if no error";
  return commitWriter(writer, fd, request);
}
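The InternalScanner assembled above is consumed inside performCompaction (not shown here), whose core is a batched next()/append() loop. The sketch below is a simplification under the assumption of a plain CellSink writer; the real method additionally handles throughput control, sequence-id cleaning, periodic shipping, and progress bookkeeping, and the helper name is invented for illustration:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.CellSink;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;

final class CompactionDrainSketch {
  // Simplified stand-in for the core of Compactor.performCompaction:
  // pull cells from the compaction scanner in bounded batches and append them to the sink.
  static void drainToSink(InternalScanner scanner, CellSink writer, int compactionKVMax)
      throws IOException {
    List<Cell> cells = new ArrayList<>();
    // Cap how many cells a single next() call may hand back.
    ScannerContext scannerContext =
        ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
    boolean hasMore;
    do {
      hasMore = scanner.next(cells, scannerContext);
      for (Cell c : cells) {
        writer.append(c);
      }
      cells.clear();
    } while (hasMore);
  }
}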