Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.
Class TestPartialResultsFromClientSide, method testMayHaveMoreCellsInRowReturnsTrueAndSetBatch.
@Test
public void testMayHaveMoreCellsInRowReturnsTrueAndSetBatch() throws IOException {
  Table table = createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES,
      QUALIFIERS, VALUE);
  Scan scan = new Scan();
  scan.setBatch(1);
  scan.setFilter(new FirstKeyOnlyFilter());
  ResultScanner scanner = table.getScanner(scan);
  Result result;
  while ((result = scanner.next()) != null) {
    assertTrue(result.rawCells() != null);
    assertEquals(1, result.rawCells().length);
  }
}
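Note that the scanner in the test above is never closed explicitly; a standalone version of the same row-key-only scan would be. A minimal sketch, assuming an already-open Connection named conn and a hypothetical table name "t1", using try-with-resources:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FirstKeyScanSketch {
  // conn and the table name "t1" are placeholders for illustration.
  static void printRowKeys(Connection conn) throws IOException {
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter()); // return only the first cell of each row
    try (Table table = conn.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        System.out.println(Bytes.toString(result.getRow()));
      }
    }
  }
}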
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.
Class RegionMover, method isSuccessfulScan.
/**
 * Tries to scan a single row from the passed region, verifying that the region is online
 * and readable.
 * @param admin the cluster Admin, used to obtain a Connection
 * @param region the region to probe
 * @throws IOException if the scan fails
 */
private void isSuccessfulScan(Admin admin, HRegionInfo region) throws IOException {
  Scan scan = new Scan(region.getStartKey());
  scan.setBatch(1);
  scan.setCaching(1);
  scan.setFilter(new FirstKeyOnlyFilter());
  try {
    Table table = admin.getConnection().getTable(region.getTable());
    try {
      ResultScanner scanner = table.getScanner(scan);
      try {
        scanner.next();
      } finally {
        scanner.close();
      }
    } finally {
      table.close();
    }
  } catch (IOException e) {
    LOG.error("Could not scan region:" + region.getEncodedName(), e);
    throw e;
  }
}
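The nested try/finally blocks above predate try-with-resources. A functionally equivalent sketch of the same probe, assuming Admin, HRegionInfo, and LOG are as in RegionMover above:

// Sketch of the same single-row probe using try-with-resources.
private void isSuccessfulScan(Admin admin, HRegionInfo region) throws IOException {
  Scan scan = new Scan(region.getStartKey());
  scan.setBatch(1);    // at most one cell per Result
  scan.setCaching(1);  // fetch a single row per RPC
  scan.setFilter(new FirstKeyOnlyFilter()); // read only the first cell of the row
  try (Table table = admin.getConnection().getTable(region.getTable());
       ResultScanner scanner = table.getScanner(scan)) {
    scanner.next(); // one successful next() proves the region is readable
  } catch (IOException e) {
    LOG.error("Could not scan region:" + region.getEncodedName(), e);
    throw e;
  }
}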
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.
Class TestSeekBeforeWithReverseScan, method testReverseScanWithPadding.
@Test
public void testReverseScanWithPadding() throws Exception {
  byte[] terminator = new byte[] { -1 };
  byte[] row1 = Bytes.add(invert(Bytes.toBytes("a")), terminator);
  byte[] row2 = Bytes.add(invert(Bytes.toBytes("ab")), terminator);
  byte[] row3 = Bytes.add(invert(Bytes.toBytes("b")), terminator);
  Put put1 = new Put(row1);
  put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put2 = new Put(row2);
  put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put3 = new Put(row3);
  put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  region.put(put1);
  region.put(put2);
  region.put(put3);
  region.flush(true);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  scan.setReversed(true);
  scan.setFilter(new FirstKeyOnlyFilter());
  scan.addFamily(cfName);
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> res = new ArrayList<>();
  // RegionScanner.next(List) returns false on the last row while still
  // populating the list, so the count starts at 1 rather than 0.
  int count = 1;
  while (scanner.next(res)) {
    count++;
  }
  assertEquals(3, count);
}
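The invert() helper is not shown on this page. A plausible reconstruction, under the assumption that it simply flips every bit of each byte so that the lexicographic order of the generated row keys is reversed:

// Hypothetical reconstruction of the invert() helper used above: XOR-ing
// each byte with 0xFF inverts its bits, which reverses the lexicographic
// ordering of the resulting keys.
private byte[] invert(byte[] bytes) {
  byte[] result = new byte[bytes.length];
  for (int i = 0; i < bytes.length; i++) {
    result[i] = (byte) (bytes[i] ^ 0xFF);
  }
  return result;
}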
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.
Class BulkDeleteEndpoint, method delete.
@Override
public void delete(RpcController controller, BulkDeleteRequest request,
    RpcCallback<BulkDeleteResponse> done) {
  long totalRowsDeleted = 0L;
  long totalVersionsDeleted = 0L;
  Region region = env.getRegion();
  int rowBatchSize = request.getRowBatchSize();
  Long timestamp = null;
  if (request.hasTimestamp()) {
    timestamp = request.getTimestamp();
  }
  DeleteType deleteType = request.getDeleteType();
  boolean hasMore = true;
  RegionScanner scanner = null;
  try {
    Scan scan = ProtobufUtil.toScan(request.getScan());
    if (scan.getFilter() == null && deleteType == DeleteType.ROW) {
      // All we need are the row keys, so the first KV of each row is enough.
      // This filter can only be applied for row deletes; for the other delete
      // types we rely on the scan to know which columns are to be deleted.
      scan.setFilter(new FirstKeyOnlyFilter());
    }
    // We assume here that the scan carries the appropriate filter and the
    // necessary column(s).
    scanner = region.getScanner(scan);
    while (hasMore) {
      List<List<Cell>> deleteRows = new ArrayList<>(rowBatchSize);
      for (int i = 0; i < rowBatchSize; i++) {
        List<Cell> results = new ArrayList<>();
        hasMore = scanner.next(results);
        if (results.size() > 0) {
          deleteRows.add(results);
        }
        if (!hasMore) {
          // There are no more rows.
          break;
        }
      }
      if (deleteRows.size() > 0) {
        Mutation[] deleteArr = new Mutation[deleteRows.size()];
        int i = 0;
        for (List<Cell> deleteRow : deleteRows) {
          deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp);
        }
        OperationStatus[] opStatus =
            region.batchMutate(deleteArr, HConstants.NO_NONCE, HConstants.NO_NONCE);
        for (i = 0; i < opStatus.length; i++) {
          if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
            break;
          }
          totalRowsDeleted++;
          if (deleteType == DeleteType.VERSION) {
            byte[] versionsDeleted = deleteArr[i].getAttribute(NO_OF_VERSIONS_TO_DELETE);
            if (versionsDeleted != null) {
              totalVersionsDeleted += Bytes.toInt(versionsDeleted);
            }
          }
        }
      }
    }
  } catch (IOException ioe) {
    LOG.error(ioe);
    // Call ServerRpcController#getFailedOn() to retrieve this IOException on the client side.
    CoprocessorRpcUtils.setControllerException(controller, ioe);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ioe) {
        LOG.error(ioe);
      }
    }
  }
  Builder responseBuilder = BulkDeleteResponse.newBuilder();
  responseBuilder.setRowsDeleted(totalRowsDeleted);
  if (deleteType == DeleteType.VERSION) {
    responseBuilder.setVersionsDeleted(totalVersionsDeleted);
  }
  BulkDeleteResponse result = responseBuilder.build();
  done.run(result);
}
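The same optimization appears in row counting: when only row keys matter, FirstKeyOnlyFilter limits the scan payload to one cell per row, and combining it with KeyOnlyFilter strips the cell values as well. A hedged client-side sketch, reusing the imports of the earlier sketch plus org.apache.hadoop.hbase.filter.FilterList and org.apache.hadoop.hbase.filter.KeyOnlyFilter; conn and the table name "t1" are again placeholders:

static long countRows(Connection conn) throws IOException {
  Scan scan = new Scan();
  // One cell per row, and no values: a cheap way to enumerate rows.
  scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new KeyOnlyFilter()));
  long count = 0;
  try (Table table = conn.getTable(TableName.valueOf("t1"));
       ResultScanner scanner = table.getScanner(scan)) {
    for (Result ignored : scanner) {
      count++;
    }
  }
  return count;
}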
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project storm by apache.
Class HBaseWindowsStore, method getAllKeys.
@Override
public Iterable<String> getAllKeys() {
  Scan scan = new Scan();
  // This filter ensures that only the first cell of each row is returned,
  // so we receive the row keys without the values stored under them.
  scan.setFilter(new FirstKeyOnlyFilter());
  //scan.setCaching(1000);
  final Iterator<Result> resultIterator;
  try {
    resultIterator = htable().getScanner(scan).iterator();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  final Iterator<String> iterator = new Iterator<String>() {
    @Override
    public boolean hasNext() {
      return resultIterator.hasNext();
    }

    @Override
    public String next() {
      Result result = resultIterator.next();
      String key = null;
      try {
        key = new String(result.getRow(), UTF_8);
      } catch (UnsupportedEncodingException e) {
        throw new RuntimeException(e);
      }
      return key;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("remove operation is not supported");
    }
  };
  return new Iterable<String>() {
    @Override
    public Iterator<String> iterator() {
      return iterator;
    }
  };
}
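A usage sketch for the returned iterable, assuming an already-constructed HBaseWindowsStore instance named store (hypothetical):

// store is a hypothetical, already-configured HBaseWindowsStore instance.
for (String key : store.getAllKeys()) {
  System.out.println(key); // each entry is a UTF-8 decoded HBase row key
}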