Use of org.apache.hadoop.hbase.KeyValue in project hbase by Apache.
The class TestScannersFromClientSide, method testGetRowOffset.
/**
 * Test from client side for get with rowOffset
 *
 * @throws Exception
 */
@Test
public void testGetRowOffset() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3);
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 20);
  Table ht = TEST_UTIL.createTable(tableName, FAMILIES);
  Get get;
  Put put;
  Result result;
  boolean toLog = true;
  List<Cell> kvListExp;

  // Insert one CF for row
  kvListExp = new ArrayList<>();
  put = new Put(ROW);
  for (int i = 0; i < 10; i++) {
    KeyValue kv = new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE);
    put.add(kv);
    // skipping first two kvs
    if (i < 2) continue;
    kvListExp.add(kv);
  }
  ht.put(put);

  // setting offset to 2
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(2);
  result = ht.get(get);
  verifyResult(result, kvListExp, toLog, "Testing basic setRowOffset");

  // setting offset to 20
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(20);
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  verifyResult(result, kvListExp, toLog, "Testing offset > #kvs");

  // offset + maxResultPerCF
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(4);
  get.setMaxResultsPerColumnFamily(5);
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  for (int i = 4; i < 9; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE));
  }
  verifyResult(result, kvListExp, toLog, "Testing offset + setMaxResultsPerCF");

  // Filters: ColumnRangeFilter
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(1);
  get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], true));
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[3], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[4], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[5], 1, VALUE));
  verifyResult(result, kvListExp, toLog, "Testing offset with CRF");

  // 10 columns for CF2, 10 columns for CF1
  for (int j = 2; j > 0; j--) {
    put = new Put(ROW);
    for (int i = 0; i < 10; i++) {
      KeyValue kv = new KeyValue(ROW, FAMILIES[j], QUALIFIERS[i], 1, VALUE);
      put.add(kv);
    }
    ht.put(put);
  }
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(4);
  get.setMaxResultsPerColumnFamily(2);
  get.addFamily(FAMILIES[1]);
  get.addFamily(FAMILIES[2]);
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  // Exp: CF1:q4, q5, CF2: q4, q5
  kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[4], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[5], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[4], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[5], 1, VALUE));
  verifyResult(result, kvListExp, toLog, "Testing offset + multiple CFs + maxResults");
}
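For quick reference, the pattern this test exercises boils down to pairing setRowOffsetPerColumnFamily with setMaxResultsPerColumnFamily on a Get. A minimal, self-contained sketch, assuming a Configuration conf is available; the table name "t1" and row key "row1" are hypothetical placeholders, not from the test:

// Sketch: skip the first 2 columns of each family, then return at most 5.
try (Connection conn = ConnectionFactory.createConnection(conf);
     Table table = conn.getTable(TableName.valueOf("t1"))) {
  Get get = new Get(Bytes.toBytes("row1"));
  get.setRowOffsetPerColumnFamily(2);   // skip the first 2 columns in each family
  get.setMaxResultsPerColumnFamily(5);  // then cap the result at 5 columns per family
  Result result = table.get(get);       // the 3rd through 7th columns of each family
}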
Use of org.apache.hadoop.hbase.KeyValue in project hbase by Apache.
The class TestScannersFromClientSide, method testScanOnReopenedRegion.
/**
 * Test from client side for scan while the region is reopened
 * on the same region server.
 *
 * @throws Exception
 */
@Test
public void testScanOnReopenedRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 2);
  Table ht = TEST_UTIL.createTable(tableName, FAMILY);
  Put put;
  Scan scan;
  Result result;
  ResultScanner scanner;
  boolean toLog = false;
  List<Cell> kvListExp;

  // table: row, family, c0:0, c1:1
  put = new Put(ROW);
  for (int i = 0; i < QUALIFIERS.length; i++) {
    KeyValue kv = new KeyValue(ROW, FAMILY, QUALIFIERS[i], i, VALUE);
    put.add(kv);
  }
  ht.put(put);

  scan = new Scan().withStartRow(ROW);
  scanner = ht.getScanner(scan);

  HRegionLocation loc;
  try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    loc = locator.getRegionLocation(ROW);
  }
  HRegionInfo hri = loc.getRegionInfo();
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  byte[] regionName = hri.getRegionName();
  int i = cluster.getServerWith(regionName);
  HRegionServer rs = cluster.getRegionServer(i);

  // Close the region and wait until it goes offline.
  ProtobufUtil.closeRegion(null, rs.getRSRpcServices(), rs.getServerName(), regionName);
  long startTime = EnvironmentEdgeManager.currentTime();
  long timeOut = 300000;
  while (true) {
    if (rs.getOnlineRegion(regionName) == null) {
      break;
    }
    assertTrue("Timed out in closing the testing region",
      EnvironmentEdgeManager.currentTime() < startTime + timeOut);
    Thread.sleep(500);
  }

  // Now open the region again.
  HMaster master = cluster.getMaster();
  RegionStates states = master.getAssignmentManager().getRegionStates();
  states.regionOffline(hri);
  states.updateRegionState(hri, State.OPENING);
  ProtobufUtil.openRegion(null, rs.getRSRpcServices(), rs.getServerName(), hri);
  startTime = EnvironmentEdgeManager.currentTime();
  while (true) {
    if (rs.getOnlineRegion(regionName) != null) {
      break;
    }
    assertTrue("Timed out in opening the testing region",
      EnvironmentEdgeManager.currentTime() < startTime + timeOut);
    Thread.sleep(500);
  }

  // c0:0, c1:1
  kvListExp = new ArrayList<>();
  kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[0], 0, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[1], 1, VALUE));
  result = scanner.next();
  verifyResult(result, kvListExp, toLog, "Testing scan on re-opened region");
}
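Note that the scanner opened before the close keeps working after the region is reopened: the HBase client transparently retries and re-locates the region on the next call to next(). Also, the two wait loops above share the same poll-until-condition shape; a hedged sketch of factoring them into a helper follows (the helper name and predicate are illustrative, not HBase API):

// Illustrative helper, not part of HBase: poll until a condition holds
// or the timeout elapses, failing the test on timeout.
private static void waitFor(long timeoutMs, java.util.function.BooleanSupplier done)
    throws InterruptedException {
  long start = EnvironmentEdgeManager.currentTime();
  while (!done.getAsBoolean()) {
    assertTrue("Timed out waiting for region state change",
      EnvironmentEdgeManager.currentTime() < start + timeoutMs);
    Thread.sleep(500);
  }
}
// Usage: waitFor(timeOut, () -> rs.getOnlineRegion(regionName) == null);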
Use of org.apache.hadoop.hbase.KeyValue in project hbase by Apache.
The class TestScannersFromClientSide, method testSmallScan.
@Test
public void testSmallScan() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  int numRows = 10;
  byte[][] ROWS = HTestConst.makeNAscii(ROW, numRows);
  int numQualifiers = 10;
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, numQualifiers);
  Table ht = TEST_UTIL.createTable(tableName, FAMILY);
  Put put;
  List<Put> puts = new ArrayList<>();
  for (int row = 0; row < ROWS.length; row++) {
    put = new Put(ROWS[row]);
    for (int qual = 0; qual < QUALIFIERS.length; qual++) {
      KeyValue kv = new KeyValue(ROWS[row], FAMILY, QUALIFIERS[qual], VALUE);
      put.add(kv);
    }
    puts.add(put);
  }
  ht.put(puts);
  int expectedRows = numRows;
  int expectedCols = numRows * numQualifiers;
  // Test normal and reversed
  testSmallScan(ht, true, expectedRows, expectedCols);
  testSmallScan(ht, false, expectedRows, expectedCols);
}
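The private testSmallScan helper is not shown here; the client-side setup it exercises is a Scan marked as small, optionally reversed. A hedged sketch of that setup follows (Scan#setSmall is deprecated in HBase 2.x in favor of Scan#setReadType; the loop body is illustrative):

// Sketch: a small (single-RPC-style, pread) scan, optionally reversed.
Scan scan = new Scan();
scan.setSmall(true);     // deprecated in 2.x; prefer scan.setReadType(Scan.ReadType.PREAD)
scan.setReversed(true);  // iterate rows in descending order when true
try (ResultScanner rs = ht.getScanner(scan)) {
  for (Result r : rs) {
    // count rows and cells here
  }
}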
Use of org.apache.hadoop.hbase.KeyValue in project hbase by Apache.
The class TestScannersFromClientSide, method testAsyncScanner.
private void testAsyncScanner(TableName table, int rowNumber, int familyNumber,
    int qualifierNumber, int caching, Consumer<Boolean> listener) throws Exception {
  assert rowNumber > 0;
  assert familyNumber > 0;
  assert qualifierNumber > 0;
  byte[] row = Bytes.toBytes("r");
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("q");
  byte[][] rows = makeNAsciiWithZeroPrefix(row, rowNumber);
  byte[][] families = makeNAsciiWithZeroPrefix(family, familyNumber);
  byte[][] qualifiers = makeNAsciiWithZeroPrefix(qualifier, qualifierNumber);
  Table ht = TEST_UTIL.createTable(table, families);
  boolean toLog = true;
  List<Cell> kvListExp = new ArrayList<>();
  List<Put> puts = new ArrayList<>();
  for (byte[] r : rows) {
    Put put = new Put(r);
    for (byte[] f : families) {
      for (byte[] q : qualifiers) {
        KeyValue kv = new KeyValue(r, f, q, 1, VALUE);
        put.add(kv);
        kvListExp.add(kv);
      }
    }
    puts.add(put);
    // Flush the batch every 1000 puts to keep the client-side buffer small.
    if (puts.size() > 1000) {
      ht.put(puts);
      puts.clear();
    }
  }
  if (!puts.isEmpty()) {
    ht.put(puts);
    puts.clear();
  }
  Scan scan = new Scan();
  scan.setAsyncPrefetch(true);
  if (caching > 0) {
    scan.setCaching(caching);
  }
  try (ResultScanner scanner = ht.getScanner(scan)) {
    assertTrue("Not an instance of async scanner", scanner instanceof ClientAsyncPrefetchScanner);
    ((ClientAsyncPrefetchScanner) scanner).setPrefetchListener(listener);
    List<Cell> kvListScan = new ArrayList<>();
    Result result;
    boolean first = true;
    int actualRows = 0;
    while ((result = scanner.next()) != null) {
      ++actualRows;
      // Wait for the prefetch cache to fill; see HBASE-17376.
      if (first) {
        TimeUnit.SECONDS.sleep(1);
        first = false;
      }
      for (Cell kv : result.listCells()) {
        kvListScan.add(kv);
      }
    }
    assertEquals(rowNumber, actualRows);
    // These cells may have different rows, but that is OK: Result#getRow
    // isn't used in verifyResult().
    result = Result.create(kvListScan);
    verifyResult(result, kvListExp, toLog, "Testing async scan");
  }
  TEST_UTIL.deleteTable(table);
}
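The key client-side switch here is Scan#setAsyncPrefetch(true), which makes Table#getScanner return a ClientAsyncPrefetchScanner that refills its result cache from a background thread while the caller consumes rows. A minimal sketch of just that setup, assuming a Table handle named table is already open:

Scan scan = new Scan();
scan.setAsyncPrefetch(true);  // request a ClientAsyncPrefetchScanner
scan.setCaching(100);         // rows fetched per RPC; prefetch refills in the background
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result r : scanner) {
    // next() returns from the prefetched cache when possible
  }
}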
Use of org.apache.hadoop.hbase.KeyValue in project hbase by Apache.
The class SampleRegionWALObserver, method preWALWrite.
@Override
public boolean preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> env,
    HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
  boolean bypass = false;
  // Check whether the table name matches.
  if (!Bytes.equals(info.getTable().toBytes(), this.tableName)) {
    return bypass;
  }
  preWALWriteCalled = true;
  // Here we're going to remove one KeyValue from the WALEdit and add
  // another one to it.
  List<Cell> cells = logEdit.getCells();
  Cell deletedCell = null;
  for (Cell cell : cells) {
    // Assume only one kv from the WALEdit matches.
    byte[] family = CellUtil.cloneFamily(cell);
    byte[] qualifier = CellUtil.cloneQualifier(cell);
    if (Arrays.equals(family, ignoredFamily) && Arrays.equals(qualifier, ignoredQualifier)) {
      LOG.debug("Found the KeyValue from WALEdit which should be ignored.");
      deletedCell = cell;
    }
    if (Arrays.equals(family, changedFamily) && Arrays.equals(qualifier, changedQualifier)) {
      LOG.debug("Found the KeyValue from WALEdit which should be changed.");
      cell.getValueArray()[cell.getValueOffset()] += 1;
    }
  }
  if (null != row) {
    cells.add(new KeyValue(row, addedFamily, addedQualifier));
  }
  if (deletedCell != null) {
    LOG.debug("About to delete a KeyValue from WALEdit.");
    cells.remove(deletedCell);
  }
  return bypass;
}
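For this observer to run, it must be registered as a WAL coprocessor. A hedged sketch of wiring it up through configuration (the key is the standard CoprocessorHost constant; the surrounding setup is illustrative):

// Register the WAL observer before the cluster or region server starts.
Configuration conf = HBaseConfiguration.create();
// CoprocessorHost.WAL_COPROCESSOR_CONF_KEY is "hbase.coprocessor.wal.classes".
conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
  SampleRegionWALObserver.class.getName());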