Usage example of org.apache.hadoop.hbase.Cell in the Apache HBase project:
class CompressionTest, method doSmokeTest.
/**
 * Smoke-tests the named compression codec by writing a single-cell HFile with
 * it and reading the cell back, comparing row keys.
 *
 * @param fs    filesystem to create the temporary test file on
 * @param path  location of the temporary test HFile
 * @param codec compression codec name (resolved via HFileWriterImpl.compressionByName)
 * @throws Exception if the codec cannot be used or the row read back differs
 *                   from the row written
 */
public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context =
      new HFileContextBuilder().withCompression(HFileWriterImpl.compressionByName(codec)).build();
  HFile.Writer writer =
      HFile.getWriterFactoryNoCache(conf).withPath(fs, path).withFileContext(context).create();
  // Write any-old Cell...
  final byte[] rowKey = Bytes.toBytes("compressiontestkey");
  Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval"));
  try {
    writer.append(c);
    writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
  } finally {
    // Close in a finally so a failed append does not leak the writer
    // (mirrors the reader's try/finally below).
    writer.close();
  }
  Cell cc = null;
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, true);
    // position to the start of file
    scanner.seekTo();
    // Scanner does not do Cells yet. Do below for now till fixed.
    cc = scanner.getCell();
    if (CellComparator.COMPARATOR.compareRows(c, cc) != 0) {
      throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
    }
  } finally {
    reader.close();
  }
}
Usage example of org.apache.hadoop.hbase.Cell in the Apache HBase project:
class TestScannersFromClientSide, method testGetRowOffset.
/**
 * Client-side test of Get#setRowOffsetPerColumnFamily: alone, past the end of
 * the row, combined with max-results-per-CF, combined with a
 * ColumnRangeFilter, and across multiple column families.
 *
 * @throws Exception on any test failure
 */
@Test
public void testGetRowOffset() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3);
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 20);
  Table ht = TEST_UTIL.createTable(tableName, FAMILIES);
  final boolean toLog = true;

  // Load ten columns into CF0. Everything except the first two columns is
  // expected back once an offset of 2 is applied.
  List<Cell> expected = new ArrayList<>();
  Put put = new Put(ROW);
  for (int col = 0; col < 10; col++) {
    KeyValue kv = new KeyValue(ROW, FAMILIES[0], QUALIFIERS[col], 1, VALUE);
    put.add(kv);
    if (col >= 2) { // the offset of 2 skips the first two kvs
      expected.add(kv);
    }
  }
  ht.put(put);

  // Plain offset of 2.
  Get get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(2);
  verifyResult(ht.get(get), expected, toLog, "Testing basic setRowOffset");

  // Offset equal to the column count yields an empty result.
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(20);
  verifyResult(ht.get(get), new ArrayList<>(), toLog, "Testing offset > #kvs");

  // Offset combined with max results per column family: expect q4..q8.
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(4);
  get.setMaxResultsPerColumnFamily(5);
  expected = new ArrayList<>();
  for (int col = 4; col < 9; col++) {
    expected.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[col], 1, VALUE));
  }
  verifyResult(ht.get(get), expected, toLog, "Testing offset + setMaxResultsPerCF");

  // Offset applied after a ColumnRangeFilter [q2, q5]: expect q3..q5.
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(1);
  get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], true));
  expected = new ArrayList<>();
  for (int col = 3; col <= 5; col++) {
    expected.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[col], 1, VALUE));
  }
  verifyResult(ht.get(get), expected, toLog, "Testing offset with CRF");

  // Add ten columns each to CF2 and then CF1.
  for (int fam = 2; fam > 0; fam--) {
    Put p = new Put(ROW);
    for (int col = 0; col < 10; col++) {
      p.add(new KeyValue(ROW, FAMILIES[fam], QUALIFIERS[col], 1, VALUE));
    }
    ht.put(p);
  }

  // Offset + max results applied per family across two explicit families.
  get = new Get(ROW);
  get.setRowOffsetPerColumnFamily(4);
  get.setMaxResultsPerColumnFamily(2);
  get.addFamily(FAMILIES[1]);
  get.addFamily(FAMILIES[2]);
  // Expect CF1:q4,q5 and CF2:q4,q5.
  expected = new ArrayList<>();
  for (int fam = 1; fam <= 2; fam++) {
    for (int col = 4; col <= 5; col++) {
      expected.add(new KeyValue(ROW, FAMILIES[fam], QUALIFIERS[col], 1, VALUE));
    }
  }
  verifyResult(ht.get(get), expected, toLog, "Testing offset + multiple CFs + maxResults");
}
Usage example of org.apache.hadoop.hbase.Cell in the Apache HBase project:
class TestScannersFromClientSide, method testScanOnReopenedRegion.
/**
 * Test from client side for scan while the region is reopened
 * on the same region server.
 * <p>
 * A scanner is opened before the hosting region is closed and reopened in
 * place; the subsequent next() must still return the expected cells.
 *
 * @throws Exception
 */
@Test
public void testScanOnReopenedRegion() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 2);
Table ht = TEST_UTIL.createTable(tableName, FAMILY);
Put put;
Scan scan;
Result result;
ResultScanner scanner;
boolean toLog = false;
List<Cell> kvListExp;
// table: row, family, c0:0, c1:1
put = new Put(ROW);
for (int i = 0; i < QUALIFIERS.length; i++) {
KeyValue kv = new KeyValue(ROW, FAMILY, QUALIFIERS[i], i, VALUE);
put.add(kv);
}
ht.put(put);
// Open the scanner BEFORE the region is closed so it spans the reopen.
scan = new Scan().withStartRow(ROW);
scanner = ht.getScanner(scan);
// Locate the region holding ROW and the region server carrying it.
HRegionLocation loc;
try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
loc = locator.getRegionLocation(ROW);
}
HRegionInfo hri = loc.getRegionInfo();
MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
byte[] regionName = hri.getRegionName();
int i = cluster.getServerWith(regionName);
HRegionServer rs = cluster.getRegionServer(i);
// Close the region directly on the region server, bypassing the master.
ProtobufUtil.closeRegion(null, rs.getRSRpcServices(), rs.getServerName(), regionName);
long startTime = EnvironmentEdgeManager.currentTime();
long timeOut = 300000; // 5 minutes: generous bound for close/open to complete
// Poll until the region is no longer online on this server.
while (true) {
if (rs.getOnlineRegion(regionName) == null) {
break;
}
assertTrue("Timed out in closing the testing region", EnvironmentEdgeManager.currentTime() < startTime + timeOut);
Thread.sleep(500);
}
// Now open the region again.
HMaster master = cluster.getMaster();
RegionStates states = master.getAssignmentManager().getRegionStates();
// Sync the master's region-state view with the out-of-band close so the
// subsequent open request is accepted.
states.regionOffline(hri);
states.updateRegionState(hri, State.OPENING);
ProtobufUtil.openRegion(null, rs.getRSRpcServices(), rs.getServerName(), hri);
startTime = EnvironmentEdgeManager.currentTime();
// Poll until the region comes back online on the same server.
while (true) {
if (rs.getOnlineRegion(regionName) != null) {
break;
}
assertTrue("Timed out in open the testing region", EnvironmentEdgeManager.currentTime() < startTime + timeOut);
Thread.sleep(500);
}
// c0:0, c1:1
kvListExp = new ArrayList<>();
kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[0], 0, VALUE));
kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[1], 1, VALUE));
// The pre-existing scanner must still return the row after the reopen.
result = scanner.next();
verifyResult(result, kvListExp, toLog, "Testing scan on re-opened region");
}
Usage example of org.apache.hadoop.hbase.Cell in the Apache HBase project:
class TestScannersFromClientSide, method testAsyncScanner.
/**
 * Loads a rowNumber x familyNumber x qualifierNumber grid of cells into a
 * fresh table, scans it back through an async-prefetch scanner, and verifies
 * that every written cell (and exactly rowNumber rows) comes back. The given
 * listener is installed on the scanner to observe prefetch events.
 */
private void testAsyncScanner(TableName table, int rowNumber, int familyNumber, int qualifierNumber, int caching, Consumer<Boolean> listener) throws Exception {
  assert rowNumber > 0;
  assert familyNumber > 0;
  assert qualifierNumber > 0;
  byte[][] rows = makeNAsciiWithZeroPrefix(Bytes.toBytes("r"), rowNumber);
  byte[][] families = makeNAsciiWithZeroPrefix(Bytes.toBytes("f"), familyNumber);
  byte[][] qualifiers = makeNAsciiWithZeroPrefix(Bytes.toBytes("q"), qualifierNumber);
  Table tbl = TEST_UTIL.createTable(table, families);
  final boolean toLog = true;
  List<Cell> expected = new ArrayList<>();
  List<Put> batch = new ArrayList<>();
  for (byte[] r : rows) {
    Put put = new Put(r);
    for (byte[] f : families) {
      for (byte[] q : qualifiers) {
        KeyValue kv = new KeyValue(r, f, q, 1, VALUE);
        put.add(kv);
        expected.add(kv);
      }
    }
    batch.add(put);
    // Flush in chunks so a large grid does not buffer every Put client-side.
    if (batch.size() > 1000) {
      tbl.put(batch);
      batch.clear();
    }
  }
  if (!batch.isEmpty()) {
    tbl.put(batch);
    batch.clear();
  }
  Scan scan = new Scan();
  scan.setAsyncPrefetch(true);
  if (caching > 0) {
    scan.setCaching(caching);
  }
  try (ResultScanner scanner = tbl.getScanner(scan)) {
    assertTrue("Not instance of async scanner", scanner instanceof ClientAsyncPrefetchScanner);
    ((ClientAsyncPrefetchScanner) scanner).setPrefetchListener(listener);
    List<Cell> seen = new ArrayList<>();
    int actualRows = 0;
    boolean first = true;
    Result result;
    while ((result = scanner.next()) != null) {
      ++actualRows;
      // waiting for cache. see HBASE-17376
      if (first) {
        TimeUnit.SECONDS.sleep(1);
        first = false;
      }
      seen.addAll(result.listCells());
    }
    assertEquals(rowNumber, actualRows);
    // These cells may have different rows but it is ok. The Result#getRow
    // isn't used in the verifyResult()
    verifyResult(Result.create(seen), expected, toLog, "Testing async scan");
  }
  TEST_UTIL.deleteTable(table);
}
Usage example of org.apache.hadoop.hbase.Cell in the Apache HBase project:
class TestTableSnapshotScanner, method verifyRow.
/**
 * Asserts that every cell in the Result carries the Result's own row key, and
 * that for each family the self-named column (family used as qualifier) holds
 * the row key as its value.
 */
private static void verifyRow(Result result) throws IOException {
  final byte[] row = result.getRow();
  CellScanner cells = result.cellScanner();
  while (cells.advance()) {
    Cell cell = cells.current();
    // All Cells in the Result must share the same row key.
    Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
  }
  for (byte[] family : FAMILIES) {
    byte[] actual = result.getValue(family, family);
    Assert.assertArrayEquals("Row in snapshot does not match, expected:" + Bytes.toString(row) + " ,actual:" + Bytes.toString(actual), row, actual);
  }
}
Aggregations