Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
The class TestReplicationSmallTests, method testHBase14905.
// VerifyReplication should honor versions option
@Test(timeout = 300000)
public void testHBase14905() throws Exception {
  // normal Batch tests
  byte[] qualifierName = Bytes.toBytes("f1");
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(famName, qualifierName, Bytes.toBytes("v1002"));
  htable1.put(put);
  put.addColumn(famName, qualifierName, Bytes.toBytes("v1001"));
  htable1.put(put);
  put.addColumn(famName, qualifierName, Bytes.toBytes("v1112"));
  htable1.put(put);
  Scan scan = new Scan();
  scan.setMaxVersions(100);
  ResultScanner scanner1 = htable1.getScanner(scan);
  Result[] res1 = scanner1.next(1);
  scanner1.close();
  assertEquals(1, res1.length);
  assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());
  for (int i = 0; i < NB_RETRIES; i++) {
    scan = new Scan();
    scan.setMaxVersions(100);
    scanner1 = htable2.getScanner(scan);
    res1 = scanner1.next(1);
    scanner1.close();
    if (res1.length != 1) {
      LOG.info("Only got " + res1.length + " rows");
      Thread.sleep(SLEEP_TIME);
    } else {
      int cellNumber = res1[0].getColumnCells(famName, qualifierName).size();
      if (cellNumber != 3) {
        LOG.info("Only got " + cellNumber + " cells");
        Thread.sleep(SLEEP_TIME);
      } else {
        break;
      }
    }
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for normal batch replication");
    }
  }
  put.addColumn(famName, qualifierName, Bytes.toBytes("v1111"));
  htable2.put(put);
  put.addColumn(famName, qualifierName, Bytes.toBytes("v1112"));
  htable2.put(put);
  scan = new Scan();
  scan.setMaxVersions(100);
  scanner1 = htable2.getScanner(scan);
  res1 = scanner1.next(NB_ROWS_IN_BATCH);
  scanner1.close();
  assertEquals(1, res1.length);
  assertEquals(5, res1[0].getColumnCells(famName, qualifierName).size());
  String[] args = new String[] { "--versions=100", PEER_ID, tableName.getNameAsString() };
  runVerifyReplication(args, 0, 1);
}
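The runVerifyReplication call above is a helper on this test class that launches the VerifyReplication MapReduce job and checks its counters. As a rough sketch (not the test's actual helper), driving the tool directly could look like the following, where the peer id "1" and table name "testTable" are placeholders:

// Sketch only. Assumes org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication,
// which is a standard Hadoop Tool, so ToolRunner can drive it with String args.
Configuration conf = HBaseConfiguration.create();
String[] toolArgs = new String[] { "--versions=100", "1", "testTable" };
int exitCode = ToolRunner.run(conf, new VerifyReplication(), toolArgs);
// Row mismatches surface in the job counters (GOODROWS/BADROWS), which is
// presumably what runVerifyReplication(args, 0, 1) asserts against: zero
// good rows and one bad row after the divergent puts to htable2.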
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
The class OfflineMetaRebuildTestCore, method tableRowCount.
/**
 * Returns the number of rows in a given table. HBase must be up and the
 * table must be present; otherwise the call blocks until the client times out.
 *
 * @return number of rows in the specified table
 */
protected int tableRowCount(Configuration conf, TableName table) throws IOException {
  int count = 0;
  try (Table t = TEST_UTIL.getConnection().getTable(table);
      ResultScanner rst = t.getScanner(new Scan())) {
    for (@SuppressWarnings("unused") Result rt : rst) {
      count++;
    }
  }
  return count;
}
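A full scan like this ships every cell to the client just to count rows. A leaner variant, sketched here for wide tables, pairs the scan with org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter so each row contributes only its first cell; tableRowCountFirstKeyOnly is a hypothetical name, not part of the test class:

// Sketch only: same row count, but FirstKeyOnlyFilter makes each region
// return just the first cell of every row, keeping wide rows cheap.
protected int tableRowCountFirstKeyOnly(TableName table) throws IOException {
  Scan scan = new Scan();
  scan.setFilter(new FirstKeyOnlyFilter());
  int count = 0;
  try (Table t = TEST_UTIL.getConnection().getTable(table);
      ResultScanner scanner = t.getScanner(scan)) {
    while (scanner.next() != null) {
      count++;
    }
  }
  return count;
}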
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
The class TestResultSizeEstimation, method testResultSizeEstimation.
@Test
public void testResultSizeEstimation() throws Exception {
  byte[] ROW1 = Bytes.toBytes("testRow1");
  byte[] ROW2 = Bytes.toBytes("testRow2");
  byte[] FAMILY = Bytes.toBytes("testFamily");
  byte[] QUALIFIER = Bytes.toBytes("testQualifier");
  byte[] VALUE = Bytes.toBytes("testValue");
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] FAMILIES = new byte[][] { FAMILY };
  Table table = TEST_UTIL.createTable(tableName, FAMILIES);
  Put p = new Put(ROW1);
  p.add(new KeyValue(ROW1, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE));
  table.put(p);
  p = new Put(ROW2);
  p.add(new KeyValue(ROW2, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE));
  table.put(p);
  Scan s = new Scan();
  s.setMaxResultSize(SCANNER_DATA_LIMIT);
  ResultScanner rs = table.getScanner(s);
  int count = 0;
  while (rs.next() != null) {
    count++;
  }
  assertEquals("Result size estimation did not work properly", 2, count);
  rs.close();
  table.close();
}
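setMaxResultSize caps the bytes returned per scan RPC, which is what forces the two rows here into separate batches once SCANNER_DATA_LIMIT is small enough. In client code the byte cap is commonly paired with a row cap; a minimal sketch, assuming an already-open Table named table:

// Sketch: a scan batch ends when either limit is hit, whichever comes
// first. setCaching bounds rows per RPC, setMaxResultSize bounds bytes.
Scan s = new Scan();
s.setCaching(100);                   // at most 100 rows per RPC
s.setMaxResultSize(2 * 1024 * 1024); // at most ~2 MB of cell data per RPC
try (ResultScanner rs = table.getScanner(s)) {
  for (Result r : rs) {
    // process r; the RPC chunking above is invisible at this level
  }
}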
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
The class TestLoadAndSwitchEncodeOnDisk, method assertAllOnLine.
private void assertAllOnLine(final Table t) throws IOException {
  List<HRegionLocation> regions;
  try (RegionLocator rl = TEST_UTIL.getConnection().getRegionLocator(t.getName())) {
    regions = rl.getAllRegionLocations();
  }
  for (HRegionLocation e : regions) {
    byte[] startkey = e.getRegionInfo().getStartKey();
    Scan s = new Scan(startkey);
    ResultScanner scanner = t.getScanner(s);
    Result r = scanner.next();
    org.junit.Assert.assertTrue(r != null && r.size() > 0);
    scanner.close();
  }
}
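The Scan(byte[]) constructor used above is deprecated in newer client APIs. A hedged equivalent of the loop, assuming an HBase 1.4+ client where Scan.withStartRow is available:

// Sketch (HBase >= 1.4 client API): same per-region probe without the
// deprecated Scan(byte[]) constructor, and with the scanner closed by
// try-with-resources instead of an explicit close().
for (HRegionLocation e : regions) {
  Scan s = new Scan().withStartRow(e.getRegionInfo().getStartKey());
  try (ResultScanner scanner = t.getScanner(s)) {
    Result r = scanner.next();
    org.junit.Assert.assertTrue(r != null && r.size() > 0);
  }
}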
Use of org.apache.hadoop.hbase.client.ResultScanner in project javaee7-samples by javaee-samples.
The class PersonSessionBean, method getPersons.
public List<Person> getPersons() throws IOException {
  List<Person> persons = new ArrayList<>();
  try (HTableInterface table = pool.getTable(personsTable)) {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes(personsColumnFamily));
    try (ResultScanner resultScanner = table.getScanner(scan)) {
      for (Result result : resultScanner) {
        for (KeyValue kv : result.raw()) {
          Person p = new Person();
          p.setTitle(Bytes.toString(kv.getQualifier()));
          p.setBody(Bytes.toString(kv.getValue()));
          p.setId(Bytes.toString(result.getRow()));
          persons.add(p);
        }
      }
    }
  }
  return persons;
}
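KeyValue and Result.raw() are deprecated in later HBase clients. A hedged rewrite of the inner loop against the Cell API (org.apache.hadoop.hbase.Cell, org.apache.hadoop.hbase.CellUtil), keeping the same Person mapping:

// Sketch: same mapping via the Cell API. CellUtil.cloneQualifier and
// cloneValue copy the relevant byte ranges out of the backing buffer.
for (Result result : resultScanner) {
  for (Cell cell : result.rawCells()) {
    Person p = new Person();
    p.setTitle(Bytes.toString(CellUtil.cloneQualifier(cell)));
    p.setBody(Bytes.toString(CellUtil.cloneValue(cell)));
    p.setId(Bytes.toString(result.getRow()));
    persons.add(p);
  }
}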