Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestRegionObserverInterface, method testHBase3583.
@Test(timeout = 300000)
// HBase-3583
public void testHBase3583() throws IOException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  util.createTable(tableName, new byte[][] { A, B, C });
  util.waitUntilAllRegionsAssigned(tableName);

  verifyMethodResult(SimpleRegionObserver.class,
    new String[] { "hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled" },
    tableName, new Boolean[] { false, false, false, false });

  Table table = util.getConnection().getTable(tableName);
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Get get = new Get(ROW);
  get.addColumn(A, A);
  table.get(get);

  // verify that scannerNext and scannerClose upcalls won't be invoked
  // when we perform get().
  verifyMethodResult(SimpleRegionObserver.class,
    new String[] { "hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled" },
    tableName, new Boolean[] { true, true, false, false });

  Scan s = new Scan();
  ResultScanner scanner = table.getScanner(s);
  try {
    // drain the scanner so the scanner lifecycle hooks are exercised
    for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
    }
  } finally {
    scanner.close();
  }

  // now scanner hooks should be invoked.
  verifyMethodResult(SimpleRegionObserver.class,
    new String[] { "wasScannerNextCalled", "wasScannerCloseCalled" },
    tableName, new Boolean[] { true, true });

  util.deleteTable(tableName);
  table.close();
}
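Since ResultScanner implements both Closeable and Iterable<Result>, the drain loop above can also be written with try-with-resources and a for-each loop. A minimal sketch of that pattern, assuming an already-open Connection named conn and a table name supplied by the caller (both hypothetical here):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanDrainSketch {
  // Drains a full-table scan; both the table and the scanner are closed by try-with-resources.
  static long countRows(Connection conn, String tableNameStr) throws IOException {
    long rows = 0;
    try (Table table = conn.getTable(TableName.valueOf(tableNameStr));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result r : scanner) {   // ResultScanner is Iterable<Result>
        if (!r.isEmpty()) {
          rows++;
        }
      }
    }
    return rows;
  }
}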
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestRegionObserverInterface, method testCompactionOverride.
/**
 * Tests overriding compaction handling via coprocessor hooks
 * @throws Exception
 */
@Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
  final TableName compactTable = TableName.valueOf(name.getMethodName());
  Admin admin = util.getAdmin();
  if (admin.tableExists(compactTable)) {
    admin.disableTable(compactTable);
    admin.deleteTable(compactTable);
  }

  HTableDescriptor htd = new HTableDescriptor(compactTable);
  htd.addFamily(new HColumnDescriptor(A));
  htd.addCoprocessor(EvenOnlyCompactor.class.getName());
  admin.createTable(htd);

  Table table = util.getConnection().getTable(compactTable);
  for (long i = 1; i <= 10; i++) {
    byte[] iBytes = Bytes.toBytes(i);
    Put put = new Put(iBytes);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(A, A, iBytes);
    table.put(put);
  }

  HRegion firstRegion = cluster.getRegions(compactTable).get(0);
  Coprocessor cp =
    firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
  assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
  EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;

  // force a compaction
  long ts = System.currentTimeMillis();
  admin.flush(compactTable);
  // wait for flush
  for (int i = 0; i < 10; i++) {
    if (compactor.lastFlush >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
  LOG.debug("Flush complete");

  ts = compactor.lastFlush;
  admin.majorCompact(compactTable);
  // wait for compaction
  for (int i = 0; i < 30; i++) {
    if (compactor.lastCompaction >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  LOG.debug("Last compaction was at " + compactor.lastCompaction);
  assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);

  // only even rows should remain
  ResultScanner scanner = table.getScanner(new Scan());
  try {
    for (long i = 2; i <= 10; i += 2) {
      Result r = scanner.next();
      assertNotNull(r);
      assertFalse(r.isEmpty());
      byte[] iBytes = Bytes.toBytes(i);
      assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
      assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
    }
  } finally {
    scanner.close();
  }
  table.close();
}
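The test above detects flush and compaction completion by polling fields on its custom coprocessor. Outside of a coprocessor test, completion can be polled through the Admin API instead. A minimal sketch, assuming the HBase 2.x client, where Admin.getCompactionState(TableName) returns org.apache.hadoop.hbase.client.CompactionState (older releases expose the same information under a different enum type):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;

public class CompactionWaitSketch {
  // Requests a major compaction and polls until the table reports no compaction in progress.
  static void majorCompactAndWait(Admin admin, TableName table, long timeoutMs)
      throws Exception {
    admin.majorCompact(table);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // Note: the state may still read NONE briefly before the request is picked up,
      // so callers that need certainty should also check store file counts or timestamps.
      if (admin.getCompactionState(table) == CompactionState.NONE) {
        return;
      }
      Thread.sleep(500);
    }
    throw new IllegalStateException("Compaction did not finish within " + timeoutMs + " ms");
  }
}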
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestMultiRowRangeFilter, method getScanResult.
private List<Cell> getScanResult(byte[] startRow, byte[] stopRow, Table ht) throws IOException {
  Scan scan = new Scan();
  scan.setMaxVersions();
  if (!Bytes.toString(startRow).isEmpty()) {
    scan.setStartRow(startRow);
  }
  if (!Bytes.toString(stopRow).isEmpty()) {
    scan.setStopRow(stopRow);
  }
  ResultScanner scanner = ht.getScanner(scan);
  List<Cell> kvList = new ArrayList<>();
  Result r;
  while ((r = scanner.next()) != null) {
    for (Cell kv : r.listCells()) {
      kvList.add(kv);
    }
  }
  scanner.close();
  return kvList;
}
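getScanResult issues one plain scan per range so the test can cross-check what MultiRowRangeFilter returns. For reference, the filter itself collapses several row-key ranges into a single scan. A minimal sketch of that usage, assuming the org.apache.hadoop.hbase.filter.MultiRowRangeFilter API and two hypothetical row-key ranges:

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRangeScanSketch {
  // Scans two disjoint row-key ranges in one pass instead of issuing two separate scans.
  static long countRowsInRanges(Table table) throws IOException {
    List<RowRange> ranges = Arrays.asList(
      // RowRange(startRow, startInclusive, stopRow, stopInclusive); keys here are hypothetical
      new RowRange(Bytes.toBytes("row010"), true, Bytes.toBytes("row020"), false),
      new RowRange(Bytes.toBytes("row050"), true, Bytes.toBytes("row060"), false));
    Scan scan = new Scan();
    scan.setFilter(new MultiRowRangeFilter(ranges));
    long count = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        count++;
      }
    }
    return count;
  }
}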
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestHFileOutputFormat2, method doIncrementalLoadTest.
private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality,
    boolean putSortReducer, String tableStr) throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
  int hostCount = 1;
  int regionNum = 5;
  if (shouldKeepLocality) {
    // We should change host count higher than hdfs replica count when MiniHBaseCluster supports
    // explicit hostnames parameter just like MiniDFSCluster does.
    hostCount = 3;
    regionNum = 20;
  }

  byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
  String[] hostnames = new String[hostCount];
  for (int i = 0; i < hostCount; ++i) {
    hostnames[i] = "datanode_" + i;
  }
  util.startMiniCluster(1, hostCount, hostnames);

  TableName tableName = TableName.valueOf(tableStr);
  Table table = util.createTable(tableName, FAMILIES, splitKeys);
  Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
  FileSystem fs = testDir.getFileSystem(conf);
  try (RegionLocator r = util.getConnection().getRegionLocator(tableName);
      Admin admin = util.getConnection().getAdmin()) {
    assertEquals("Should start with empty table", 0, util.countRows(table));
    int numRegions = r.getStartKeys().length;
    assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

    // Generate the bulk load files
    runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir, putSortReducer);
    // This doesn't write into the table, just makes files
    assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

    // Make sure that a directory was created for every CF
    int dir = 0;
    for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
      for (byte[] family : FAMILIES) {
        if (Bytes.toString(family).equals(f.getPath().getName())) {
          ++dir;
        }
      }
    }
    assertEquals("Column family not found in FS.", FAMILIES.length, dir);

    // handle the split case
    if (shouldChangeRegions) {
      LOG.info("Changing regions in table");
      admin.disableTable(table.getName());
      util.waitUntilNoRegionsInTransition();
      util.deleteTable(table.getName());
      byte[][] newSplitKeys = generateRandomSplitKeys(14);
      table = util.createTable(tableName, FAMILIES, newSplitKeys);
      while (util.getConnection().getRegionLocator(tableName).getAllRegionLocations().size() != 15
          || !admin.isTableAvailable(table.getName())) {
        Thread.sleep(200);
        LOG.info("Waiting for new region assignment to happen");
      }
    }

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);

    // Ensure data shows up
    int expectedRows = 0;
    if (putSortReducer) {
      // no rows should be extracted
      assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
        util.countRows(table));
    } else {
      expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
        util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
    }

    String tableDigestBefore = util.checksumRows(table);
    // Check region locality
    HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
    for (HRegion region : util.getHBaseCluster().getRegions(tableName)) {
      hbd.add(region.getHDFSBlocksDistribution());
    }
    for (String hostname : hostnames) {
      float locality = hbd.getBlockLocalityIndex(hostname);
      LOG.info("locality of [" + hostname + "]: " + locality);
      assertEquals(100, (int) (locality * 100));
    }

    // Cause regions to reopen
    admin.disableTable(tableName);
    while (!admin.isTableDisabled(tableName)) {
      Thread.sleep(200);
      LOG.info("Waiting for table to disable");
    }
    admin.enableTable(tableName);
    util.waitTableAvailable(tableName);
    assertEquals("Data should remain after reopening of regions", tableDigestBefore,
      util.checksumRows(table));
  } finally {
    testDir.getFileSystem(conf).delete(testDir, true);
    util.deleteTable(tableName);
    util.shutdownMiniCluster();
  }
}
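runIncrementalPELoad above hides the MapReduce half of the flow inside the test harness. The usual driver shape outside of a test is: configure a job with HFileOutputFormat2 so its output is partitioned by the table's region boundaries, write the HFiles to a staging directory, then hand that directory to LoadIncrementalHFiles exactly as the test does. A minimal sketch, in which the mapper class and staging path are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadDriverSketch {
  static void bulkLoad(Configuration conf, TableName tableName, Path stagingDir)
      throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tableName);
         RegionLocator locator = conn.getRegionLocator(tableName);
         Admin admin = conn.getAdmin()) {
      Job job = Job.getInstance(conf, "bulk-load-" + tableName);
      job.setJarByClass(BulkLoadDriverSketch.class);
      // job.setMapperClass(MyHFileMapper.class);  // hypothetical mapper emitting Puts/KeyValues
      // Wires in the reducer, total-order partitioner, and HFile output format for the table.
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
      FileOutputFormat.setOutputPath(job, stagingDir);
      if (!job.waitForCompletion(true)) {
        throw new IllegalStateException("HFile generation job failed");
      }
      // Moves the generated HFiles into the table's regions, as in the test above.
      new LoadIncrementalHFiles(conf).doBulkLoad(stagingDir, admin, table, locator);
    }
  }
}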
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestImportExport, method testWithDeletes.
@Test
public void testWithDeletes() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  desc.addFamily(new HColumnDescriptor(FAMILYA)
    .setMaxVersions(5)
    .setKeepDeletedCells(KeepDeletedCells.TRUE));
  UTIL.getAdmin().createTable(desc);
  try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
    Put p = new Put(ROW1);
    p.addColumn(FAMILYA, QUAL, now, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
    t.put(p);
    Delete d = new Delete(ROW1, now + 3);
    t.delete(d);
    d = new Delete(ROW1);
    d.addColumns(FAMILYA, QUAL, now + 2);
    t.delete(d);
  }

  String[] args = new String[] {
    "-D" + Export.RAW_SCAN + "=true", name.getMethodName(), FQ_OUTPUT_DIR,
    // max number of key versions per key to export
    "1000" };
  assertTrue(runExport(args));

  final String IMPORT_TABLE = name.getMethodName() + "import";
  desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
  desc.addFamily(new HColumnDescriptor(FAMILYA)
    .setMaxVersions(5)
    .setKeepDeletedCells(KeepDeletedCells.TRUE));
  UTIL.getAdmin().createTable(desc);
  try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
    args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR };
    assertTrue(runImport(args));

    Scan s = new Scan();
    s.setMaxVersions();
    s.setRaw(true);
    ResultScanner scanner = t.getScanner(s);
    Result r = scanner.next();
    Cell[] res = r.rawCells();
    assertTrue(CellUtil.isDeleteFamily(res[0]));
    assertEquals(now + 4, res[1].getTimestamp());
    assertEquals(now + 3, res[2].getTimestamp());
    assertTrue(CellUtil.isDelete(res[3]));
    assertEquals(now + 2, res[4].getTimestamp());
    assertEquals(now + 1, res[5].getTimestamp());
    assertEquals(now, res[6].getTimestamp());
  }
}
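The import-side verification above relies on a raw, all-versions scan so that delete markers come back as ordinary cells. The same kind of scan is useful on its own when inspecting what a table actually stores. A minimal sketch, assuming an already-open Table handle:

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RawScanSketch {
  // Prints every cell, including delete markers, that a raw scan returns for the table.
  static void dumpRawCells(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setMaxVersions();   // return all versions, not just the latest
    scan.setRaw(true);       // include delete markers and not-yet-collected deleted cells
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        for (Cell cell : r.rawCells()) {
          String kind = CellUtil.isDelete(cell) ? "delete marker" : "cell";
          System.out.println(kind + " @ ts=" + cell.getTimestamp());
        }
      }
    }
  }
}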