Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache.
The class TestRegionObserverScannerOpenHook, method testRegionObserverScanTimeStacking.
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtil().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  region.put(put);

  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " + r,
    r.listCells());
  HBaseTestingUtil.closeRegionAndWAL(region);
}
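The NoDataFromScan and EmptyRegionObsever classes loaded above are inner classes of the test and are not reproduced on this page. As a minimal, hedged sketch (the class name and the choice of hook are illustrative assumptions, not the actual HBase inner classes), a region observer that suppresses read results can look like this:

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

// Illustrative only: short-circuit Gets so the region returns no cells, which is
// the behaviour the assertNull above depends on.
public class SuppressReadsObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    // Expose this class to the host as its RegionObserver implementation.
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
      List<Cell> result) throws IOException {
    // Leave the result list empty and skip the default Get processing so the
    // caller sees a Result with no cells.
    c.bypass();
  }
}

Loaded at Coprocessor.PRIORITY_HIGHEST, as in the test above, such an observer runs ahead of user-priority observers like EmptyRegionObsever.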
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache.
The class TestRegionCoprocessorHost, method testPostScannerFilterRow.
@Test
public void testPostScannerFilterRow() throws IOException {
  // By default SimpleRegionObserver is set as the region coprocessor, and it implements
  // postScannerFilterRow
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  assertTrue("Region coprocessor implements postScannerFilterRow",
    host.hasCustomPostScannerFilterRow());

  // Set a region CP which doesn't implement postScannerFilterRow
  init(true);
  host = new RegionCoprocessorHost(region, rsServices, conf);
  assertFalse("Region coprocessor implements postScannerFilterRow",
    host.hasCustomPostScannerFilterRow());

  // Set multiple region CPs, of which one implements postScannerFilterRow
  init(false);
  host = new RegionCoprocessorHost(region, rsServices, conf);
  assertTrue("Region coprocessor doesn't implement postScannerFilterRow",
    host.hasCustomPostScannerFilterRow());
}
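The init(...) helper used above belongs to the test class and is not shown on this page. As a hedged sketch of what such setup typically involves (an assumption about the helper, not its actual body), region coprocessors are registered on the Configuration before the host is constructed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;

// Illustrative only: list observer classes under the standard
// "hbase.coprocessor.region.classes" key; RegionCoprocessorHost reads this key
// when it is constructed and loads the listed classes.
Configuration conf = HBaseConfiguration.create();
conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
  SimpleRegionObserver.class.getName());

hasCustomPostScannerFilterRow() then reports whether any of the loaded observers overrides postScannerFilterRow, which is what the three assertions above check.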
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache.
The class TestRegionCoprocessorHost, method testPreCompactScannerOpen.
@Test
public void testPreCompactScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo oldScanInfo = getScanInfo();
  HStore store = mock(HStore.class);
  when(store.getScanInfo()).thenReturn(oldScanInfo);
  ScanInfo newScanInfo = host.preCompactScannerOpen(store, ScanType.COMPACT_DROP_DELETES,
    mock(CompactionLifeCycleTracker.class), mock(CompactionRequest.class), mock(User.class));
  verifyScanInfo(newScanInfo);
}
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache.
The class TestRegionCoprocessorHost, method testPreMemStoreCompactionCompactScannerOpen.
@Test
public void testPreMemStoreCompactionCompactScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo oldScanInfo = getScanInfo();
  HStore store = mock(HStore.class);
  when(store.getScanInfo()).thenReturn(oldScanInfo);
  ScanInfo newScanInfo = host.preMemStoreCompactionCompactScannerOpen(store);
  verifyScanInfo(newScanInfo);
}
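The getScanInfo() and verifyScanInfo(...) calls in the two tests above are helpers of the test class and are not reproduced here. For context, a hedged sketch of the kind of observer that makes these hooks hand back a modified ScanInfo (the class name and the specific ScanOptions calls are assumptions and may differ between HBase versions):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.ScanOptions;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

// Illustrative only: widen what compaction scanners are allowed to see. The host
// builds the ScanInfo it returns from the ScanOptions mutated by observers like this.
public class WidenCompactionScanObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
      ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
      CompactionRequest request) throws IOException {
    // Keep every cell version visible to the compaction scanner.
    options.readAllVersions();
  }

  @Override
  public void preMemStoreCompactionCompactScannerOpen(
      ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
      throws IOException {
    options.readAllVersions();
  }
}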
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache.
The class TestRegionObserverPreFlushAndPreCompact, method testPreFlushReturningNull.
/**
 * Ensure we get the expected exception when we try to return null from a preFlush call.
 * @throws IOException We expect it to throw {@link CoprocessorException}
 */
@Test(expected = CoprocessorException.class)
public void testPreFlushReturningNull() throws IOException {
  RegionCoprocessorHost rch = getRegionCoprocessorHost();
  rch.preFlush(null, null, null);
}
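getRegionCoprocessorHost() above presumably wires in an observer whose preFlush returns null, which the host then rejects with a CoprocessorException. As a hedged sketch of such a misbehaving observer (the class name is illustrative, and the preFlush signature shown is the HBase 2.x form and may differ in other versions):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Store;

// Illustrative only: an observer that (incorrectly) returns null from preFlush.
// The host treats the null scanner as a coprocessor error rather than proceeding
// with a broken flush, which is what the test above asserts.
public class NullFlushScannerObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
      InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException {
    // Returning null here is the error condition the test provokes.
    return null;
  }
}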