Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.
The class TestNonTxIndexBuilder, method setup().
/**
 * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be
 * called, where any read requests to
 * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are served from our test
 * field 'currentRowCells'
 */
@Before
public void setup() throws Exception {
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration(false);
    conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    // the following is used by LocalTable#getCurrentRowState()
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(mockRegion);
    Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer<RegionScanner>() {
        @Override
        public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
            Scan sArg = (Scan) invocation.getArguments()[0];
            TimeRange timeRange = sArg.getTimeRange();
            return getMockTimeRangeRegionScanner(timeRange);
        }
    });
    // the following is called by PhoenixIndexCodec#getIndexUpserts(), getIndexDeletes()
    HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
    Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
    Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
    mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
    Mockito.when(mockIndexMetaData.isImmutableRows()).thenReturn(false);
    Mockito.when(mockIndexMetaData.getIndexMaintainers()).thenReturn(Collections.singletonList(getTestIndexMaintainer()));
    indexBuilder = new NonTxIndexBuilder();
    indexBuilder.setup(env);
}
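The helper getMockTimeRangeRegionScanner referenced above is not part of this excerpt. A minimal sketch of what such a helper might look like, assuming the fixture cells live in the 'currentRowCells' field mentioned in the javadoc (the body below is hypothetical, not the actual Phoenix test code):

// Hypothetical sketch: a mock RegionScanner whose next() hands back the
// fixture cells that fall inside the requested TimeRange, mimicking a
// single-row read from the region.
private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) throws IOException {
    RegionScanner mockScanner = Mockito.mock(RegionScanner.class);
    Mockito.when(mockScanner.next(Mockito.anyListOf(Cell.class))).thenAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            @SuppressWarnings("unchecked")
            List<Cell> results = (List<Cell>) invocation.getArguments()[0];
            for (Cell cell : currentRowCells) {
                if (timeRange.withinTimeRange(cell.getTimestamp())) {
                    results.add(cell);
                }
            }
            return false; // a single row: nothing left after this batch
        }
    });
    return mockScanner;
}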
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project ranger by apache.
The class RangerAuthorizationCoprocessor, method preScannerOpen().
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
    final RegionScanner ret;
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> RangerAuthorizationCoprocessor.preScannerOpen()");
    }
    try {
        activatePluginClassLoader();
        ret = implRegionObserver.preScannerOpen(c, scan, s);
    } finally {
        deactivatePluginClassLoader();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("<== RangerAuthorizationCoprocessor.preScannerOpen()");
    }
    return ret;
}
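activatePluginClassLoader() and deactivatePluginClassLoader() are not shown in this excerpt. A plausible sketch of the bracketing pattern, assuming the coprocessor swaps the thread context classloader to the plugin's isolated classloader around each delegated call (the pluginClassLoader field and the ThreadLocal below are assumed for illustration):

// Hypothetical sketch: swap the thread context classloader in, remember the
// previous one, and restore it afterwards. A ThreadLocal keeps the handoff
// safe when multiple region handler threads enter the coprocessor at once.
private final ThreadLocal<ClassLoader> prevClassLoader = new ThreadLocal<>();

private void activatePluginClassLoader() {
    prevClassLoader.set(Thread.currentThread().getContextClassLoader());
    Thread.currentThread().setContextClassLoader(pluginClassLoader);
}

private void deactivatePluginClassLoader() {
    Thread.currentThread().setContextClassLoader(prevClassLoader.get());
    prevClassLoader.remove();
}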
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project ranger by apache.
The class RangerAuthorizationCoprocessor, method postScannerOpen().
@Override
public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
    final RegionScanner ret;
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> RangerAuthorizationCoprocessor.postScannerOpen()");
    }
    try {
        activatePluginClassLoader();
        ret = implRegionObserver.postScannerOpen(c, scan, s);
    } finally {
        deactivatePluginClassLoader();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("<== RangerAuthorizationCoprocessor.postScannerOpen()");
    }
    return ret;
}
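The post-open hook mirrors preScannerOpen() exactly: only the delegated RegionObserver callback differs, so the same classloader bracketing sketched above applies to the wrapped RegionScanner returned here as well.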
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.
The class IncrementHandler, method preGetOp().
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get, List<Cell> results) throws IOException {
    Scan scan = new Scan(get);
    scan.setMaxVersions();
    scan.setFilter(Filters.combine(new IncrementFilter(), scan.getFilter()));
    RegionScanner scanner = null;
    try {
        scanner = new IncrementSummingScanner(region, scan.getBatch(), region.getScanner(scan), ScanType.USER_SCAN);
        scanner.next(results);
        ctx.bypass();
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
}
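The hook rewrites the client's Get as a multi-version Scan, lets IncrementSummingScanner collapse the stored delta cells into a single summed value, and then skips the default get path via ctx.bypass(). From the client's perspective nothing changes; a hedged usage sketch, with the table name, column coordinates, and configuration assumed for illustration:

// Hypothetical usage sketch: with IncrementHandler installed on the table,
// a plain Get observes the summed counter value rather than the raw deltas.
try (Connection conn = ConnectionFactory.createConnection(conf);
     Table table = conn.getTable(TableName.valueOf("counters"))) {
    Result result = table.get(new Get(Bytes.toBytes("row1")));
    long total = Bytes.toLong(result.getValue(Bytes.toBytes("f"), Bytes.toBytes("c")));
    System.out.println("summed counter = " + total);
}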
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.
The class IncrementSummingScannerTest, method testMultiColumnFlushAndCompact().
@Test
public void testMultiColumnFlushAndCompact() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    byte[] columnBytes2 = Bytes.toBytes("c2");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long now = 1;
        byte[] row1 = Bytes.toBytes("row1");
        byte[] row2 = Bytes.toBytes("row2");
        // Initial put to row1,c2
        Put row1P = new Put(row1);
        row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
        region.put(row1P);
        // Initial put to row2,c
        Put row2P = new Put(row2);
        row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
        region.put(row2P);
        // Generate some increments
        long ts = now;
        for (int i = 0; i < 50; i++) {
            region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
            ts++;
        }
        // First scanner represents the flush scanner
        RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()), ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
        // Second scanner is a user scan; this keeps the asserts simple
        scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        assertTrue(scanner.next(results, 10));
        assertEquals(2, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c", Bytes.toString(cell.getQualifier()));
        assertEquals(50, Bytes.toLong(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c2", Bytes.toString(cell.getQualifier()));
        assertEquals(55, Bytes.toLong(cell.getValue()));
        results.clear();
        assertFalse(scanner.next(results, 10));
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row2", Bytes.toString(cell.getRow()));
        assertEquals(60, Bytes.toLong(cell.getValue()));
    } finally {
        region.close();
    }
}
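Note how the expected sums fall out of the setup: the 50 increments per column must each carry a delta of 1, so row1:c sums to 50, row1:c2 to 5 + 50 = 55, and row2:c to 10 + 50 = 60, matching the asserts. The generateIncrementPut helper is not shown in this excerpt; a hypothetical sketch of its shape, where the attribute used to mark a cell as an increment delta is a placeholder, not CDAP's actual encoding:

// Hypothetical sketch: each increment is written as a Put carrying a delta of
// 1L, flagged so IncrementSummingScanner can tell deltas from ordinary puts.
// The attribute key below stands in for CDAP's real delta marker.
private Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
    Put put = new Put(row);
    put.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
    put.setAttribute("increment.delta", Bytes.toBytes(true));
    return put;
}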