Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
From the class ZooKeeperScanPolicyObserver, method preFlushScannerOpen:
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
  if (scanInfo == null) {
    // No custom scan policy is configured; returning null tells HBase to
    // take the default action and build its own flush scanner.
    return null;
  }
  Scan scan = new Scan();
  scan.setMaxVersions(scanInfo.getMaxVersions());
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
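For context, a hook like the one above only runs once the observer is registered on a table. Below is a minimal sketch of that registration using the standard table-descriptor API; the table name, column family, and wrapper class are illustrative assumptions, not part of the hbase snippet above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AttachScanPolicyObserver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative table and family names.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    htd.addFamily(new HColumnDescriptor("cf"));
    // Register the observer; HBase will then consult its preFlushScannerOpen
    // hook whenever a memstore flush opens a scanner on this table.
    htd.addCoprocessor(
        "org.apache.hadoop.hbase.coprocessor.example.ZooKeeperScanPolicyObserver");
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      admin.createTable(htd);
    }
  }
}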
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
From the class PartitionedMobCompactor, method createScanner:
/**
* Creates a store scanner.
* @param filesToCompact The files to be compacted.
* @param scanType The scan type.
* @return The store scanner.
* @throws IOException if IO failure is encountered
*/
private StoreScanner createScanner(List<StoreFile> filesToCompact, ScanType scanType)
    throws IOException {
  List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact,
      false, true, false, false, HConstants.LATEST_TIMESTAMP);
  Scan scan = new Scan();
  scan.setMaxVersions(column.getMaxVersions());
  long ttl = HStore.determineTTLFromFamily(column);
  ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.COMPARATOR);
  return new StoreScanner(scan, scanInfo, scanType, null, scanners, 0L,
      HConstants.LATEST_TIMESTAMP);
}
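The ScanInfo(conf, family, ttl, timeToPurgeDeletes, comparator) constructor used above can also be built directly from a column descriptor. Here is a minimal, self-contained sketch with illustrative family settings; the manual seconds-to-milliseconds conversion stands in for HStore.determineTTLFromFamily and is an assumption of this sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.regionserver.ScanInfo;

public class ScanInfoFromFamily {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative family: three versions, one-day TTL (declared in seconds).
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setMaxVersions(3);
    family.setTimeToLive(24 * 60 * 60);
    // ScanInfo expects the TTL in milliseconds; 0 for timeToPurgeDeletes is
    // the same value the createScanner method above passes.
    ScanInfo scanInfo = new ScanInfo(conf, family, family.getTimeToLive() * 1000L, 0,
        CellComparator.COMPARATOR);
    System.out.println("maxVersions=" + scanInfo.getMaxVersions()
        + ", ttl(ms)=" + scanInfo.getTtl());
  }
}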
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
From the class TestUserScanQueryMatcher, method testMatch_Wildcard:
@Test
public void testMatch_Wildcard() throws IOException {
  // Moving up from the Tracker by using Gets and List<KeyValue> instead of just byte[].
  // Expected result: five INCLUDEs for the cells of row1, then DONE at the row boundary.
  List<MatchCode> expected = new ArrayList<>(6);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.DONE);
  long now = EnvironmentEdgeManager.currentTime();
  UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
      new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator),
      null, now - ttl, now, null);
  List<KeyValue> memstore = new ArrayList<>(6);
  memstore.add(new KeyValue(row1, fam2, col1, 1, data));
  memstore.add(new KeyValue(row1, fam2, col2, 1, data));
  memstore.add(new KeyValue(row1, fam2, col3, 1, data));
  memstore.add(new KeyValue(row1, fam2, col4, 1, data));
  memstore.add(new KeyValue(row1, fam2, col5, 1, data));
  memstore.add(new KeyValue(row2, fam1, col1, 1, data));
  List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(memstore.size());
  KeyValue k = memstore.get(0);
  qm.setToNewRow(k);
  for (KeyValue kv : memstore) {
    actual.add(qm.match(kv));
  }
  assertEquals(expected.size(), actual.size());
  for (int i = 0; i < expected.size(); i++) {
    LOG.debug("expected " + expected.get(i) + ", actual " + actual.get(i));
    assertEquals(expected.get(i), actual.get(i));
  }
}
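The loop in the middle of the test is the general pattern for driving any ScanQueryMatcher: seed it with the first cell of a row via setToNewRow, then classify each cell with match. A minimal sketch of that pattern extracted into a helper follows; the class and method names are illustrative, and the package path for ScanQueryMatcher is assumed from the refactored querymatcher module.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode;

public final class MatcherDriver {
  /**
   * Classifies every cell of a single row. The matcher must be re-seeded
   * with setToNewRow() whenever a new row begins, exactly as the test does.
   */
  public static List<MatchCode> classifyRow(ScanQueryMatcher matcher, List<Cell> cells)
      throws IOException {
    List<MatchCode> codes = new ArrayList<>(cells.size());
    matcher.setToNewRow(cells.get(0));
    for (Cell cell : cells) {
      codes.add(matcher.match(cell));
    }
    return codes;
  }

  private MatcherDriver() {
  }
}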
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
From the class TestUserScanQueryMatcher, method testMatch_ExpiredExplicit:
/**
* Verify that {@link ScanQueryMatcher} only skips expired KeyValue instances and does not exit
* early from the row (skipping later non-expired KeyValues). This version mimics a Get with
* explicitly specified column qualifiers.
* @throws IOException
*/
@Test
public void testMatch_ExpiredExplicit() throws IOException {
  long testTTL = 1000;
  // The expired col5 cell (now - 10000 is older than testTTL) yields
  // SEEK_NEXT_ROW rather than INCLUDE, while earlier non-expired requested
  // columns are still included.
  MatchCode[] expected = new MatchCode[] {
      ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
      ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL,
      ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
      ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL,
      ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW,
      ScanQueryMatcher.MatchCode.DONE };
  long now = EnvironmentEdgeManager.currentTime();
  UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
      new ScanInfo(this.conf, fam2, 0, 1, testTTL, KeepDeletedCells.FALSE, 0, rowComparator),
      get.getFamilyMap().get(fam2), now - testTTL, now, null);
  KeyValue[] kvs = new KeyValue[] {
      new KeyValue(row1, fam2, col1, now - 100, data),
      new KeyValue(row1, fam2, col2, now - 50, data),
      new KeyValue(row1, fam2, col3, now - 5000, data),
      new KeyValue(row1, fam2, col4, now - 500, data),
      new KeyValue(row1, fam2, col5, now - 10000, data),
      new KeyValue(row2, fam1, col1, now - 10, data) };
  KeyValue k = kvs[0];
  qm.setToNewRow(k);
  List<MatchCode> actual = new ArrayList<>(kvs.length);
  for (KeyValue kv : kvs) {
    actual.add(qm.match(kv));
  }
  assertEquals(expected.length, actual.size());
  for (int i = 0; i < expected.length; i++) {
    LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
    assertEquals(expected[i], actual.get(i));
  }
}
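The expiry cutoff the test passes to UserScanQueryMatcher.create is now - testTTL, the "oldest unexpired timestamp". A small standalone sketch of that arithmetic against the test's own timestamps shows why only col3 and col5 fall below the cutoff; the class name and the strict-comparison assumption are illustrative (the exact boundary handling lives inside the matcher).

public class TtlCutoffDemo {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long testTTL = 1000L;
    // Cells older than this cutoff are treated as expired.
    long oldestUnexpiredTs = now - testTTL;
    long[] ages = { 100, 50, 5000, 500, 10000 }; // col1..col5 from the test
    for (int i = 0; i < ages.length; i++) {
      long ts = now - ages[i];
      System.out.println("col" + (i + 1) + ": expired=" + (ts < oldestUnexpiredTs));
    }
  }
}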
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
From the class TestStripeCompactor, method createCompactor:
private StripeCompactor createCompactor(StoreFileWritersCapture writers, KeyValue[] input)
    throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
  final Scanner scanner = new Scanner(input);
  // Create a store mock that is satisfactory for the compactor.
  HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
  ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
  Store store = mock(Store.class);
  when(store.getFamily()).thenReturn(col);
  when(store.getScanInfo()).thenReturn(si);
  when(store.areWritesEnabled()).thenReturn(true);
  when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
  when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
  when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
      anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
  when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
  // Override scanner creation so the compactor reads the canned input
  // instead of real store files.
  return new StripeCompactor(conf, store) {
    @Override
    protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
        long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow,
        byte[] dropDeletesToRow) throws IOException {
      return scanner;
    }

    @Override
    protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
        ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
      return scanner;
    }
  };
}
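The only non-trivial stub above is createWriterInTmp(...), which is answered by the StoreFileWritersCapture helper. The following generic, self-contained sketch shows that thenAnswer capture technique in isolation; the interface and names are invented for illustration and are not part of the hbase test.

import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class AnswerCaptureDemo {
  // Illustrative stand-in for the writer-producing collaborator.
  interface WriterFactory {
    StringBuilder createWriter(long maxKeyCount);
  }

  public static void main(String[] args) {
    List<StringBuilder> created = new ArrayList<>();
    // Like StoreFileWritersCapture, the Answer both fabricates the return
    // value and records it so the test can inspect what was written later.
    Answer<StringBuilder> capture = (InvocationOnMock inv) -> {
      StringBuilder writer = new StringBuilder();
      created.add(writer);
      return writer;
    };
    WriterFactory factory = mock(WriterFactory.class);
    when(factory.createWriter(anyLong())).thenAnswer(capture);
    factory.createWriter(1000L).append("row-1");
    System.out.println("writers created: " + created.size());
  }
}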