Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
The class ZooKeeperScanPolicyObserver, method preCompactScannerOpen.
@Override
public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
    InternalScanner s) throws IOException {
  ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
  if (scanInfo == null) {
    // take default action
    return null;
  }
  Scan scan = new Scan();
  scan.setMaxVersions(scanInfo.getMaxVersions());
  return new StoreScanner(store, scanInfo, scan, scanners, scanType,
      store.getSmallestReadPoint(), earliestPutTs);
}
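As a hedged aside, not part of the example itself: for this hook to run during a table's compactions, the observer has to be attached to the table, for instance via the table descriptor. A minimal sketch, with hypothetical table and family names:

// Sketch only; names and connection setup are illustrative, not from the example.
Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf);
    Admin admin = conn.getAdmin()) {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example_table"));
  desc.addFamily(new HColumnDescriptor("fam"));
  desc.addCoprocessor(ZooKeeperScanPolicyObserver.class.getName());
  admin.createTable(desc);
}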
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
The class ZooKeeperScanPolicyObserver, method getScanInfo.
protected ScanInfo getScanInfo(Store store, RegionCoprocessorEnvironment e) {
  byte[] data = ((ZKWatcher) e.getSharedData().get(zkkey)).getData();
  if (data == null) {
    // No policy data in ZooKeeper; keep the store's default ScanInfo.
    return null;
  }
  ScanInfo oldSI = store.getScanInfo();
  if (oldSI.getTtl() == Long.MAX_VALUE) {
    // The family never expires; nothing to adjust.
    return null;
  }
  // Interpret the znode payload as a timestamp: expire everything older than it,
  // but never use a TTL shorter than the one configured on the family.
  long ttl = Math.max(EnvironmentEdgeManager.currentTime() - Bytes.toLong(data), oldSI.getTtl());
  return new ScanInfo(oldSI.getConfiguration(), store.getFamily(), ttl,
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
}
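The payload read from the shared ZKWatcher above is assumed to be an 8-byte long timestamp, as produced by Bytes.toBytes(long). A minimal sketch of writing such a payload with the plain ZooKeeper client; the znode path and quorum address are assumptions, not taken from the example:

// Sketch only; path and quorum are placeholders.
ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, null);
byte[] payload = Bytes.toBytes(EnvironmentEdgeManager.currentTime());
zk.setData("/backup/example/lastbackup", payload, -1);  // -1 matches any znode version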
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
The class TestDateTieredCompactor, method createCompactor.
private DateTieredCompactor createCompactor(StoreFileWritersCapture writers,
    final KeyValue[] input, List<StoreFile> storefiles) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
  final Scanner scanner = new Scanner(input);
  // Create a store mock that is satisfactory for the compactor.
  HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
  ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
  final Store store = mock(Store.class);
  when(store.getStorefiles()).thenReturn(storefiles);
  when(store.getFamily()).thenReturn(col);
  when(store.getScanInfo()).thenReturn(si);
  when(store.areWritesEnabled()).thenReturn(true);
  when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
  when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
  when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
      anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
  when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
  long maxSequenceId = StoreFile.getMaxSequenceIdInList(storefiles);
  when(store.getMaxSequenceId()).thenReturn(maxSequenceId);
  return new DateTieredCompactor(conf, store) {
    @Override
    protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
        long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow,
        byte[] dropDeletesToRow) throws IOException {
      return scanner;
    }

    @Override
    protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
        ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
      return scanner;
    }
  };
}
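A hedged sketch of how a test might drive the compactor returned above, assuming the date-tiered compact(request, lowerBoundaries, throughputController, user) entry point; the boundary values are placeholders, not taken from the actual test:

// Sketch only; boundaries and inputs are illustrative.
DateTieredCompactor dtc = createCompactor(writers, input, storefiles);
List<Path> paths = dtc.compact(new CompactionRequest(storefiles),
    Arrays.asList(100L, 200L),              // assumed per-tier lower boundaries
    NoLimitThroughputController.INSTANCE,   // no throttling in tests
    null);                                  // no User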
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
The class TestUserScanQueryMatcher, method testMatch_ExpiredWildcard.
/**
* Verify that {@link ScanQueryMatcher} only skips expired KeyValue instances and does not exit
* early from the row (skipping later non-expired KeyValues). This version mimics a Get with
* wildcard-inferred column qualifiers.
* @throws IOException
*/
@Test
public void testMatch_ExpiredWildcard() throws IOException {
  long testTTL = 1000;
  MatchCode[] expected = new MatchCode[] {
    ScanQueryMatcher.MatchCode.INCLUDE,
    ScanQueryMatcher.MatchCode.INCLUDE,
    ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
    ScanQueryMatcher.MatchCode.INCLUDE,
    ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
    ScanQueryMatcher.MatchCode.DONE };
  long now = EnvironmentEdgeManager.currentTime();
  UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
    new ScanInfo(this.conf, fam2, 0, 1, testTTL, KeepDeletedCells.FALSE, 0, rowComparator),
    null, now - testTTL, now, null);
  KeyValue[] kvs = new KeyValue[] {
    new KeyValue(row1, fam2, col1, now - 100, data),
    new KeyValue(row1, fam2, col2, now - 50, data),
    new KeyValue(row1, fam2, col3, now - 5000, data),
    new KeyValue(row1, fam2, col4, now - 500, data),
    new KeyValue(row1, fam2, col5, now - 10000, data),
    new KeyValue(row2, fam1, col1, now - 10, data) };
  KeyValue k = kvs[0];
  qm.setToNewRow(k);
  List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(kvs.length);
  for (KeyValue kv : kvs) {
    actual.add(qm.match(kv));
  }
  assertEquals(expected.length, actual.size());
  for (int i = 0; i < expected.length; i++) {
    LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
    assertEquals(expected[i], actual.get(i));
  }
}
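For readability, the positional arguments to the ScanInfo constructor used above map as follows, assuming the (conf, family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator) overload; the inline names are descriptive, not the class's exact parameter names:

new ScanInfo(this.conf, fam2,
    0,                      // minVersions
    1,                      // maxVersions
    testTTL,                // ttl: cells older than now - ttl are treated as expired
    KeepDeletedCells.FALSE, // do not retain deleted cells
    0,                      // timeToPurgeDeletes
    rowComparator);         // cell comparator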
Use of org.apache.hadoop.hbase.regionserver.ScanInfo in project hbase by apache.
The class TestCompactionScanQueryMatcher, method testDropDeletes.
private void testDropDeletes(byte[] from, byte[] to, byte[][] rows, MatchCode... expected)
    throws IOException {
  long now = EnvironmentEdgeManager.currentTime();
  // Set the time to purge deletes to a negative value to avoid it ever happening.
  ScanInfo scanInfo =
    new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, -1L, rowComparator);
  CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo,
    ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP,
    HConstants.OLDEST_TIMESTAMP, now, from, to, null);
  List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(rows.length);
  byte[] prevRow = null;
  for (byte[] row : rows) {
    if (prevRow == null || !Bytes.equals(prevRow, row)) {
      qm.setToNewRow(KeyValueUtil.createFirstOnRow(row));
      prevRow = row;
    }
    actual.add(qm.match(new KeyValue(row, fam2, null, now, Type.Delete)));
  }
  assertEquals(expected.length, actual.size());
  for (int i = 0; i < expected.length; i++) {
    LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
    assertEquals(expected[i], actual.get(i));
  }
}
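A hypothetical call shape for this helper; the rows and expected codes below are placeholders rather than values from the real test. The idea is that under COMPACT_RETAIN_DELETES, delete markers whose rows fall inside the [from, to) drop range are expected to be skipped, while those outside are retained:

// Illustrative only; the real test's rows and expectations may differ.
testDropDeletes(row2, row3, new byte[][] { row1, row2, row3 },
    ScanQueryMatcher.MatchCode.INCLUDE,  // row1 is before the drop range
    ScanQueryMatcher.MatchCode.SKIP,     // row2 falls inside [from, to)
    ScanQueryMatcher.MatchCode.INCLUDE); // row3 is at or past the exclusive end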