Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache:
class TestFilter, method verifyScanFull.
/**
 * Runs the given Scan against {@code this.region} and asserts that exactly the
 * expected cells come back, comparing row, family, qualifier and value of each
 * returned cell against {@code kvs} in order.
 *
 * @param s   scan to execute against the test region
 * @param kvs the cells expected from the scan, in expected order
 * @throws IOException if the region scanner fails
 */
private void verifyScanFull(Scan s, KeyValue[] kvs) throws IOException {
  InternalScanner scanner = this.region.getScanner(s);
  List<Cell> results = new ArrayList<>();
  int row = 0;
  int idx = 0;
  try {
    for (boolean done = true; done; row++) {
      done = scanner.next(results);
      // BUGFIX: the old code called Arrays.sort(results.toArray(...), ...),
      // which sorted a throwaway copy and left `results` itself untouched.
      // Sort the list in place so the ordered comparison below is meaningful.
      results.sort(CellComparator.COMPARATOR);
      if (results.isEmpty()) {
        break;
      }
      assertTrue("Scanned too many keys! Only expected " + kvs.length
          + " total but already scanned " + (results.size() + idx)
          + (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"),
        kvs.length >= idx + results.size());
      for (Cell kv : results) {
        LOG.info("row=" + row + ", result=" + kv.toString() + ", match=" + kvs[idx].toString());
        assertTrue("Row mismatch", CellUtil.matchingRow(kv, kvs[idx]));
        assertTrue("Family mismatch", CellUtil.matchingFamily(kv, kvs[idx]));
        assertTrue("Qualifier mismatch", CellUtil.matchingQualifier(kv, kvs[idx]));
        assertTrue("Value mismatch", CellUtil.matchingValue(kv, kvs[idx]));
        idx++;
      }
      results.clear();
    }
  } finally {
    // Release scanner resources even when an assertion above fails.
    scanner.close();
  }
  LOG.info("Looked at " + row + " rows with " + idx + " keys");
  assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx);
}
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache:
class TestFilter, method tes94FilterRowCompatibility.
/**
 * The following test is to ensure old(such as hbase0.94) filterRow() can be correctly fired in
 * 0.96+ code base.
 *
 * See HBASE-10366
 *
 * @throws Exception
 */
@Test
public void tes94FilterRowCompatibility() throws Exception {
  // Install the legacy-style filter on a full-table scan.
  Scan scan = new Scan();
  scan.setFilter(new OldTestFilter());
  InternalScanner scanner = this.region.getScanner(scan);
  // A single next() call suffices here: the filter is expected to drop every row,
  // so nothing should ever land in the result list.
  ArrayList<Cell> results = new ArrayList<>();
  scanner.next(results);
  assertTrue("All rows should be filtered out", results.isEmpty());
}
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache:
class TestFilter, method verifyScanFullNoValues.
/**
 * Runs the given Scan (with a value-stripping filter applied) against
 * {@code this.region} and asserts that row/family/qualifier of each returned
 * cell match {@code kvs} in order, while the value is NOT the original value:
 * either an empty value, or (when {@code useLen}) a 4-byte int holding the
 * original value's length.
 *
 * @param s      scan to execute against the test region
 * @param kvs    the original cells the scan results are matched against
 * @param useLen true if values are expected to be replaced by their int length,
 *               false if values are expected to be empty
 * @throws IOException if the region scanner fails
 */
private void verifyScanFullNoValues(Scan s, KeyValue[] kvs, boolean useLen) throws IOException {
  InternalScanner scanner = this.region.getScanner(s);
  List<Cell> results = new ArrayList<>();
  int row = 0;
  int idx = 0;
  try {
    for (boolean more = true; more; row++) {
      more = scanner.next(results);
      // BUGFIX: the old code called Arrays.sort(results.toArray(...), ...),
      // which sorted a throwaway copy and left `results` itself untouched.
      // Sort the list in place so the ordered comparison below is meaningful.
      results.sort(CellComparator.COMPARATOR);
      if (results.isEmpty()) {
        break;
      }
      assertTrue("Scanned too many keys! Only expected " + kvs.length
          + " total but already scanned " + (results.size() + idx)
          + (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"),
        kvs.length >= idx + results.size());
      for (Cell kv : results) {
        LOG.info("row=" + row + ", result=" + kv.toString() + ", match=" + kvs[idx].toString());
        assertTrue("Row mismatch", CellUtil.matchingRow(kv, kvs[idx]));
        assertTrue("Family mismatch", CellUtil.matchingFamily(kv, kvs[idx]));
        assertTrue("Qualifier mismatch", CellUtil.matchingQualifier(kv, kvs[idx]));
        assertFalse("Should not have returned whole value", CellUtil.matchingValue(kv, kvs[idx]));
        if (useLen) {
          // BUGFIX (messages only): assertEquals takes (message, expected, actual);
          // the old code had expected and actual swapped, producing misleading
          // failure output. Pass/fail behavior is unchanged.
          assertEquals("Value in result is not SIZEOF_INT", Bytes.SIZEOF_INT, kv.getValueLength());
          LOG.info("idx = " + idx + ", len=" + kvs[idx].getValueLength() + ", actual="
            + Bytes.toInt(CellUtil.cloneValue(kv)));
          assertEquals("Scan value should be the length of the actual value. ",
            kvs[idx].getValueLength(), Bytes.toInt(CellUtil.cloneValue(kv)));
          LOG.info("good");
        } else {
          assertEquals("Value in result is not empty", 0, kv.getValueLength());
        }
        idx++;
      }
      results.clear();
    }
  } finally {
    // Release scanner resources even when an assertion above fails.
    scanner.close();
  }
  LOG.info("Looked at " + row + " rows with " + idx + " keys");
  assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx);
}
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache:
class TestScannerSelectionUsingKeyRange, method testScannerSelection.
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Set the compaction threshold very high so the NUM_FILES flushes below
  // remain separate store files for the scanner-selection check.
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES)
    .setBlockCacheEnabled(true)
    .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  Region region =
    HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf, htd);
  try {
    // Create NUM_FILES store files, each holding NUM_ROWS x NUM_COLS_PER_ROW cells.
    for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
      for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
        Put put = new Put(Bytes.toBytes("row" + iRow));
        for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
          put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
        }
        region.put(put);
      }
      region.flush(true);
    }
    // Scan a key range that matches no data ("row..." keys sort after "aaz"),
    // then verify how many files were touched via the block cache.
    Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
    CacheConfig.blockCacheDisabled = false;
    CacheConfig cacheConf = new CacheConfig(conf);
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    InternalScanner scanner = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    try {
      while (scanner.next(results)) {
        continue;
      }
    } finally {
      // BUGFIX: close the scanner even if next() throws, instead of only on
      // the success path.
      scanner.close();
    }
    assertEquals(0, results.size());
    Set<String> accessedFiles = cache.getCachedFileNamesForTest();
    assertEquals(expectedCount, accessedFiles.size());
  } finally {
    // BUGFIX: always release the region and WAL, even when an assertion fails,
    // so a failing run does not leak resources into subsequent tests.
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache:
class TestScannerSelectionUsingTTL, method testScannerSelection.
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Keep expired store files around so TTL-based scanner selection is exercised.
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES)
    .setMaxVersions(Integer.MAX_VALUE)
    .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  Region region = HBaseTestingUtility.createRegionAndWAL(info,
    TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd);
  try {
    long ts = EnvironmentEdgeManager.currentTime();
    // make sure each new set of Put's have a new ts
    long version = 0;
    for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
      if (iFile == NUM_EXPIRED_FILES) {
        // Let the first NUM_EXPIRED_FILES files age past the TTL, and bump the
        // timestamp base so the fresh files do not collide with expired ones.
        Threads.sleepWithoutInterrupt(TTL_MS);
        version += TTL_MS;
      }
      for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
        Put put = new Put(Bytes.toBytes("row" + iRow));
        for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
          put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), ts + version,
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
        }
        region.put(put);
      }
      region.flush(true);
      version++;
    }
    Scan scan = new Scan();
    scan.setMaxVersions(Integer.MAX_VALUE);
    CacheConfig cacheConf = new CacheConfig(conf);
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    InternalScanner scanner = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
    int numReturnedRows = 0;
    LOG.info("Scanning the entire table");
    try {
      // Only cells from the non-expired files should be returned for each row.
      while (scanner.next(results) || results.size() > 0) {
        assertEquals(expectedKVsPerRow, results.size());
        ++numReturnedRows;
        results.clear();
      }
    } finally {
      // BUGFIX: the scanner was never closed; release it even on failure.
      scanner.close();
    }
    assertEquals(NUM_ROWS, numReturnedRows);
    Set<String> accessedFiles = cache.getCachedFileNamesForTest();
    LOG.debug("Files accessed during scan: " + accessedFiles);
    // Exercise both compaction codepaths.
    if (explicitCompaction) {
      HStore store = (HStore) region.getStore(FAMILY_BYTES);
      store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
    } else {
      region.compact(false);
    }
  } finally {
    // BUGFIX: always release the region and WAL, even when an assertion fails,
    // so a failing run does not leak resources into subsequent tests.
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
Aggregations