
Example 36 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.

From class TestFilter, method verifyScanFull.

private void verifyScanFull(Scan s, KeyValue[] kvs) throws IOException {
    InternalScanner scanner = this.region.getScanner(s);
    List<Cell> results = new ArrayList<>();
    int row = 0;
    int idx = 0;
    for (boolean done = true; done; row++) {
        done = scanner.next(results);
        // Note: this sorts a throwaway copy of the list; `results` itself keeps scanner order.
        Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.COMPARATOR);
        if (results.isEmpty())
            break;
        assertTrue("Scanned too many keys! Only expected " + kvs.length + " total but already scanned " + (results.size() + idx) + (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), kvs.length >= idx + results.size());
        for (Cell kv : results) {
            LOG.info("row=" + row + ", result=" + kv.toString() + ", match=" + kvs[idx].toString());
            assertTrue("Row mismatch", CellUtil.matchingRow(kv, kvs[idx]));
            assertTrue("Family mismatch", CellUtil.matchingFamily(kv, kvs[idx]));
            assertTrue("Qualifier mismatch", CellUtil.matchingQualifier(kv, kvs[idx]));
            assertTrue("Value mismatch", CellUtil.matchingValue(kv, kvs[idx]));
            idx++;
        }
        results.clear();
    }
    LOG.info("Looked at " + row + " rows with " + idx + " keys");
    assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx);
}
Also used: InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), Cell (org.apache.hadoop.hbase.Cell)
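
The helper takes any pre-configured Scan. A minimal usage sketch from inside TestFilter, assuming an illustrative prefix filter and fixture bytes that are not taken from the real test:

// Hedged usage sketch: the filter choice and cell contents are illustrative.
Scan s = new Scan();
s.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne")));
KeyValue[] expected = {
    // Expected cells must be listed in CellComparator order, exactly as the
    // filtered scan should return them.
    new KeyValue(Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testFamilyOne"),
        Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testValueOne"))
};
verifyScanFull(s, expected);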

Example 37 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.

From class TestFilter, method tes94FilterRowCompatibility.

/**
   * Ensures that an old-style (e.g., HBase 0.94) filterRow() is still fired
   * correctly by the 0.96+ code base.
   *
   * See HBASE-10366.
   */
@Test
public void tes94FilterRowCompatibility() throws Exception {
    Scan s = new Scan();
    OldTestFilter filter = new OldTestFilter();
    s.setFilter(filter);
    InternalScanner scanner = this.region.getScanner(s);
    ArrayList<Cell> values = new ArrayList<>();
    scanner.next(values);
    assertTrue("All rows should be filtered out", values.isEmpty());
}
Also used: InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
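
OldTestFilter itself is defined elsewhere in TestFilter. A minimal sketch of what such a 0.94-style filter looks like, assuming it overrides only the argument-less filterRow() (the body shown here is illustrative):

// Illustrative 0.94-style filter: only the no-argument filterRow() is
// overridden; HBASE-10366 ensures the 0.96+ scan path still invokes it.
public static class OldTestFilter extends FilterBase {
    @Override
    public byte[] toByteArray() {
        return null;
    }

    @Override
    public boolean hasFilterRow() {
        return true;
    }

    @Override
    public boolean filterRow() {
        // Filter out every row, so the scan in the test returns nothing.
        return true;
    }
}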

Example 38 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.

From class TestFilter, method verifyScanFullNoValues.

private void verifyScanFullNoValues(Scan s, KeyValue[] kvs, boolean useLen) throws IOException {
    InternalScanner scanner = this.region.getScanner(s);
    List<Cell> results = new ArrayList<>();
    int row = 0;
    int idx = 0;
    for (boolean more = true; more; row++) {
        more = scanner.next(results);
        // As above: this sorts a throwaway copy; `results` itself keeps scanner order.
        Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.COMPARATOR);
        if (results.isEmpty())
            break;
        assertTrue("Scanned too many keys! Only expected " + kvs.length + " total but already scanned " + (results.size() + idx) + (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), kvs.length >= idx + results.size());
        for (Cell kv : results) {
            LOG.info("row=" + row + ", result=" + kv.toString() + ", match=" + kvs[idx].toString());
            assertTrue("Row mismatch", CellUtil.matchingRow(kv, kvs[idx]));
            assertTrue("Family mismatch", CellUtil.matchingFamily(kv, kvs[idx]));
            assertTrue("Qualifier mismatch", CellUtil.matchingQualifier(kv, kvs[idx]));
            assertFalse("Should not have returned whole value", CellUtil.matchingValue(kv, kvs[idx]));
            if (useLen) {
                // assertEquals takes (message, expected, actual).
                assertEquals("Value in result is not SIZEOF_INT", Bytes.SIZEOF_INT, kv.getValueLength());
                LOG.info("idx = " + idx + ", len=" + kvs[idx].getValueLength() + ", actual=" + Bytes.toInt(CellUtil.cloneValue(kv)));
                assertEquals("Scan value should be the length of the actual value.", kvs[idx].getValueLength(), Bytes.toInt(CellUtil.cloneValue(kv)));
                LOG.info("good");
            } else {
                assertEquals("Value in result is not empty", 0, kv.getValueLength());
            }
            idx++;
        }
        results.clear();
    }
    LOG.info("Looked at " + row + " rows with " + idx + " keys");
    assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx);
}
Also used: InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), Cell (org.apache.hadoop.hbase.Cell)
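
The useLen flag mirrors the lenAsVal option of KeyOnlyFilter; pairing the two is an assumption about how this helper is typically driven, and expectedKVs is a hypothetical fixture:

// Hedged sketch: KeyOnlyFilter strips values; with lenAsVal=true each value is
// replaced by the original value length encoded as a 4-byte int (Bytes.SIZEOF_INT).
Scan s1 = new Scan();
s1.setFilter(new KeyOnlyFilter(true));
verifyScanFullNoValues(s1, expectedKVs, true);   // useLen matches lenAsVal=true

Scan s2 = new Scan();
s2.setFilter(new KeyOnlyFilter());               // values come back empty
verifyScanFullNoValues(s2, expectedKVs, false);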

Example 39 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.

From class TestScannerSelectionUsingKeyRange, method testScannerSelection.

@Test
public void testScannerSelection() throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt("hbase.hstore.compactionThreshold", 10000);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true).setBloomFilterType(bloomType);
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(TABLE);
    Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf, htd);
    for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
        for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
            Put put = new Put(Bytes.toBytes("row" + iRow));
            for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
                put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
            }
            region.put(put);
        }
        region.flush(true);
    }
    Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
    CacheConfig.blockCacheDisabled = false;
    CacheConfig cacheConf = new CacheConfig(conf);
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    InternalScanner scanner = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    while (scanner.next(results)) {
        // Intentionally empty: the range [aaa, aaz) precedes every "row<N>" key,
        // so the scan should return nothing.
    }
    scanner.close();
    assertEquals(0, results.size());
    Set<String> accessedFiles = cache.getCachedFileNamesForTest();
    assertEquals(expectedCount, accessedFiles.size());
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), ArrayList (java.util.ArrayList), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Region (org.apache.hadoop.hbase.regionserver.Region), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
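
The bare while (scanner.next(results)) {} drain is safe here only because the scan range is empty; next() can return false while still delivering a final batch into results. A sketch of the drain pattern Example 40 uses to handle that final batch:

// Drain pattern that also processes the batch delivered alongside the final
// next() == false:
List<Cell> results = new ArrayList<>();
while (scanner.next(results) || !results.isEmpty()) {
    // ... inspect results ...
    results.clear();
}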

Example 40 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in project hbase by apache.

From class TestScannerSelectionUsingTTL, method testScannerSelection.

@Test
public void testScannerSelection() throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean("hbase.store.delete.expired.storefile", false);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setMaxVersions(Integer.MAX_VALUE).setTimeToLive(TTL_SECONDS);
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(TABLE);
    Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd);
    long ts = EnvironmentEdgeManager.currentTime();
    // make sure each new set of Puts gets a new ts
    long version = 0;
    for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
        if (iFile == NUM_EXPIRED_FILES) {
            Threads.sleepWithoutInterrupt(TTL_MS);
            version += TTL_MS;
        }
        for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
            Put put = new Put(Bytes.toBytes("row" + iRow));
            for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
                put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), ts + version, Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
            }
            region.put(put);
        }
        region.flush(true);
        version++;
    }
    Scan scan = new Scan();
    scan.setMaxVersions(Integer.MAX_VALUE);
    CacheConfig cacheConf = new CacheConfig(conf);
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    InternalScanner scanner = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
    int numReturnedRows = 0;
    LOG.info("Scanning the entire table");
    while (scanner.next(results) || results.size() > 0) {
        assertEquals(expectedKVsPerRow, results.size());
        ++numReturnedRows;
        results.clear();
    }
    assertEquals(NUM_ROWS, numReturnedRows);
    Set<String> accessedFiles = cache.getCachedFileNamesForTest();
    LOG.debug("Files accessed during scan: " + accessedFiles);
    // Exercise both compaction codepaths.
    if (explicitCompaction) {
        HStore store = (HStore) region.getStore(FAMILY_BYTES);
        store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
    } else {
        region.compact(false);
    }
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), ArrayList (java.util.ArrayList), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Region (org.apache.hadoop.hbase.regionserver.Region), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), HStore (org.apache.hadoop.hbase.regionserver.HStore), Test (org.junit.Test)
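
The fields this test reads (TTL_SECONDS, TTL_MS, NUM_EXPIRED_FILES, totalNumFiles, numFreshFiles, explicitCompaction) are declared on TestScannerSelectionUsingTTL. A sketch with purely illustrative values, assuming the obvious relationships between them:

// Illustrative values only; in the real test totalNumFiles and
// explicitCompaction are supplied per parameterized run.
private static final int TTL_SECONDS = 2;
private static final int TTL_MS = TTL_SECONDS * 1000;
private static final int NUM_EXPIRED_FILES = 2;
private final int totalNumFiles = 4;  // expired files plus fresh files
private final int numFreshFiles = totalNumFiles - NUM_EXPIRED_FILES;
private final boolean explicitCompaction = false;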

Aggregations

InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner): 44 usages
ArrayList (java.util.ArrayList): 41 usages
Cell (org.apache.hadoop.hbase.Cell): 36 usages
Scan (org.apache.hadoop.hbase.client.Scan): 34 usages
Test (org.junit.Test): 17 usages
IOException (java.io.IOException): 15 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 12 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 12 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 9 usages
Put (org.apache.hadoop.hbase.client.Put): 9 usages
List (java.util.List): 7 usages
AggregateResponse (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse): 7 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 7 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 5 usages
HashMap (java.util.HashMap): 4 usages
ScanType (org.apache.hadoop.hbase.regionserver.ScanType): 4 usages
StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner): 4 usages
ByteString (com.google.protobuf.ByteString): 3 usages