
Example 1 with HStore

Use of org.apache.hadoop.hbase.regionserver.HStore in project hbase by apache.

From the class TestCompactionWithThroughputController, method testGetCompactionPressureForStripedStore:

/**
   * Test the logic by which we calculate compaction pressure for a striped store.
   */
@Test
public void testGetCompactionPressureForStripedStore() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName());
    conf.setBoolean(StripeStoreConfig.FLUSH_TO_L0_KEY, false);
    conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, 2);
    conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 4);
    conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 12);
    TEST_UTIL.startMiniCluster(1);
    Connection conn = ConnectionFactory.createConnection(conf);
    try {
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(family));
        htd.setCompactionEnabled(false);
        TEST_UTIL.getAdmin().createTable(htd);
        TEST_UTIL.waitTableAvailable(tableName);
        HStore store = (HStore) getStoreWithName(tableName);
        assertEquals(0, store.getStorefilesCount());
        assertEquals(0.0, store.getCompactionPressure(), EPSILON);
        Table table = conn.getTable(tableName);
        for (int i = 0; i < 4; i++) {
            byte[] value1 = new byte[0];
            table.put(new Put(Bytes.toBytes(i)).addColumn(family, qualifier, value1));
            byte[] value = new byte[0];
            table.put(new Put(Bytes.toBytes(100 + i)).addColumn(family, qualifier, value));
            TEST_UTIL.flush(tableName);
        }
        assertEquals(8, store.getStorefilesCount());
        assertEquals(0.0, store.getCompactionPressure(), EPSILON);
        byte[] value5 = new byte[0];
        table.put(new Put(Bytes.toBytes(4)).addColumn(family, qualifier, value5));
        byte[] value4 = new byte[0];
        table.put(new Put(Bytes.toBytes(104)).addColumn(family, qualifier, value4));
        TEST_UTIL.flush(tableName);
        assertEquals(10, store.getStorefilesCount());
        assertEquals(0.5, store.getCompactionPressure(), EPSILON);
        byte[] value3 = new byte[0];
        table.put(new Put(Bytes.toBytes(5)).addColumn(family, qualifier, value3));
        byte[] value2 = new byte[0];
        table.put(new Put(Bytes.toBytes(105)).addColumn(family, qualifier, value2));
        TEST_UTIL.flush(tableName);
        assertEquals(12, store.getStorefilesCount());
        assertEquals(1.0, store.getCompactionPressure(), EPSILON);
        byte[] value1 = new byte[0];
        table.put(new Put(Bytes.toBytes(6)).addColumn(family, qualifier, value1));
        byte[] value = new byte[0];
        table.put(new Put(Bytes.toBytes(106)).addColumn(family, qualifier, value));
        TEST_UTIL.flush(tableName);
        assertEquals(14, store.getStorefilesCount());
        assertEquals(2.0, store.getCompactionPressure(), EPSILON);
    } finally {
        conn.close();
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Configuration (org.apache.hadoop.conf.Configuration), CompactionConfiguration (org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration), StripeStoreEngine (org.apache.hadoop.hbase.regionserver.StripeStoreEngine), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), Connection (org.apache.hadoop.hbase.client.Connection), HStore (org.apache.hadoop.hbase.regionserver.HStore), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Test (org.junit.Test)
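
The asserted pressure values track how far each stripe is between the minimum compaction file count (4) and its share of the blocking file count (12 / 2 stripes = 6). The following is a simplified, self-contained sketch of that calculation, assuming files are spread evenly across stripes and that exceeding the blocking count reports a fixed 2.0; the class and method names are invented for illustration, and this is not the StripeStoreFileManager implementation.

/**
 * Simplified model of compaction pressure for a striped store layout.
 * Hypothetical illustration only: assumes an even spread of files across
 * stripes and a fixed 2.0 once the blocking file count is exceeded.
 */
public final class StripePressureModel {

    public static double compactionPressure(int totalFiles, int stripeCount,
            int minFilesToCompact, int blockingFileCount) {
        if (totalFiles > blockingFileCount) {
            // Past the blocking limit: report a value above 1.0 to signal urgency.
            return 2.0;
        }
        int filesPerStripe = totalFiles / stripeCount;
        int blockingPerStripe = blockingFileCount / stripeCount;
        if (filesPerStripe <= minFilesToCompact) {
            // Nothing worth compacting yet.
            return 0.0;
        }
        // Scale linearly from 0 at minFilesToCompact to 1 at the per-stripe blocking count.
        return (double) (filesPerStripe - minFilesToCompact)
                / (blockingPerStripe - minFilesToCompact);
    }

    public static void main(String[] args) {
        // Mirrors the assertions above: 8 -> 0.0, 10 -> 0.5, 12 -> 1.0, 14 -> 2.0
        for (int files : new int[] { 8, 10, 12, 14 }) {
            System.out.println(files + " files -> pressure "
                    + compactionPressure(files, 2, 4, 12));
        }
    }
}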

Example 2 with HStore

Use of org.apache.hadoop.hbase.regionserver.HStore in project hbase by apache.

From the class TestCompactedHFilesDischarger, method testCleanerWithParallelScanners:

@Test
public void testCleanerWithParallelScanners() throws Exception {
    // Create the cleaner object
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
    // Add some data to the region and do some flushes
    for (int i = 1; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 11; i < 20; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 21; i < 30; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    Store store = region.getStore(fam);
    assertEquals(3, store.getStorefilesCount());
    Collection<StoreFile> storefiles = store.getStorefiles();
    Collection<StoreFile> compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    // None of the files should be in compacted state.
    for (StoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    startScannerThreads();
    // Do compaction
    region.compact(true);
    storefiles = store.getStorefiles();
    int usedReaderCount = 0;
    int unusedReaderCount = 0;
    for (StoreFile file : storefiles) {
        if (file.getRefCount() == 0) {
            unusedReaderCount++;
        }
    }
    compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    for (StoreFile file : compactedfiles) {
        assertEquals("Refcount should be 3", 3, file.getRefCount());
        usedReaderCount++;
    }
    // The newly compacted file will not be used by any scanner
    assertEquals("unused reader count should be 1", 1, unusedReaderCount);
    assertEquals("used reader count should be 3", 3, usedReaderCount);
    // now run the cleaner
    cleaner.chore();
    countDown();
    // No change in the number of store files as none of the compacted files could be cleaned up
    assertEquals(1, store.getStorefilesCount());
    assertEquals(3, ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles().size());
    while (scanCompletedCounter.get() != 3) {
        Thread.sleep(100);
    }
    // reset
    latch = new CountDownLatch(3);
    scanCompletedCounter.set(0);
    counter.set(0);
    // Try creating a new scanner and it should use only the new file created after compaction
    startScannerThreads();
    storefiles = store.getStorefiles();
    usedReaderCount = 0;
    unusedReaderCount = 0;
    for (StoreFile file : storefiles) {
        if (file.getRefCount() == 3) {
            usedReaderCount++;
        }
    }
    compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    for (StoreFile file : compactedfiles) {
        assertEquals("Refcount should be 0", 0, file.getRefCount());
        unusedReaderCount++;
    }
    // Though there are files we are not using them for reads
    assertEquals("unused reader count should be 3", 3, unusedReaderCount);
    assertEquals("used reader count should be 1", 1, usedReaderCount);
    countDown();
    while (scanCompletedCounter.get() != 3) {
        Thread.sleep(100);
    }
    // Run the cleaner again
    cleaner.chore();
    // Now the cleaner should be able to clear it up because there are no active readers
    assertEquals(1, store.getStorefilesCount());
    storefiles = store.getStorefiles();
    for (StoreFile file : storefiles) {
        // Should not be in compacted state
        assertFalse(file.isCompactedAway());
    }
    compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    assertTrue(compactedfiles.isEmpty());
}
Also used: CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), HStore (org.apache.hadoop.hbase.regionserver.HStore), Store (org.apache.hadoop.hbase.regionserver.Store), CountDownLatch (java.util.concurrent.CountDownLatch), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
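
The test hinges on the interplay between per-file reference counts and the discharger chore: a compacted-away file can be archived only once no scanner holds a reference to it, which is why the first cleaner run removes nothing while scanners are open. The sketch below is a minimal model of that decision under those assumptions; the StoreFileLike interface and archive() method are invented for illustration, and this is not the CompactedHFilesDischarger implementation.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

/** Minimal model of a compacted-file cleaner driven by reference counts. */
final class CompactedFileCleanerModel {

    /** Hypothetical view of a store file; mirrors isCompactedAway()/getRefCount() above. */
    interface StoreFileLike {
        boolean isCompactedAway();
        int getRefCount();
        void archive(); // assumed to move the file out of the store directory
    }

    /**
     * One "chore" pass: archive only files that are both compacted away and no
     * longer referenced by any scanner; everything still held open is skipped.
     */
    static List<StoreFileLike> chore(Collection<StoreFileLike> compactedFiles) {
        List<StoreFileLike> archived = new ArrayList<>();
        for (StoreFileLike f : compactedFiles) {
            if (f.isCompactedAway() && f.getRefCount() == 0) {
                f.archive();
                archived.add(f);
            }
        }
        return archived;
    }
}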

Example 3 with HStore

Use of org.apache.hadoop.hbase.regionserver.HStore in project hbase by apache.

From the class TestScannerSelectionUsingTTL, method testScannerSelection:

@Test
public void testScannerSelection() throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean("hbase.store.delete.expired.storefile", false);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setMaxVersions(Integer.MAX_VALUE).setTimeToLive(TTL_SECONDS);
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(TABLE);
    Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd);
    long ts = EnvironmentEdgeManager.currentTime();
    // make sure each new set of Puts has a new ts
    long version = 0;
    for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
        if (iFile == NUM_EXPIRED_FILES) {
            Threads.sleepWithoutInterrupt(TTL_MS);
            version += TTL_MS;
        }
        for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
            Put put = new Put(Bytes.toBytes("row" + iRow));
            for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
                put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), ts + version, Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
            }
            region.put(put);
        }
        region.flush(true);
        version++;
    }
    Scan scan = new Scan();
    scan.setMaxVersions(Integer.MAX_VALUE);
    CacheConfig cacheConf = new CacheConfig(conf);
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    InternalScanner scanner = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
    int numReturnedRows = 0;
    LOG.info("Scanning the entire table");
    while (scanner.next(results) || results.size() > 0) {
        assertEquals(expectedKVsPerRow, results.size());
        ++numReturnedRows;
        results.clear();
    }
    assertEquals(NUM_ROWS, numReturnedRows);
    Set<String> accessedFiles = cache.getCachedFileNamesForTest();
    LOG.debug("Files accessed during scan: " + accessedFiles);
    // Exercise both compaction codepaths.
    if (explicitCompaction) {
        HStore store = (HStore) region.getStore(FAMILY_BYTES);
        store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
    } else {
        region.compact(false);
    }
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), ArrayList (java.util.ArrayList), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Region (org.apache.hadoop.hbase.regionserver.Region), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), HStore (org.apache.hadoop.hbase.regionserver.HStore), Test (org.junit.Test)
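
With hbase.store.delete.expired.storefile disabled, expired files stay on disk but are still skipped at scan time, which is why each row yields numFreshFiles * NUM_COLS_PER_ROW cells. The sketch below is a rough model of TTL-based file filtering under the assumption that a per-file maximum cell timestamp is available; the types and names are invented, and this is not the HBase scanner-selection API.

import java.util.ArrayList;
import java.util.List;

/** Rough model of TTL-based store file selection at scan time. */
final class TtlFileSelectionModel {

    /** Hypothetical file descriptor exposing only what the filter needs. */
    static final class FileInfo {
        final String name;
        final long maxTimestampMs; // newest cell timestamp in the file (assumed known)

        FileInfo(String name, long maxTimestampMs) {
            this.name = name;
            this.maxTimestampMs = maxTimestampMs;
        }
    }

    /**
     * Keep a file for the scan only if it can still contain unexpired cells,
     * i.e. its newest cell is younger than the column family TTL.
     */
    static List<FileInfo> selectForScan(List<FileInfo> files, long ttlMs, long nowMs) {
        List<FileInfo> selected = new ArrayList<>();
        for (FileInfo f : files) {
            if (nowMs - f.maxTimestampMs < ttlMs) {
                selected.add(f);
            }
        }
        return selected;
    }
}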

Example 4 with HStore

Use of org.apache.hadoop.hbase.regionserver.HStore in project hbase by apache.

From the class AbstractTestWALReplay, method testReplayEditsWrittenViaHRegion:

/**
   * Test writing edits into an HRegion, closing it, splitting logs, opening
   * Region again.  Verify seqids.
   * @throws IOException
   * @throws IllegalAccessException
   * @throws NoSuchFieldException
   * @throws IllegalArgumentException
   * @throws SecurityException
   */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families.  Do a flush on one
    // of the families during the load of edits so its seqid is not same as
    // others to test we do right thing when different seqids.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // If first, flush so that at least one family has a seqid different from the rest.
            region.flush(true);
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());
        // Next, add more edits, then 'crash' this region by stealing its wal out from under it and
        // assert that replay of the log adds the edits back correctly when the region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            WAL wal3 = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {

                @Override
                protected void restoreEdit(HStore s, Cell cell, MemstoreSize memstoreSize) {
                    super.restoreEdit(s, cell, memstoreSize);
                    countOfRestoredEdits.incrementAndGet();
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());
            // We can't close wal2 here; it's been appropriated when we split the log.
            region3.close();
            wal3.close();
            return null;
        }
    });
}
Also used: Path (org.apache.hadoop.fs.Path), WAL (org.apache.hadoop.hbase.wal.WAL), User (org.apache.hadoop.hbase.security.User), Configuration (org.apache.hadoop.conf.Configuration), MemstoreSize (org.apache.hadoop.hbase.regionserver.MemstoreSize), IOException (java.io.IOException), Result (org.apache.hadoop.hbase.client.Result), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Get (org.apache.hadoop.hbase.client.Get), FileSystem (org.apache.hadoop.fs.FileSystem), HStore (org.apache.hadoop.hbase.regionserver.HStore), Test (org.junit.Test)
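
The seqid assertions rest on the rule that, on reopen, only WAL edits newer than what has already been flushed are replayed; anything at or below the store's flushed sequence id is treated as stale and skipped. Below is a condensed sketch of that filter with invented names, not the HRegion replay code; the restored-edit counter corresponds to countOfRestoredEdits in the test.

import java.util.concurrent.atomic.AtomicInteger;

/** Condensed model of WAL replay: skip edits already covered by a flush. */
final class WalReplayModel {

    /** Hypothetical WAL edit carrying the sequence id it was written with. */
    static final class Edit {
        final long seqId;
        Edit(long seqId) { this.seqId = seqId; }
    }

    /**
     * Apply an edit only if its seqid is greater than the highest seqid already
     * flushed for its column family; otherwise it is already in a store file.
     */
    static boolean maybeRestoreEdit(Edit edit, long maxFlushedSeqIdForFamily,
            AtomicInteger restoredCounter) {
        if (edit.seqId <= maxFlushedSeqIdForFamily) {
            return false; // stale edit, skip it
        }
        // The real region would re-insert the cell into the memstore here.
        restoredCounter.incrementAndGet();
        return true;
    }
}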

Example 5 with HStore

Use of org.apache.hadoop.hbase.regionserver.HStore in project hbase by apache.

From the class TestCompactedHFilesDischarger, method testCompactedHFilesCleaner:

@Test
public void testCompactedHFilesCleaner() throws Exception {
    // Create the cleaner object
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
    // Add some data to the region and do some flushes
    for (int i = 1; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 11; i < 20; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 21; i < 30; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    Store store = region.getStore(fam);
    assertEquals(3, store.getStorefilesCount());
    Collection<StoreFile> storefiles = store.getStorefiles();
    Collection<StoreFile> compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    // None of the files should be in compacted state.
    for (StoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // Try to run the cleaner without compaction. there should not be any change
    cleaner.chore();
    storefiles = store.getStorefiles();
    // None of the files should be in compacted state.
    for (StoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // now do some compaction
    region.compact(true);
    // The flushed files should still be present until the cleaner runs, but they should now be
    // marked as compacted
    assertEquals(1, store.getStorefilesCount());
    assertEquals(3, ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles().size());
    // Run the cleaner
    cleaner.chore();
    assertEquals(1, store.getStorefilesCount());
    storefiles = store.getStorefiles();
    for (StoreFile file : storefiles) {
        // Should not be in compacted state
        assertFalse(file.isCompactedAway());
    }
    compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    assertTrue(compactedfiles.isEmpty());
}
Also used: CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), HStore (org.apache.hadoop.hbase.regionserver.HStore), Store (org.apache.hadoop.hbase.regionserver.Store), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
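
Taken together, the assertions describe a two-phase lifecycle: compaction moves the input files from the active set to the compacted set without deleting them, and a later cleaner pass empties the compacted set once nothing reads from it. The sketch below captures just that bookkeeping with hypothetical collections; it is not the StoreFileManager API.

import java.util.ArrayList;
import java.util.List;

/** Bookkeeping-only model of the active vs. compacted store file sets. */
final class StoreFileSetsModel {
    final List<String> storefiles = new ArrayList<>();     // files visible to new scanners
    final List<String> compactedfiles = new ArrayList<>(); // compacted away, awaiting cleanup

    /** Compaction: the inputs move to the compacted set, one output becomes active. */
    void compact(List<String> inputs, String output) {
        storefiles.removeAll(inputs);
        compactedfiles.addAll(inputs);
        storefiles.add(output);
    }

    /** Cleaner chore: once no readers remain, the compacted set is emptied. */
    void chore(boolean anyActiveReaders) {
        if (!anyActiveReaders) {
            compactedfiles.clear();
        }
    }
}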

Aggregations

HStore (org.apache.hadoop.hbase.regionserver.HStore): 7 usages
Test (org.junit.Test): 6 usages
Put (org.apache.hadoop.hbase.client.Put): 5 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger): 3 usages
Store (org.apache.hadoop.hbase.regionserver.Store): 3 usages
StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile): 3 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 2 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 2 usages
IOException (java.io.IOException): 1 usage
ArrayList (java.util.ArrayList): 1 usage
CountDownLatch (java.util.concurrent.CountDownLatch): 1 usage
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1 usage
FileSystem (org.apache.hadoop.fs.FileSystem): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
Cell (org.apache.hadoop.hbase.Cell): 1 usage
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 1 usage
Connection (org.apache.hadoop.hbase.client.Connection): 1 usage
Get (org.apache.hadoop.hbase.client.Get): 1 usage
Result (org.apache.hadoop.hbase.client.Result): 1 usage