Example 1 with RatioBasedCompactionPolicy

Use of org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy in project hbase by apache.

From the class TestDefaultCompactSelection, method testCompactionEmptyHFile.

@Test
public void testCompactionEmptyHFile() throws IOException {
    // Set TTL
    ScanInfo oldScanInfo = store.getScanInfo();
    ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getConfiguration(), oldScanInfo.getFamily(),
        oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600,
        oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
        oldScanInfo.getComparator());
    store.setScanInfo(newScanInfo);
    // Do not compact empty store file
    List<StoreFile> candidates = sfCreate(0);
    for (StoreFile file : candidates) {
        if (file instanceof MockStoreFile) {
            MockStoreFile mockFile = (MockStoreFile) file;
            mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
            mockFile.setEntries(0);
        }
    }
    // Test Default compactions
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, false, false);
    Assert.assertTrue(result.getFiles().isEmpty());
    store.setScanInfo(oldScanInfo);
}
Also used : RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest), Test (org.junit.Test)
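
The request returned above must be empty: an expired but empty store file is never selected. The following minimal, self-contained sketch (plain Java with a hypothetical CandidateFile stand-in, not HBase's StoreFile) illustrates that selection rule in isolation:

// Illustrative sketch only; CandidateFile is a hypothetical stand-in for an HFile candidate.
import java.util.ArrayList;
import java.util.List;

public class EmptyFileSelectionSketch {

    static final class CandidateFile {
        final long entries;      // number of cells in the file
        final long maxTimestamp; // -1 models the unset TimeRangeTracker(-1, -1) from the test

        CandidateFile(long entries, long maxTimestamp) {
            this.entries = entries;
            this.maxTimestamp = maxTimestamp;
        }
    }

    // Keep only candidates that actually contain data; empty files are skipped,
    // which is the behaviour the test asserts on the real policy.
    static List<CandidateFile> dropEmptyFiles(List<CandidateFile> candidates) {
        List<CandidateFile> selected = new ArrayList<>();
        for (CandidateFile f : candidates) {
            if (f.entries > 0) {
                selected.add(f);
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        List<CandidateFile> candidates = new ArrayList<>();
        candidates.add(new CandidateFile(0, -1)); // the empty file from the test
        System.out.println(dropEmptyFiles(candidates).isEmpty()); // prints: true
    }
}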

Example 2 with RatioBasedCompactionPolicy

Use of org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy in project hbase by apache.

From the class TestCompactionPolicy, method compactEquals.

void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak, long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Test Default compactions
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<StoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
        Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
}
Also used : ArrayList (java.util.ArrayList), RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest)
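
compactEquals is a helper that runs the ratio-based selection and compares the sizes of the selected files against the expected values. A hedged usage sketch, assuming the same sfCreate helper from Example 1 builds mock store files of the given sizes (the sizes and expected selection below are illustrative placeholders, not values taken from the real test suite):

    // Illustrative only: file sizes and expected results are placeholders.
    // forcemajor = false, isOffPeak = true: the selected files should come back
    // in the given order and the request should be flagged as off-peak.
    compactEquals(sfCreate(50, 12, 12), /* forcemajor */ false, /* isOffPeak */ true, 50, 12, 12);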

Example 3 with RatioBasedCompactionPolicy

Use of org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy in project hbase by apache.

From the class TestMajorCompaction, method testTimeBasedMajorCompaction.

@Test
public void testTimeBasedMajorCompaction() throws Exception {
    // create 2 storefiles and force a major compaction to reset the time
    int delay = 10 * 1000; // 10 sec
    float jitterPct = 0.20f; // 20%
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);
    HStore s = (HStore) r.getStore(COLUMN_FAMILY);
    s.storeEngine.getCompactionPolicy().setConf(conf);
    try {
        createStoreFile(r);
        createStoreFile(r);
        r.compact(true);
        // add one more file & verify that a regular compaction won't work
        createStoreFile(r);
        r.compact(false);
        assertEquals(2, s.getStorefilesCount());
        // ensure that major compaction time is deterministic
        RatioBasedCompactionPolicy c = (RatioBasedCompactionPolicy) s.storeEngine.getCompactionPolicy();
        Collection<StoreFile> storeFiles = s.getStorefiles();
        long mcTime = c.getNextMajorCompactTime(storeFiles);
        for (int i = 0; i < 10; ++i) {
            assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles));
        }
        // ensure that the major compaction time is within the variance
        long jitter = Math.round(delay * jitterPct);
        assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);
        // wait until the time-based compaction interval
        Thread.sleep(mcTime);
        // trigger a compaction request and ensure that it's upgraded to major
        r.compact(false);
        assertEquals(1, s.getStorefilesCount());
    } finally {
        // reset the timed compaction settings
        conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
        // run a major to reset the cache
        createStoreFile(r);
        r.compact(true);
        assertEquals(1, s.getStorefilesCount());
    }
}
Also used : RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy), Test (org.junit.Test)
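
The bounds asserted above follow directly from the configured period and jitter. A small, self-contained sketch of that window computation, using the values the test itself configures:

// Computes the window that getNextMajorCompactTime must fall into when the
// major compaction period is 10 s and the jitter is 20%, as set in the test above.
public class JitterWindowSketch {
    public static void main(String[] args) {
        int delay = 10 * 1000;   // hbase.hregion.majorcompaction period in ms (test value)
        float jitterPct = 0.20f; // hbase.hregion.majorcompaction.jitter (test value)
        long jitter = Math.round(delay * jitterPct);
        System.out.println("lower bound: " + (delay - jitter) + " ms"); // 8000 ms
        System.out.println("upper bound: " + (delay + jitter) + " ms"); // 12000 ms
    }
}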

Aggregations

RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy) : 3 usages
CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest) : 2 usages
Test (org.junit.Test) : 2 usages
ArrayList (java.util.ArrayList) : 1 usage