Example 1 with TimeOffsetEnvironmentEdge

Use of org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge in project hbase by apache.

From class TestFIFOCompactionPolicy, method prepareData:

private Store prepareData() throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, FIFOCompactionPolicy.class.getName());
    desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName());
    HColumnDescriptor colDesc = new HColumnDescriptor(family);
    // TTL of 1 second: cells expire as soon as the injected clock moves past it
    colDesc.setTimeToLive(1);
    desc.addFamily(colDesc);
    admin.createTable(desc);
    Table table = TEST_UTIL.getConnection().getTable(tableName);
    Random rand = new Random();
    // Reuse the TimeOffsetEnvironmentEdge injected in @BeforeClass (see Example 3)
    // so the test can advance HBase's notion of time without sleeping.
    TimeOffsetEnvironmentEdge edge = (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
            byte[] value = new byte[128 * 1024];
            rand.nextBytes(value);
            table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
        }
        admin.flush(tableName);
        // Jump the clock 1001 ms, just past the 1-second TTL, so every file
        // flushed so far is already expired from FIFO compaction's viewpoint.
        edge.increment(1001);
    }
    return getStoreWithName(tableName);
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Random (java.util.Random), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), DisabledRegionSplitPolicy (org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy), Admin (org.apache.hadoop.hbase.client.Admin), TimeOffsetEnvironmentEdge (org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
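
How the clock trick works: TimeOffsetEnvironmentEdge keeps an adjustable offset on top of System.currentTimeMillis(), and once injected, every HBase component that reads time through EnvironmentEdgeManager sees the shifted value. Below is a minimal standalone sketch of that mechanism, assuming the hbase-common test classes (where TimeOffsetEnvironmentEdge lives) are on the classpath:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;

public class ClockOffsetSketch {
    public static void main(String[] args) {
        // Install an adjustable clock; anything that calls
        // EnvironmentEdgeManager.currentTime() now reads it.
        TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
        EnvironmentEdgeManager.injectEdge(edge);

        long before = EnvironmentEdgeManager.currentTime();
        // Advance 1001 ms, just past prepareData's 1-second TTL, without sleeping.
        edge.increment(1001);
        long after = EnvironmentEdgeManager.currentTime();
        System.out.println("clock advanced " + (after - before) + " ms");

        // Reinstall the default wall-clock edge when finished.
        EnvironmentEdgeManager.reset();
    }
}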

Example 2 with TimeOffsetEnvironmentEdge

Use of org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge in project hbase by apache.

From class TestDefaultCompactSelection, method testCompactionRatio:

@Test
public void testCompactionRatio() throws IOException {
    TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    /**
     * NOTE: these tests are specific to describe the implementation of the
     * current compaction algorithm.  Developed to ensure that refactoring
     * doesn't implicitly alter this.
     */
    long tooBig = maxSize + 1;
    // default case. preserve user ratio on size
    compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);
    // less than compact threshold = don't compact
    compactEquals(sfCreate(100, 50, 25, 12, 12));
    // greater than compact size = skip those
    compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
    // big size + threshold
    compactEquals(sfCreate(tooBig, tooBig, 700, 700));
    // small files = don't care about ratio
    compactEquals(sfCreate(7, 1, 1), 7, 1, 1);
    // don't exceed max file compact threshold
    // note:  file selection starts with largest to smallest.
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
    compactEquals(sfCreate(50, 10, 10, 10, 10), 10, 10, 10, 10);
    compactEquals(sfCreate(10, 10, 10, 10, 50), 10, 10, 10, 10);
    compactEquals(sfCreate(251, 253, 251, maxSize - 1), 251, 253, 251);
    compactEquals(sfCreate(maxSize - 1, maxSize - 1, maxSize - 1));
    // Always try and compact something to get below blocking storefile count
    this.conf.setLong("hbase.hstore.compaction.min.size", 1);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    compactEquals(sfCreate(512, 256, 128, 64, 32, 16, 8, 4, 2, 1), 4, 2, 1);
    this.conf.setLong("hbase.hstore.compaction.min.size", minSize);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    /* MAJOR COMPACTION */
    // if a major compaction has been forced, then compact everything
    compactEquals(sfCreate(50, 25, 12, 12), true, 50, 25, 12, 12);
    // also choose files < threshold on major compaction
    compactEquals(sfCreate(12, 12), true, 12, 12);
    // even if one of those files is too big
    compactEquals(sfCreate(tooBig, 12, 12), true, tooBig, 12, 12);
    // don't exceed max file compact threshold, even with major compaction
    store.forceMajor = true;
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
    store.forceMajor = false;
    // if we exceed maxCompactSize, downgrade to minor
    // if not, it creates a 'snowball effect' when files >> maxCompactSize:
    // the last file in compaction is the aggregate of all previous compactions
    compactEquals(sfCreate(100, 50, 23, 12, 12), true, 23, 12, 12);
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    try {
        // The modTime of the mocked store file is currentTimeMillis, so we need to increase the
        // timestamp a bit to make sure that now - lowestModTime is greater than major compaction
        // period(1ms).
        // trigger an aged major compaction
        List<StoreFile> candidates = sfCreate(50, 25, 12, 12);
        edge.increment(2);
        compactEquals(candidates, 50, 25, 12, 12);
        // make sure exceeding maxCompactSize also downgrades aged minors
        candidates = sfCreate(100, 50, 23, 12, 12);
        edge.increment(2);
        compactEquals(candidates, 23, 12, 12);
    } finally {
        conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    }
    /* REFERENCES == file is from a region that was split */
    // treat storefiles that have references like a major compaction
    compactEquals(sfCreate(true, 100, 50, 25, 12, 12), 100, 50, 25, 12, 12);
    // reference files shouldn't obey max threshold
    compactEquals(sfCreate(true, tooBig, 12, 12), tooBig, 12, 12);
    // reference files still obey the max-files-per-compaction limit, to avoid OOM
    compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);
    // empty case
    compactEquals(new ArrayList<>());
    // empty case (because all files are too big)
    compactEquals(sfCreate(tooBig, tooBig));
}
Also used: TimeOffsetEnvironmentEdge (org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge), Test (org.junit.Test)
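
A detail worth unpacking from the try block above: setting MAJOR_COMPACTION_PERIOD to 1 ms and the jitter to 0 makes the age check deterministic, and edge.increment(2) then pushes "now" past the period. The sketch below paraphrases that predicate; the method name isAged is hypothetical, and the real check lives inside HBase's ratio-based compaction policy:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;

public class AgedCompactionSketch {
    // Paraphrased shape of the check: files qualify for an "aged" major
    // compaction once the oldest file's modification time is further in the
    // past than the (jitter-adjusted) major compaction period.
    static boolean isAged(long lowestModTime, long periodMs) {
        return EnvironmentEdgeManager.currentTime() - lowestModTime > periodMs;
    }

    public static void main(String[] args) {
        TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
        EnvironmentEdgeManager.injectEdge(edge);
        long modTime = EnvironmentEdgeManager.currentTime(); // file "created" now
        System.out.println(isAged(modTime, 1)); // false, barring a real 1 ms tick
        edge.increment(2);                      // the test's 2 ms nudge
        System.out.println(isAged(modTime, 1)); // true: 2 ms elapsed > 1 ms period
        EnvironmentEdgeManager.reset();
    }
}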

Example 3 with TimeOffsetEnvironmentEdge

Use of org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge in project hbase by apache.

From class TestFIFOCompactionPolicy, method setEnvironmentEdge:

@BeforeClass
public static void setEnvironmentEdge() {
    EnvironmentEdge ee = new TimeOffsetEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(ee);
}
Also used: TimeOffsetEnvironmentEdge (org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge), EnvironmentEdge (org.apache.hadoop.hbase.util.EnvironmentEdge), BeforeClass (org.junit.BeforeClass)
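
Injected edges are process-global, so a test class that installs one normally restores the default clock in a matching @AfterClass. The actual teardown of TestFIFOCompactionPolicy is not shown on this page; the snippet below is a plausible counterpart, and EnvironmentEdgeManager.reset() is the standard call for reinstalling the default edge:

@AfterClass
public static void resetEnvironmentEdge() {
    // Reinstall the System.currentTimeMillis()-backed default edge so the
    // shifted clock does not leak into other test classes in the same JVM.
    EnvironmentEdgeManager.reset();
}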

Aggregations

TimeOffsetEnvironmentEdge (org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge): 3 uses
Random (java.util.Random): 1 use
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 1 use
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 1 use
Admin (org.apache.hadoop.hbase.client.Admin): 1 use
Put (org.apache.hadoop.hbase.client.Put): 1 use
Table (org.apache.hadoop.hbase.client.Table): 1 use
DisabledRegionSplitPolicy (org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy): 1 use
EnvironmentEdge (org.apache.hadoop.hbase.util.EnvironmentEdge): 1 use
BeforeClass (org.junit.BeforeClass): 1 use
Test (org.junit.Test): 1 use