Example 1 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

From the class HStore, method getMemstore:

/**
 * @return the MemStore instance to use in this store.
 */
private MemStore getMemstore() {
    MemStore ms = null;
    // Check if in-memory-compaction configured. Note MemoryCompactionPolicy is an enum!
    MemoryCompactionPolicy inMemoryCompaction = null;
    if (this.getTableName().isSystemTable()) {
        inMemoryCompaction = MemoryCompactionPolicy.valueOf(conf.get("hbase.systemtables.compacting.memstore.type", "NONE"));
    } else {
        inMemoryCompaction = getColumnFamilyDescriptor().getInMemoryCompaction();
    }
    if (inMemoryCompaction == null) {
        inMemoryCompaction = MemoryCompactionPolicy.valueOf(conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT).toUpperCase());
    }
    switch(inMemoryCompaction) {
        case NONE:
            Class<? extends MemStore> memStoreClass = conf.getClass(MEMSTORE_CLASS_NAME, DefaultMemStore.class, MemStore.class);
            ms = ReflectionUtils.newInstance(memStoreClass, new Object[] { conf, getComparator(), this.getHRegion().getRegionServicesForStores() });
            break;
        default:
            Class<? extends CompactingMemStore> compactingMemStoreClass = conf.getClass(MEMSTORE_CLASS_NAME, CompactingMemStore.class, CompactingMemStore.class);
            ms = ReflectionUtils.newInstance(compactingMemStoreClass, new Object[] { conf, getComparator(), this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction });
    }
    return ms;
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy)
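
For context, here is a minimal sketch (not part of the indexed sources) of how a client could set the per-family policy that getMemstore() reads back through getColumnFamilyDescriptor().getInMemoryCompaction(); the table and family names are illustrative, and cluster-wide behavior comes instead from the CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY setting used above.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionDescriptor {

    /**
     * Builds a table descriptor whose single family requests BASIC
     * in-memory compaction. HStore.getMemstore() above will then pick a
     * CompactingMemStore for stores of this family.
     */
    public static TableDescriptor withBasicPolicy() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                // Per-family in-memory compaction policy (illustrative choice).
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build())
            .build();
    }
}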

Example 2 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

From the class TestCompactingMemStore, method testMagicCompaction3Buckets:

@Test
public void testMagicCompaction3Buckets() throws IOException {
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.ADAPTIVE;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    memstore.getConfiguration().setDouble(AdaptiveMemStoreCompactionStrategy.ADAPTIVE_COMPACTION_THRESHOLD_KEY, 0.45);
    memstore.getConfiguration().setInt(AdaptiveMemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, 2);
    memstore.getConfiguration().setInt(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 1);
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    String[] keys1 = { "A", "B", "D" };
    String[] keys2 = { "A" };
    String[] keys3 = { "A", "A", "B", "C" };
    String[] keys4 = { "D", "B", "B" };
    // Adding 3 cells.
    int totalCellsLen1 = addRowsByKeys(memstore, keys1);
    int oneCellOnCSLMHeapSize = 120;
    assertEquals(totalCellsLen1, region.getMemStoreDataSize());
    long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 3 * oneCellOnCSLMHeapSize;
    assertEquals(totalHeapSize, memstore.heapSize());
    // push keys to pipeline - flatten
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(3, ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells());
    assertEquals(1.0, ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0);
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // Adding 1 more cell - flatten.
    addRowsByKeys(memstore, keys2);
    // push keys to pipeline without compaction
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(4, ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells());
    assertEquals(1.0, ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0);
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // Adding 4 more cells - merge.
    addRowsByKeys(memstore, keys3);
    // push keys to pipeline without compaction
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(8, ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells());
    assertEquals((4.0 / 8.0), ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0);
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // 3 more cells added - compact (or not)
    addRowsByKeys(memstore, keys4);
    // push keys to pipeline and compact
    ((CompactingMemStore) memstore).flushInMemory();
    int numCells = ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells();
    assertTrue(4 == numCells || 11 == numCells);
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    MemStoreSize mss = memstore.getFlushableSize();
    // push keys to snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    // simulate flusher
    region.decrMemStoreSize(mss);
    ImmutableSegment s = memstore.getSnapshot();
    numCells = s.getCellsCount();
    assertTrue(4 == numCells || 11 == numCells);
    assertEquals(0, regionServicesForStores.getMemStoreSize());
    memstore.clearSnapshot(snapshot.getId());
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Test (org.junit.Test)
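
The test drives the ADAPTIVE policy through three configuration constants. As a minimal standalone sketch using only the constants exercised above (their exact semantics live in AdaptiveMemStoreCompactionStrategy), the same setup outside the test harness could look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.AdaptiveMemStoreCompactionStrategy;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;

public class AdaptivePolicyConfig {

    /** Returns a configuration selecting ADAPTIVE in-memory compaction. */
    public static Configuration adaptiveConf() {
        Configuration conf = HBaseConfiguration.create();
        // Select the ADAPTIVE in-memory compaction policy.
        conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
            String.valueOf(MemoryCompactionPolicy.ADAPTIVE));
        // Same values the test sets: the compaction decision depends on the
        // estimated fraction of unique cells (0.45) and on how many pipeline
        // segments have accumulated (2).
        conf.setDouble(AdaptiveMemStoreCompactionStrategy.ADAPTIVE_COMPACTION_THRESHOLD_KEY, 0.45);
        conf.setInt(AdaptiveMemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, 2);
        return conf;
    }
}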

Example 3 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

From the class TestCompactingToCellFlatMapMemStore, method testBigCellSizeAfterInMemoryCompaction:

/**
 * Test big cell size after in-memory compaction (HBASE-26467).
 */
@Test
public void testBigCellSizeAfterInMemoryCompaction() throws IOException {
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
    memstore.getConfiguration().setInt(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, 1);
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    byte[] val = new byte[MemStoreLAB.CHUNK_SIZE_DEFAULT];
    long size = addRowsByKeys(memstore, new String[] { "A" }, val);
    ((MyCompactingMemStore) memstore).flushInMemory();
    for (KeyValueScanner scanner : memstore.getScanners(Long.MAX_VALUE)) {
        Cell cell;
        while ((cell = scanner.next()) != null) {
            assertEquals(size, cell.getSerializedSize());
        }
    }
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
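
To see why the cell above counts as oversized: its value alone fills a default MSLAB chunk, so the serialized cell (key, value, and metadata) can never fit into a regular chunk. A minimal standalone sketch of that arithmetic, with illustrative family and qualifier names:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.util.Bytes;

public class JumboCellCheck {

    public static void main(String[] args) {
        // A value that by itself occupies an entire default-sized chunk.
        byte[] val = new byte[MemStoreLAB.CHUNK_SIZE_DEFAULT];
        KeyValue kv = new KeyValue(Bytes.toBytes("A"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), System.currentTimeMillis(), val);
        // Key and metadata push the serialized size past the chunk size,
        // so this cell cannot be allocated from a regular MSLAB chunk.
        System.out.println(kv.getSerializedSize() > MemStoreLAB.CHUNK_SIZE_DEFAULT);
    }
}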

Example 4 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

From the class TestCompactingToCellFlatMapMemStore, method testFlatteningToJumboCellChunkMap:

/**
 * The CellChunkMap segment index requires all cell data to be written into MSLAB chunks.
 * Even though MSLAB is enabled, cells bigger than the size of a chunk are not
 * written into MSLAB chunks.
 * If such cells are encountered while flattening into CellChunkMap
 * (in-memory flush), they need to be copied into MSLAB.
 * testFlatteningToJumboCellChunkMap checks that the process of flattening
 * into CellChunkMap succeeds, even when such big cells are allocated.
 */
@Test
public void testFlatteningToJumboCellChunkMap() throws IOException {
    if (!toCellChunkMap) {
        return;
    }
    // set memstore to flatten into CellChunkMap
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
    int numOfCells = 1;
    char[] chars = new char[MemStoreLAB.CHUNK_SIZE_DEFAULT];
    for (int i = 0; i < chars.length; i++) {
        chars[i] = 'A';
    }
    String bigVal = new String(chars);
    String[] keys1 = { "A" };
    // make one cell
    byte[] row = Bytes.toBytes(keys1[0]);
    byte[] val = Bytes.toBytes(bigVal);
    KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val);
    // test 1 bucket
    int totalCellsLen = addRowsByKeys(memstore, keys1, val);
    long oneCellOnCSLMHeapSize = ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize());
    long totalHeapSize = numOfCells * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD;
    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    // push keys to pipeline and flatten
    ((CompactingMemStore) memstore).flushInMemory();
    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
        Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // One cell is duplicated, but it shouldn't be compacted because we are in BASIC mode.
    // totalCellsLen should remain the same
    long oneCellOnCCMHeapSize = (long) ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize());
    totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + numOfCells * oneCellOnCCMHeapSize;
    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    MemStoreSize mss = memstore.getFlushableSize();
    // push keys to snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    // simulate flusher
    region.decrMemStoreSize(mss);
    ImmutableSegment s = memstore.getSnapshot();
    assertEquals(numOfCells, s.getCellsCount());
    assertEquals(0, regionServicesForStores.getMemStoreSize());
    memstore.clearSnapshot(snapshot.getId());
    // Allocating two big cells (too big to be copied into a regular chunk).
    String[] keys2 = { "C", "D" };
    addRowsByKeys(memstore, keys2, val);
    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
        Threads.sleep(10);
    }
    // The in-memory flush size is bigger than the size of a single cell,
    // but smaller than the size of two cells.
    // Therefore, the two created cells are flushed together as a single CSLMImmutableSegment and
    // flattened.
    totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + 2 * oneCellOnCCMHeapSize;
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Test (org.junit.Test)

Example 5 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

From the class TestCompactingToCellFlatMapMemStore, method testFlatteningToBigCellChunkMap:

/**
 * The CellChunkMap segment index requires all cell data to be written into MSLAB chunks.
 * Even though MSLAB is enabled, cells bigger than maxAlloc
 * (even if smaller than the size of a chunk) are not written into MSLAB chunks.
 * If such cells are encountered while flattening into CellChunkMap
 * (in-memory flush), they need to be copied into MSLAB.
 * testFlatteningToBigCellChunkMap checks that the process of flattening into
 * CellChunkMap succeeds, even when such big cells are allocated.
 */
@Test
public void testFlatteningToBigCellChunkMap() throws IOException {
    if (!toCellChunkMap) {
        return;
    }
    // set memstore to flatten into CellChunkMap
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
    int numOfCells = 4;
    char[] chars = new char[MemStoreLAB.MAX_ALLOC_DEFAULT];
    for (int i = 0; i < chars.length; i++) {
        chars[i] = 'A';
    }
    String bigVal = new String(chars);
    String[] keys1 = { "A", "B", "C", "D" };
    // make one cell
    byte[] row = Bytes.toBytes(keys1[0]);
    byte[] val = Bytes.toBytes(bigVal);
    KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val);
    // test 1 bucket
    int totalCellsLen = addRowsByKeys(memstore, keys1, val);
    long oneCellOnCSLMHeapSize = ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize());
    long totalHeapSize = numOfCells * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD;
    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    // push keys to pipeline and flatten
    ((CompactingMemStore) memstore).flushInMemory();
    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
        Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // One cell is duplicated, but it shouldn't be compacted because we are in BASIC mode.
    // totalCellsLen should remain the same
    long oneCellOnCCMHeapSize = ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize());
    totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + numOfCells * oneCellOnCCMHeapSize;
    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    MemStoreSize mss = memstore.getFlushableSize();
    // push keys to snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    // simulate flusher
    region.decrMemStoreSize(mss);
    ImmutableSegment s = memstore.getSnapshot();
    assertEquals(numOfCells, s.getCellsCount());
    assertEquals(0, regionServicesForStores.getMemStoreSize());
    memstore.clearSnapshot(snapshot.getId());
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Test (org.junit.Test)
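
The two flattening tests differ only in which MSLAB limit the cells exceed: maxAlloc here, the full chunk size in the previous example. As a minimal sketch, assuming the MemStoreLAB interface also exposes configuration keys for these limits (MAX_ALLOC_KEY and CHUNK_SIZE_KEY), this is where they would be tuned:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;

public class MslabThresholds {

    /** Returns a configuration that spells out the two MSLAB limits explicitly. */
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Cells larger than maxAlloc bypass MSLAB chunk allocation
        // (the case exercised by testFlatteningToBigCellChunkMap).
        conf.setInt(MemStoreLAB.MAX_ALLOC_KEY, MemStoreLAB.MAX_ALLOC_DEFAULT);
        // The chunk size bounds what can ever fit in a single chunk
        // (testFlatteningToJumboCellChunkMap's cells exceed even this).
        conf.setInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
        return conf;
    }
}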

Aggregations

MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy): 13 uses
Test (org.junit.Test): 12 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 5 uses
ArrayList (java.util.ArrayList): 1 use
Cell (org.apache.hadoop.hbase.Cell): 1 use