Example 6 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

The class TestCompactingMemStore, method testCompaction2Buckets.

@Test
public void testCompaction2Buckets() throws IOException {
    // set memstore to do basic structure flattening, the "eager" option is tested in
    // TestCompactingToCellFlatMapMemStore
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    memstore.getConfiguration().set(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, String.valueOf(1));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    String[] keys1 = { "A", "A", "B", "C" };
    String[] keys2 = { "A", "B", "D" };
    int totalCellsLen1 = addRowsByKeys(memstore, keys1);
    int oneCellOnCSLMHeapSize = 120;
    int oneCellOnCAHeapSize = 88;
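    // (note: the CSLM figure includes each cell's ConcurrentSkipListMap entry overhead;
    // the CA figure is the same cell once referenced from a flat CellArrayMap instead)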
    long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize;
    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    // push keys to pipeline and compact
    ((CompactingMemStore) memstore).flushInMemory();
    int counter = 0;
    for (Segment s : memstore.getSegments()) {
        counter += s.getCellsCount();
    }
    assertEquals(4, counter);
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // There is no compaction, as the compacting memstore type is basic.
    // totalCellsLen remains the same
    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
    totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 4 * oneCellOnCAHeapSize;
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    int totalCellsLen2 = addRowsByKeys(memstore, keys2);
    totalHeapSize += 3 * oneCellOnCSLMHeapSize;
    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    MemStoreSize mss = memstore.getFlushableSize();
    // push keys to pipeline and compact
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
    totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 7 * oneCellOnCAHeapSize;
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    mss = memstore.getFlushableSize();
    // push keys to snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    // simulate flusher
    region.decrMemStoreSize(mss);
    ImmutableSegment s = memstore.getSnapshot();
    assertEquals(7, s.getCellsCount());
    assertEquals(0, regionServicesForStores.getMemStoreSize());
    memstore.clearSnapshot(snapshot.getId());
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Test (org.junit.Test)
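
For context, the same two knobs the test sets can be applied to a plain client configuration. A minimal sketch, assuming an org.apache.hadoop.conf.Configuration obtained via HBaseConfiguration.create() and the same constants the test itself uses:

Configuration conf = HBaseConfiguration.create();
// BASIC only flattens pipeline segments into flat maps; it does not drop duplicate cells
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
    String.valueOf(MemoryCompactionPolicy.BASIC));
// merge/flatten as soon as a single segment sits in the compaction pipeline
conf.set(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, String.valueOf(1));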

Example 7 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

The class TestCompactingMemStore, method testPuttingBackChunksWithOpeningPipelineScanner.

@Test
public void testPuttingBackChunksWithOpeningPipelineScanner() throws IOException {
    // set memstore to do data compaction and not to use the speculative scan
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    byte[] row = Bytes.toBytes("testrow");
    byte[] fam = Bytes.toBytes("testfamily");
    byte[] qf1 = Bytes.toBytes("testqualifier1");
    byte[] qf2 = Bytes.toBytes("testqualifier2");
    byte[] qf3 = Bytes.toBytes("testqualifier3");
    byte[] val = Bytes.toBytes("testval");
    // Setting up memstore
    memstore.add(new KeyValue(row, fam, qf1, 1, val), null);
    memstore.add(new KeyValue(row, fam, qf2, 1, val), null);
    memstore.add(new KeyValue(row, fam, qf3, 1, val), null);
    // Creating a pipeline
    ((MyCompactingMemStore) memstore).disableCompaction();
    ((CompactingMemStore) memstore).flushInMemory();
    // Adding value to "new" memstore
    assertEquals(0, memstore.getActive().getCellsCount());
    memstore.add(new KeyValue(row, fam, qf1, 2, val), null);
    memstore.add(new KeyValue(row, fam, qf2, 2, val), null);
    assertEquals(2, memstore.getActive().getCellsCount());
    // pipeline bucket 2
    ((CompactingMemStore) memstore).flushInMemory();
    // opening scanner before force flushing
    List<KeyValueScanner> scanners = memstore.getScanners(0);
    // Shouldn't put the chunks back to the pool, since some scanners are still
    // open on the data they reference
    ((MyCompactingMemStore) memstore).enableCompaction();
    // trigger compaction
    ((CompactingMemStore) memstore).flushInMemory();
    // Adding value to "new" memstore
    assertEquals(0, memstore.getActive().getCellsCount());
    memstore.add(new KeyValue(row, fam, qf3, 3, val), null);
    memstore.add(new KeyValue(row, fam, qf2, 3, val), null);
    memstore.add(new KeyValue(row, fam, qf1, 3, val), null);
    assertEquals(3, memstore.getActive().getCellsCount());
    assertTrue(chunkCreator.getPoolSize() == 0);
    // Chunks will be put back to the pool once the scanners are closed
    for (KeyValueScanner scanner : scanners) {
        scanner.close();
    }
    assertTrue(chunkCreator.getPoolSize() > 0);
    // clear chunks
    chunkCreator.clearChunksInPool();
    // Creating another snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    // close the scanners
    for (KeyValueScanner scanner : snapshot.getScanners()) {
        scanner.close();
    }
    memstore.clearSnapshot(snapshot.getId());
    snapshot = memstore.snapshot();
    // Adding more value
    memstore.add(new KeyValue(row, fam, qf2, 4, val), null);
    memstore.add(new KeyValue(row, fam, qf3, 4, val), null);
    // opening scanners
    scanners = memstore.getScanners(0);
    // close scanners before clear the snapshot
    for (KeyValueScanner scanner : scanners) {
        scanner.close();
    }
    // close the scanners
    for (KeyValueScanner scanner : snapshot.getScanners()) {
        scanner.close();
    }
    memstore.clearSnapshot(snapshot.getId());
    assertTrue(chunkCreator.getPoolSize() > 0);
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), KeyValue (org.apache.hadoop.hbase.KeyValue), Test (org.junit.Test)
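
The behavior this test pins down is that chunks return to the pool only once every scanner reading them is closed. A minimal sketch of the usual try/finally idiom around that contract, reusing the test's memstore fixture:

List<KeyValueScanner> scanners = memstore.getScanners(0);
try {
    // read through the scanners; while open they keep the backing chunks referenced
} finally {
    for (KeyValueScanner scanner : scanners) {
        // close() releases the references so the chunks can go back to the pool
        scanner.close();
    }
}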

Example 8 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

The class TestCompactingMemStore, method testCompaction3Buckets.

@Test
public void testCompaction3Buckets() throws IOException {
    // set memstore to do data compaction and not to use the speculative scan
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    String[] keys1 = { "A", "A", "B", "C" };
    String[] keys2 = { "A", "B", "D" };
    String[] keys3 = { "D", "B", "B" };
    // Adding 4 cells.
    int totalCellsLen1 = addRowsByKeys(memstore, keys1);
    int oneCellOnCSLMHeapSize = 120;
    int oneCellOnCAHeapSize = 88;
    assertEquals(totalCellsLen1, region.getMemStoreDataSize());
    long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize;
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    // push keys to pipeline and compact
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // One cell is duplicated and the compaction will remove it. All cells are the
    // same size, so adjust totalCellsLen accordingly
    totalCellsLen1 = (totalCellsLen1 * 3) / 4;
    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
    // In memory flush to make a CellArrayMap instead of CSLM. See the overhead diff.
    totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 3 * oneCellOnCAHeapSize;
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    // Adding 3 more cells.
    int totalCellsLen2 = addRowsByKeys(memstore, keys2);
    long totalHeapSize2 = totalHeapSize + 3 * oneCellOnCSLMHeapSize;
    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
    ((MyCompactingMemStore) memstore).disableCompaction();
    MemStoreSize mss = memstore.getFlushableSize();
    // push keys to pipeline without compaction
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // No change in the cells' data size (i.e. the memstore size), as there is no compaction.
    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize2 + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM, ((CompactingMemStore) memstore).heapSize());
    // 3 more cells added
    int totalCellsLen3 = addRowsByKeys(memstore, keys3);
    assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3, regionServicesForStores.getMemStoreSize());
    long totalHeapSize3 = totalHeapSize2 + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 3 * oneCellOnCSLMHeapSize;
    assertEquals(totalHeapSize3, ((CompactingMemStore) memstore).heapSize());
    ((MyCompactingMemStore) memstore).enableCompaction();
    mss = memstore.getFlushableSize();
    // push keys to pipeline and compact
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // active flushed to pipeline and all 3 segments compacted. This gets rid of duplicated cells.
    // Out of the 10 cells added in total, only 4 are unique.
    // In keys2, 2 of the 3 cells duplicate existing cells, so a third of the data survives
    totalCellsLen2 = totalCellsLen2 / 3;
    // The keys3 cells are all duplicates of existing cells.
    totalCellsLen3 = 0;
    assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3, regionServicesForStores.getMemStoreSize());
    // Only 4 unique cells left
    assertEquals(4 * oneCellOnCAHeapSize + MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM, ((CompactingMemStore) memstore).heapSize());
    mss = memstore.getFlushableSize();
    // push keys to snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    // simulate flusher
    region.decrMemStoreSize(mss);
    ImmutableSegment s = memstore.getSnapshot();
    assertEquals(4, s.getCellsCount());
    assertEquals(0, regionServicesForStores.getMemStoreSize());
    memstore.clearSnapshot(snapshot.getId());
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Test (org.junit.Test)
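
The size adjustments in this test follow directly from which cells are duplicates. A worked restatement of that arithmetic in plain Java:

int cells1 = 4;                  // keys1 = {"A", "A", "B", "C"}
int survivors1 = cells1 * 3 / 4; // eager compaction drops the duplicate "A", leaving 3
int cells2 = 3;                  // keys2 = {"A", "B", "D"}
int survivors2 = cells2 / 3;     // "A" and "B" duplicate pipeline cells; only "D" is new
int survivors3 = 0;              // keys3 = {"D", "B", "B"} are all duplicates
// survivors1 + survivors2 + survivors3 == 4: the unique cells A, B, C and D,
// matching the final s.getCellsCount() assertion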

Example 9 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

The class TestCompactingMemStore, method testCompaction1Bucket.

// ////////////////////////////////////////////////////////////////////////////
// Compaction tests
// ////////////////////////////////////////////////////////////////////////////
@Test
public void testCompaction1Bucket() throws IOException {
    // set memstore to do basic structure flattening, the "eager" option is tested in
    // TestCompactingToCellFlatMapMemStore
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    // A1, A2, B3, C4
    String[] keys1 = { "A", "A", "B", "C" };
    // test 1 bucket
    int totalCellsLen = addRowsByKeys(memstore, keys1);
    int oneCellOnCSLMHeapSize = 120;
    int oneCellOnCAHeapSize = 88;
    long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize;
    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    // push keys to pipeline and compact
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // There is no compaction, as the compacting memstore type is basic.
    // totalCellsLen remains the same
    totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 4 * oneCellOnCAHeapSize;
    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
    assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
    MemStoreSize mss = memstore.getFlushableSize();
    // push keys to snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    // simulate flusher
    region.decrMemStoreSize(mss);
    ImmutableSegment s = memstore.getSnapshot();
    assertEquals(4, s.getCellsCount());
    assertEquals(0, regionServicesForStores.getMemStoreSize());
    memstore.clearSnapshot(snapshot.getId());
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Test (org.junit.Test)
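
Outside of tests, the policy is normally set per column family through the public HBase client API rather than the raw configuration key. A minimal sketch, assuming the HBase 2.x ColumnFamilyDescriptorBuilder and a made-up family name "cf":

ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("cf"))
    // flatten-only in-memory compaction, the same policy this test configures
    .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
    .build();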

Example 10 with MemoryCompactionPolicy

Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.

The class TestCompactingToCellFlatMapMemStore, method testMerging.

// ////////////////////////////////////////////////////////////////////////////
// Merging tests
// ////////////////////////////////////////////////////////////////////////////
@Test
public void testMerging() throws IOException {
    if (toCellChunkMap) {
        // set memstore to flat into CellChunkMap
        ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
    }
    String[] keys1 = { "A", "A", "B", "C", "F", "H" };
    String[] keys2 = { "A", "B", "D", "G", "I", "J" };
    String[] keys3 = { "D", "B", "B", "E" };
    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType));
    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
    addRowsByKeysDataSize(memstore, keys1);
    // push keys to the pipeline; with the BASIC policy this should not compact
    ((CompactingMemStore) memstore).flushInMemory();
    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
        Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    // likewise, only flattening is expected here, no compaction
    addRowsByKeysDataSize(memstore, keys2);
    int counter2 = 0;
    for (Segment s : memstore.getSegments()) {
        counter2 += s.getCellsCount();
    }
    assertEquals(12, counter2);
    ((MyCompactingMemStore) memstore).disableCompaction();
    // push keys to pipeline without flattening
    ((CompactingMemStore) memstore).flushInMemory();
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    int counter3 = 0;
    for (Segment s : memstore.getSegments()) {
        counter3 += s.getCellsCount();
    }
    assertEquals(12, counter3);
    addRowsByKeysDataSize(memstore, keys3);
    int counter4 = 0;
    for (Segment s : memstore.getSegments()) {
        counter4 += s.getCellsCount();
    }
    assertEquals(16, counter4);
    ((MyCompactingMemStore) memstore).enableCompaction();
    // push keys to pipeline and compact
    ((CompactingMemStore) memstore).flushInMemory();
    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
        Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    int counter = 0;
    for (Segment s : memstore.getSegments()) {
        counter += s.getCellsCount();
    }
    assertEquals(16, counter);
    // push keys to snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();
    ImmutableSegment s = memstore.getSnapshot();
    memstore.clearSnapshot(snapshot.getId());
}
Also used: MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy), Test (org.junit.Test)
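
When toCellChunkMap is set, this test switches the flat index to CellChunkMap via setIndexType. For reference, a sketch of the three index types CompactingMemStore.IndexType declares (names as in the HBase source tree):

CompactingMemStore cms = (CompactingMemStore) memstore;
// CSLM_MAP  - keep the ConcurrentSkipListMap index, i.e. no flattening
// ARRAY_MAP - flatten into a CellArrayMap, the default exercised by the tests above
// CHUNK_MAP - flatten into a CellChunkMap whose cells live inside pooled chunks
cms.setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);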

Aggregations

MemoryCompactionPolicy (org.apache.hadoop.hbase.MemoryCompactionPolicy): 13 usages
Test (org.junit.Test): 12 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 5 usages
ArrayList (java.util.ArrayList): 1 usage
Cell (org.apache.hadoop.hbase.Cell): 1 usage