Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.
The class TestCompactingToCellFlatMapMemStore, method testTimeRangeAfterMerge.
@Test
public void testTimeRangeAfterMerge() throws IOException {
  if (toCellChunkMap) {
    // set memstore to flat into CellChunkMap
    ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
  }
  MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
  memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
    String.valueOf(compactionType));
  ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
  testTimeRange(false);
}
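The test switches its memstore to the BASIC in-memory compaction policy through the Configuration key CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY. Outside the test harness the same policy is normally requested per column family. A minimal sketch, assuming the HBase 2.x client API; the class name, "demo_table" and "cf" are placeholder names used only for illustration:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionExample {
  // Sketch: build a table descriptor that asks for BASIC in-memory compaction
  // on a single column family. "demo_table" and "cf" are hypothetical names.
  static TableDescriptor basicCompactionTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        // same enum the test sets on the memstore: NONE, BASIC, EAGER or ADAPTIVE
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build())
      .build();
  }
}

The returned descriptor could then be handed to Admin.createTable(...) against a live cluster.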
Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.
The class TestCompactingToCellFlatMapMemStore, method testFlatteningToCellChunkMap.
@Test
public void testFlatteningToCellChunkMap() throws IOException {
  // set memstore to flat into CellChunkMap
  MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
  memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
    String.valueOf(compactionType));
  ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
  ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
  int numOfCells = 8;
  // A1, A2, B3, C4, D5, D6, E7, F8
  String[] keys1 = { "A", "A", "B", "C", "D", "D", "E", "F" };
  // make one cell
  byte[] row = Bytes.toBytes(keys1[0]);
  byte[] val = Bytes.toBytes(keys1[0] + 0);
  KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
    EnvironmentEdgeManager.currentTime(), val);
  // test 1 bucket
  int totalCellsLen = addRowsByKeys(memstore, keys1);
  long oneCellOnCSLMHeapSize = ClassSize.align(
    ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize());
  long totalHeapSize = numOfCells * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD;
  assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
  assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
  // push keys to pipeline and flatten
  ((CompactingMemStore) memstore).flushInMemory();
  assertEquals(0, memstore.getSnapshot().getCellsCount());
  long oneCellOnCCMHeapSize =
    ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize());
  totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM
    + numOfCells * oneCellOnCCMHeapSize;
  assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
  assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
  MemStoreSize mss = memstore.getFlushableSize();
  // push keys to snapshot
  MemStoreSnapshot snapshot = memstore.snapshot();
  // simulate flusher
  region.decrMemStoreSize(mss);
  ImmutableSegment s = memstore.getSnapshot();
  assertEquals(numOfCells, s.getCellsCount());
  assertEquals(0, regionServicesForStores.getMemStoreSize());
  memstore.clearSnapshot(snapshot.getId());
}
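The two heapSize() assertions rest on how a single cell is accounted before and after flattening: in the mutable, ConcurrentSkipListMap-backed segment each cell pays for a skip-list entry plus the KeyValue object itself, while in the flattened CellChunkMap segment it pays only for a fixed index entry plus its aligned serialized data. A minimal sketch of the two per-cell formulas used above, pulled into hypothetical helper methods for readability (assumes the test's own imports of KeyValue and ClassSize):

// Per-cell heap cost in the mutable (CSLM-backed) segment:
// skip-list entry + KeyValue object overhead + serialized cell data.
static long cslmEntryHeapSize(KeyValue kv) {
  return ClassSize.align(
    ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize());
}

// Per-cell heap cost after flattening to a CellChunkMap segment:
// fixed index entry + aligned cell data; there is no per-cell Java object,
// since the cell bytes live inside an MSLAB chunk referenced by the index.
static long ccmEntryHeapSize(KeyValue kv) {
  return ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize());
}

The difference between numOfCells times each of these values, plus the differing segment overheads, is exactly what the two totalHeapSize expressions in the test capture.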
Use of org.apache.hadoop.hbase.MemoryCompactionPolicy in project hbase by apache.
The class TestCompactingToCellFlatMapMemStore, method testForceCopyOfBigCellIntoImmutableSegment.
/**
 * CellChunkMap Segment index requires all cell data to be written in the MSLAB Chunks.
 * Even though MSLAB is enabled, cells bigger than the size of a chunk are not
 * written in the MSLAB Chunks.
 * If such cells are found in the process of a merge they need to be copied into MSLAB.
 * testForceCopyOfBigCellIntoImmutableSegment checks that the
 * ImmutableMemStoreLAB's forceCopyOfBigCellInto does what it's supposed to do.
 */
// Flakey. Disabled by HBASE-24128. HBASE-24129 is for reenable.
// expected:<8389924> but was:<8389992>
@org.junit.Ignore
@Test
public void testForceCopyOfBigCellIntoImmutableSegment() throws IOException {
  if (!toCellChunkMap) {
    return;
  }
  // set memstore to flat into CellChunkMap
  MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
  memstore.getConfiguration().setInt(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, 4);
  memstore.getConfiguration()
    .setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.014);
  memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
    String.valueOf(compactionType));
  ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
  ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
  char[] chars = new char[MemStoreLAB.CHUNK_SIZE_DEFAULT];
  for (int i = 0; i < chars.length; i++) {
    chars[i] = 'A';
  }
  String bigVal = new String(chars);
  byte[] val = Bytes.toBytes(bigVal);
  // We need to add two cells, four times, in order to guarantee a merge
  List<String[]> keysList = new ArrayList<>();
  keysList.add(new String[] { "A", "B" });
  keysList.add(new String[] { "C", "D" });
  keysList.add(new String[] { "E", "F" });
  keysList.add(new String[] { "G", "H" });
  // Measuring the size of a single kv
  KeyValue kv = new KeyValue(Bytes.toBytes("A"), Bytes.toBytes("testfamily"),
    Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val);
  long oneCellOnCCMHeapSize =
    (long) ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize());
  long oneCellOnCSLMHeapSize =
    ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize());
  long totalHeapSize = MutableSegment.DEEP_OVERHEAD;
  for (int i = 0; i < keysList.size(); i++) {
    addRowsByKeys(memstore, keysList.get(i), val);
    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
      Threads.sleep(10);
    }
    if (i == 0) {
      totalHeapSize +=
        CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + oneCellOnCCMHeapSize + oneCellOnCSLMHeapSize;
    } else {
      // The in-memory flush size is bigger than the size of a single cell,
      // but smaller than the size of two cells.
      // Therefore, the two created cells are flattened in a separate segment.
      totalHeapSize += 2 * (CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + oneCellOnCCMHeapSize);
    }
    if (i == 2) {
      // Four out of the five segments are merged into one
      totalHeapSize -= (4 * CellChunkImmutableSegment.DEEP_OVERHEAD_CCM);
      totalHeapSize = ClassSize.align(totalHeapSize);
    }
    assertEquals("i=" + i, totalHeapSize, ((CompactingMemStore) memstore).heapSize());
  }
}
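The values written above are MemStoreLAB.CHUNK_SIZE_DEFAULT characters long, so every cell is guaranteed not to fit in a regular MSLAB chunk; that is what forces the merge into a CellChunkMap segment to copy the cell via ImmutableMemStoreLAB's forceCopyOfBigCellInto, as the Javadoc describes. A minimal, self-contained sketch of that setup; the class name and the printed size check are illustrative only, not the exact production code path:

import java.util.Arrays;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.util.Bytes;

public class BigCellSketch {
  public static void main(String[] args) {
    // Build a value as large as the default MSLAB chunk, so the resulting cell's
    // serialized size is guaranteed to exceed what a regular chunk can hold.
    char[] chars = new char[MemStoreLAB.CHUNK_SIZE_DEFAULT];
    Arrays.fill(chars, 'A');
    byte[] bigVal = Bytes.toBytes(new String(chars));
    KeyValue bigCell = new KeyValue(Bytes.toBytes("A"), Bytes.toBytes("testfamily"),
      Bytes.toBytes("testqualifier"), System.currentTimeMillis(), bigVal);
    // Illustrative check only: such a cell cannot be served from a regular chunk, so a
    // merge into a CellChunkMap segment must copy it into a dedicated oversized chunk,
    // which is the job of ImmutableMemStoreLAB.forceCopyOfBigCellInto.
    System.out.println("fits in a regular chunk: "
      + (bigCell.getSerializedSize() <= MemStoreLAB.CHUNK_SIZE_DEFAULT));
  }
}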