Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
From the class TestCompactedHFilesDischarger, the method testCompactedHFilesCleaner:
@Test
public void testCompactedHFilesCleaner() throws Exception {
  // Create the cleaner object
  CompactedHFilesDischarger cleaner =
      new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
  // Add some data to the region and do some flushes
  for (int i = 1; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // Flush them
  region.flush(true);
  for (int i = 11; i < 20; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // Flush them
  region.flush(true);
  for (int i = 21; i < 30; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // Flush them
  region.flush(true);
  Store store = region.getStore(fam);
  assertEquals(3, store.getStorefilesCount());
  Collection<StoreFile> storefiles = store.getStorefiles();
  Collection<StoreFile> compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  // None of the files should be in compacted state.
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // Try to run the cleaner without compaction; there should be no change.
  cleaner.chore();
  storefiles = store.getStorefiles();
  // None of the files should be in compacted state.
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // Now do some compaction
  region.compact(true);
  // The flushed files should still be present until the cleaner runs, but they
  // should now be marked as compacted away.
  assertEquals(1, store.getStorefilesCount());
  assertEquals(3,
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles().size());
  // Run the cleaner
  cleaner.chore();
  assertEquals(1, store.getStorefilesCount());
  storefiles = store.getStorefiles();
  for (StoreFile file : storefiles) {
    // Should not be in compacted state
    assertFalse(file.isCompactedAway());
  }
  compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  assertTrue(compactedfiles.isEmpty());
}
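CompactedHFilesDischarger extends ScheduledChore, so the test drives it by calling chore() directly; on a real region server it would be registered with a ChoreService instead. A minimal sketch of that registration, assuming the same rss fixture from the test class (the service name is illustrative):

// Sketch: schedule the discharger instead of invoking chore() by hand.
ChoreService choreService = new ChoreService("discharger-sketch");
CompactedHFilesDischarger discharger =
    new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
choreService.scheduleChore(discharger); // fires roughly every 1000 ms
// ... exercise the region ...
choreService.shutdown(); // stop the chore thread pool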
Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
From the class TestCompactedHFilesDischarger, the method testCleanerWithParallelScannersAfterCompaction:
@Test
public void testCleanerWithParallelScannersAfterCompaction() throws Exception {
  // Create the cleaner object
  CompactedHFilesDischarger cleaner =
      new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
  // Add some data to the region and do some flushes
  for (int i = 1; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // Flush them
  region.flush(true);
  for (int i = 11; i < 20; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // Flush them
  region.flush(true);
  for (int i = 21; i < 30; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // Flush them
  region.flush(true);
  Store store = region.getStore(fam);
  assertEquals(3, store.getStorefilesCount());
  Collection<StoreFile> storefiles = store.getStorefiles();
  Collection<StoreFile> compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  // None of the files should be in compacted state.
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // Do compaction
  region.compact(true);
  startScannerThreads();
  storefiles = store.getStorefiles();
  int usedReaderCount = 0;
  int unusedReaderCount = 0;
  // A file still in use has a refcount of 3, one per scanner thread.
  for (StoreFile file : storefiles) {
    if (file.getRefCount() == 3) {
      usedReaderCount++;
    }
  }
  compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  for (StoreFile file : compactedfiles) {
    assertEquals("Refcount should be 0", 0, file.getRefCount());
    unusedReaderCount++;
  }
  // Though the compacted files are still present, no reads use them.
  assertEquals("unused reader count should be 3", 3, unusedReaderCount);
  assertEquals("used reader count should be 1", 1, usedReaderCount);
  // Now run the cleaner
  cleaner.chore();
  countDown();
  assertEquals(1, store.getStorefilesCount());
  storefiles = store.getStorefiles();
  for (StoreFile file : storefiles) {
    // Should not be in compacted state
    assertFalse(file.isCompactedAway());
  }
  compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  assertTrue(compactedfiles.isEmpty());
}
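startScannerThreads() and countDown() are helpers defined elsewhere in this test class: the first starts the three scanners whose references keep the live file's refcount at 3, and the second releases them. A hypothetical reconstruction of their shape, assuming a CountDownLatch gate (the field, the sleep, and the thread bodies are illustrative, not the actual helpers):

// Illustrative sketch only: three threads open scanners on the region and
// park until the test releases them, which is what pins refCount at 3.
private final CountDownLatch latch = new CountDownLatch(1);

private void startScannerThreads() throws InterruptedException {
  for (int i = 0; i < 3; i++) {
    new Thread(() -> {
      try {
        RegionScanner scanner = region.getScanner(new Scan());
        List<Cell> cells = new ArrayList<>();
        scanner.next(cells); // pins the store file reader
        latch.await();       // hold the reference until countDown()
        scanner.close();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }).start();
  }
  Thread.sleep(100); // crude wait for the scanners to open
}

private void countDown() {
  latch.countDown(); // release all scanner threads
}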
Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
From the class TestDateTieredCompactor, the method verify:
private void verify(KeyValue[] input, List<Long> boundaries, KeyValue[][] output,
    boolean allFiles) throws Exception {
  StoreFileWritersCapture writers = new StoreFileWritersCapture();
  StoreFile sf1 = createDummyStoreFile(1L);
  StoreFile sf2 = createDummyStoreFile(2L);
  DateTieredCompactor dtc = createCompactor(writers, input, Arrays.asList(sf1, sf2));
  // The compactor takes the lower boundaries only, hence all but the last entry.
  List<Path> paths = dtc.compact(new CompactionRequest(Arrays.asList(sf1)),
      boundaries.subList(0, boundaries.size() - 1),
      NoLimitThroughputController.INSTANCE, null);
  writers.verifyKvs(output, allFiles, boundaries);
  if (allFiles) {
    assertEquals(output.length, paths.size());
  }
}
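A hypothetical invocation, purely for illustration: three KeyValues split by timestamp across one internal boundary, producing two output files. Every row key, timestamp, and boundary below is invented, and the exact window semantics belong to DateTieredCompactor:

// Illustrative call; assumes this sits in the same test class as verify().
byte[] f = Bytes.toBytes("f"), q = Bytes.toBytes("q"), v = Bytes.toBytes("v");
KeyValue kv10 = new KeyValue(Bytes.toBytes("r1"), f, q, 10L, v);
KeyValue kv20 = new KeyValue(Bytes.toBytes("r2"), f, q, 20L, v);
KeyValue kv30 = new KeyValue(Bytes.toBytes("r3"), f, q, 30L, v);
verify(new KeyValue[] { kv10, kv20, kv30 },
    Arrays.asList(Long.MIN_VALUE, 25L, Long.MAX_VALUE),
    new KeyValue[][] { new KeyValue[] { kv10, kv20 }, new KeyValue[] { kv30 } },
    true);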
Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
From the class TestStripeCompactionPolicy, the method createStripes:
private static StripeInformationProvider createStripes(List<byte[]> boundaries,
    List<List<Long>> stripeSizes, List<Long> l0Sizes) throws Exception {
  // One mocked store file per requested size, per stripe.
  List<List<StoreFile>> stripeFiles = new ArrayList<>(stripeSizes.size());
  for (List<Long> sizes : stripeSizes) {
    List<StoreFile> sfs = new ArrayList<>(sizes.size());
    for (Long size : sizes) {
      sfs.add(createFile(size));
    }
    stripeFiles.add(sfs);
  }
  // L0 files sit outside any stripe.
  List<StoreFile> l0Files = new ArrayList<>();
  for (Long size : l0Sizes) {
    l0Files.add(createFile(size));
  }
  return createStripesWithFiles(boundaries, stripeFiles, l0Files);
}
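A hypothetical call, to show the shape of the arguments: two stripes holding one mocked file each, plus a single L0 file. The boundary key is invented, and I am assuming the list carries only internal split keys, with the open end keys handled inside createStripesWithFiles:

// Illustrative only: one boundary, two stripes, one L0 file.
StripeInformationProvider si = createStripes(
    Arrays.asList(Bytes.toBytes("m")),    // internal stripe boundary (assumed)
    Arrays.asList(Arrays.asList(2048L),   // stripe 1: one 2 KB file
        Arrays.asList(1024L)),            // stripe 2: one 1 KB file
    Arrays.asList(512L));                 // L0: one 512-byte file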
Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
From the class TestStripeCompactionPolicy, the method createFile:
private static StoreFile createFile(long size) throws Exception {
  StoreFile sf = mock(StoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  // The mocked reader reports the requested size as both entry count and length.
  StoreFileReader r = mock(StoreFileReader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(),
      anyBoolean())).thenReturn(mock(StoreFileScanner.class));
  // All reader-producing paths hand back the same mocked reader.
  when(sf.getReader()).thenReturn(r);
  when(sf.createReader(anyBoolean())).thenReturn(r);
  when(sf.createReader()).thenReturn(r);
  when(sf.cloneForReader()).thenReturn(sf);
  return sf;
}
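The policy tests mostly consult file metadata such as length and entry count, so a Mockito mock backed by a mocked reader is enough and no real HFile is ever written; the scanner and HFile reader are stubbed only for the few code paths that touch them. A quick illustration of what the mock answers, given the setup above:

// The synthetic size flows back through the mocked reader.
StoreFile sf = createFile(4096L);
assertEquals(4096L, sf.getReader().length());
assertEquals(4096L, sf.getReader().getEntries());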