Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestStripeCompactionPolicy, method createFile:
private static HStoreFile createFile(long size) throws Exception {
  HStoreFile sf = mock(HStoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  StoreFileReader r = mock(StoreFileReader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(),
    anyBoolean())).thenReturn(mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.getBulkLoadTimestamp()).thenReturn(OptionalLong.empty());
  when(r.getMaxTimestamp()).thenReturn(TimeRange.INITIAL_MAX_TIMESTAMP);
  return sf;
}
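For context, here is a minimal sketch of how such mocked files might be consumed by a stripe compaction policy test. The sizes, the local sfs list, and the closing assertion are illustrative assumptions (not code from TestStripeCompactionPolicy), and the snippet assumes the surrounding test class's static imports for mock/when and assertEquals.

// Illustrative only: build several mocked store files of different sizes, as a
// stripe compaction policy test might do before asking the policy which files to pick.
List<HStoreFile> sfs = new ArrayList<>();
for (long size : new long[] { 100L, 50L, 200L, 10L }) {
  sfs.add(createFile(size));
}
// Each mocked file reports the configured size through its mocked reader.
assertEquals(200L, sfs.get(2).getReader().length());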
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestCompactedHFilesDischarger, method testCleanerWithParallelScannersAfterCompaction:
@Test
public void testCleanerWithParallelScannersAfterCompaction() throws Exception {
  // Create the cleaner object
  CompactedHFilesDischarger cleaner =
    new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
  // Add some data to the region and do some flushes
  for (int i = 1; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  for (int i = 11; i < 20; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  for (int i = 21; i < 30; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  HStore store = region.getStore(fam);
  assertEquals(3, store.getStorefilesCount());
  Collection<HStoreFile> storefiles = store.getStorefiles();
  Collection<HStoreFile> compactedfiles =
    store.getStoreEngine().getStoreFileManager().getCompactedfiles();
  // None of the files should be in compacted state.
  for (HStoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // Do compaction
  region.compact(true);
  startScannerThreads();
  storefiles = store.getStorefiles();
  int usedReaderCount = 0;
  int unusedReaderCount = 0;
  for (HStoreFile file : storefiles) {
    if (file.getRefCount() == 3) {
      usedReaderCount++;
    }
  }
  compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
  for (HStoreFile file : compactedfiles) {
    assertEquals("Refcount should be 0", 0, file.getRefCount());
    unusedReaderCount++;
  }
  // Though the compacted files are still present, they are no longer used for reads
  assertEquals("unused reader count should be 3", 3, unusedReaderCount);
  assertEquals("used reader count should be 1", 1, usedReaderCount);
  // now run the cleaner
  cleaner.chore();
  countDown();
  assertEquals(1, store.getStorefilesCount());
  storefiles = store.getStorefiles();
  for (HStoreFile file : storefiles) {
    // Should not be in compacted state
    assertFalse(file.isCompactedAway());
  }
  compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
  assertTrue(compactedfiles.isEmpty());
}
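The helpers startScannerThreads() and countDown() used above are not shown in this excerpt. A rough, hedged sketch of what they could look like follows; the latch field, the thread count of three, and the short sleep are assumptions for illustration, not the original implementation.

// Assumed field: a latch the scanner threads block on so their store file readers stay referenced.
private static final CountDownLatch latch = new CountDownLatch(1);

// Start three threads that each open a scanner on the region, read a row, and then
// wait on the latch; while they wait, the files they read from keep a refcount of 3.
private void startScannerThreads() throws InterruptedException {
  for (int i = 0; i < 3; i++) {
    new Thread(() -> {
      try {
        RegionScanner scanner = region.getScanner(new Scan());
        scanner.next(new ArrayList<>());
        latch.await(); // hold the reader reference until countDown() is called
        scanner.close();
      } catch (Exception e) {
        // ignored in this sketch
      }
    }).start();
  }
  // crude wait so the scanners are open before the assertions run
  Thread.sleep(100);
}

// Release the scanner threads so they close their scanners and drop the refcounts.
private void countDown() {
  latch.countDown();
}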
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestCompactedHFilesDischarger, method testCompactedHFilesCleaner:
@Test
public void testCompactedHFilesCleaner() throws Exception {
  // Create the cleaner object
  CompactedHFilesDischarger cleaner =
    new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
  // Add some data to the region and do some flushes
  for (int i = 1; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  for (int i = 11; i < 20; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  for (int i = 21; i < 30; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  HStore store = region.getStore(fam);
  assertEquals(3, store.getStorefilesCount());
  Collection<HStoreFile> storefiles = store.getStorefiles();
  Collection<HStoreFile> compactedfiles =
    store.getStoreEngine().getStoreFileManager().getCompactedfiles();
  // None of the files should be in compacted state.
  for (HStoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // Try to run the cleaner without compaction; there should not be any change
  cleaner.chore();
  storefiles = store.getStorefiles();
  // None of the files should be in compacted state.
  for (HStoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // now do some compaction
  region.compact(true);
  // The flushed files should still be present until the cleaner runs, but they
  // should be marked as compacted
  assertEquals(1, store.getStorefilesCount());
  assertEquals(3, store.getStoreEngine().getStoreFileManager().getCompactedfiles().size());
  // Run the cleaner
  cleaner.chore();
  assertEquals(1, store.getStorefilesCount());
  storefiles = store.getStorefiles();
  for (HStoreFile file : storefiles) {
    // Should not be in compacted state
    assertFalse(file.isCompactedAway());
  }
  compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
  assertTrue(compactedfiles.isEmpty());
}
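Both tests above drive the discharger by calling chore() directly. In a running region server the same class is normally scheduled as a periodic chore; the sketch below shows one plausible wiring, assuming a Stoppable stopper and a RegionServerServices regionServerServices are available from the surrounding context.

// Illustrative wiring only: schedule the discharger to run periodically instead of
// invoking chore() by hand as the tests do. The 'stopper' and 'regionServerServices'
// references are assumptions about the surrounding context, not part of the test.
ChoreService choreService = new ChoreService("compacted-file-discharger");
CompactedHFilesDischarger discharger =
  new CompactedHFilesDischarger(60 * 1000, stopper, regionServerServices, true);
choreService.scheduleChore(discharger);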
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestDateTieredCompactor, method verify:
private void verify(KeyValue[] input, List<Long> boundaries, KeyValue[][] output, boolean allFiles)
    throws Exception {
  StoreFileWritersCapture writers = new StoreFileWritersCapture();
  HStoreFile sf1 = createDummyStoreFile(1L);
  HStoreFile sf2 = createDummyStoreFile(2L);
  DateTieredCompactor dtc = createCompactor(writers, input, Arrays.asList(sf1, sf2));
  List<Path> paths = dtc.compact(new CompactionRequestImpl(Arrays.asList(sf1)),
    boundaries.subList(0, boundaries.size() - 1), new HashMap<Long, String>(),
    NoLimitThroughputController.INSTANCE, null);
  writers.verifyKvs(output, allFiles, boundaries);
  if (allFiles) {
    assertEquals(output.length, paths.size());
  }
}
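A hedged sketch of how a test might call this verify helper follows. The createKV helper, the column names, and the concrete timestamps and boundary values are illustrative assumptions, not taken from TestDateTieredCompactor.

// Hypothetical helper: build a KeyValue for a given row and timestamp.
private static KeyValue createKV(String row, long ts) {
  return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("f"), Bytes.toBytes("q"), ts,
    Bytes.toBytes("v"));
}

@Test
public void testTwoTiersSketch() throws Exception {
  KeyValue kv1 = createKV("r1", 5L);
  KeyValue kv2 = createKV("r2", 15L);
  // Two tiers split at timestamp 10: the intent is for kv1 and kv2 to land in
  // separate output files, one per tier.
  verify(new KeyValue[] { kv1, kv2 },
    Arrays.asList(Long.MIN_VALUE, 10L, Long.MAX_VALUE),
    new KeyValue[][] { new KeyValue[] { kv1 }, new KeyValue[] { kv2 } },
    true);
}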