use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
the class GaussianFileListGenerator method iterator.
@Override
public Iterator<List<StoreFile>> iterator() {
  return new Iterator<List<StoreFile>>() {

    private GaussianRandomGenerator gen =
        new GaussianRandomGenerator(new MersenneTwister(random.nextInt()));

    private int count = 0;

    @Override
    public boolean hasNext() {
      return count < MAX_FILE_GEN_ITERS;
    }

    @Override
    public List<StoreFile> next() {
      count += 1;
      ArrayList<StoreFile> files = new ArrayList<>(NUM_FILES_GEN);
      for (int i = 0; i < NUM_FILES_GEN; i++) {
        files.add(createMockStoreFile(
            (int) Math.ceil(Math.max(0, gen.nextNormalizedDouble() * 32 + 32))));
      }
      return files;
    }

    @Override
    public void remove() {
    }
  };
}
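The single-argument createMockStoreFile overload called above is not shown on this page; a plausible sketch, assuming it treats its argument as a size in megabytes and delegates to the two-argument mock factory in the next snippet:
// Assumed shape (not copied from this page): the one-arg overload used by the
// generator above, interpreting its argument as megabytes and delegating to the
// two-argument factory below with a placeholder sequence id.
protected StoreFile createMockStoreFile(final long sizeMb) {
  return createMockStoreFile(sizeMb * 1024 * 1024, -1L);
}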
use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
the class MockStoreFileGenerator method createMockStoreFile.
protected StoreFile createMockStoreFile(final long sizeInBytes, final long seqId) {
  StoreFile mockSf = mock(StoreFile.class);
  StoreFileReader reader = mock(StoreFileReader.class);
  String stringPath = "/hbase/testTable/regionA/"
      + RandomStringUtils.random(FILENAME_LENGTH, 0, 0, true, true, null, random);
  Path path = new Path(stringPath);
  when(reader.getSequenceID()).thenReturn(seqId);
  when(reader.getTotalUncompressedBytes()).thenReturn(sizeInBytes);
  when(reader.length()).thenReturn(sizeInBytes);
  when(mockSf.getPath()).thenReturn(path);
  when(mockSf.excludeFromMinorCompaction()).thenReturn(false);
  // TODO come back to this when selection takes this into account
  when(mockSf.isReference()).thenReturn(false);
  when(mockSf.getReader()).thenReturn(reader);
  String toString = Objects.toStringHelper("MockStoreFile")
      .add("isReference", false)
      .add("fileSize", StringUtils.humanReadableInt(sizeInBytes))
      .add("seqId", seqId)
      .add("path", stringPath)
      .toString();
  when(mockSf.toString()).thenReturn(toString);
  return mockSf;
}
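For illustration, a caller might exercise the stubbed reader like this (a minimal sketch; the values are arbitrary and the assertions simply echo the stubbing above):
// Illustrative usage of the mock factory above.
StoreFile sf = createMockStoreFile(1024L * 1024L, 42L);
assertEquals(42L, sf.getReader().getSequenceID());
assertEquals(1024L * 1024L, sf.getReader().length());
assertFalse(sf.isReference());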
use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
the class PerfTestCompactionPolicies method runIteration.
private List<StoreFile> runIteration(List<StoreFile> startingStoreFiles) throws IOException {
  List<StoreFile> storeFiles = new ArrayList<>(startingStoreFiles);
  CompactionRequest req = cp.selectCompaction(storeFiles, new ArrayList<>(), false, false, false);
  long newFileSize = 0;
  Collection<StoreFile> filesToCompact = req.getFiles();
  if (!filesToCompact.isEmpty()) {
    storeFiles = new ArrayList<>(storeFiles);
    storeFiles.removeAll(filesToCompact);
    for (StoreFile storeFile : filesToCompact) {
      newFileSize += storeFile.getReader().length();
    }
    storeFiles.add(createMockStoreFileBytes(newFileSize));
  }
  written += newFileSize;
  return storeFiles;
}
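runIteration simulates one compaction round: the files the policy selects are replaced by a single mock file whose size is the sum of their lengths, and written accumulates the total bytes rewritten. The createMockStoreFileBytes helper is not shown on this page; a plausible sketch, assuming it reuses the two-argument mock factory above with a byte-sized argument:
// Assumed shape of the byte-sized helper used above (not copied from this page).
protected StoreFile createMockStoreFileBytes(final long sizeInBytes) {
  return createMockStoreFile(sizeInBytes, -1L);
}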
use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
the class PerfTestCompactionPolicies method testSelection.
@Test
public final void testSelection() throws Exception {
  long fileDiff = 0;
  for (List<StoreFile> storeFileList : generator) {
    List<StoreFile> currentFiles = new ArrayList<>(18);
    for (StoreFile file : storeFileList) {
      currentFiles.add(file);
      currentFiles = runIteration(currentFiles);
    }
    fileDiff += (storeFileList.size() - currentFiles.size());
  }
  // Print tab-delimited so the results can be pasted into Excel/Google Docs.
  System.out.println(cp.getClass().getSimpleName() + "\t" + fileGenClass.getSimpleName()
      + "\t" + max + "\t" + min + "\t" + ratio + "\t" + written + "\t" + fileDiff);
}
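Each run prints one tab-separated row per policy/generator combination, in the column order policy, generator, max, min, ratio, written, fileDiff. A hypothetical example row (values invented purely for illustration):
ExploringCompactionPolicy	GaussianFileListGenerator	10	3	1.2	104857600	37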
use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.
the class TestCompactedHFilesDischarger method testCleanerWithParallelScanners.
@Test
public void testCleanerWithParallelScanners() throws Exception {
  // Create the cleaner object
  CompactedHFilesDischarger cleaner =
      new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
  // Add some data to the region and do some flushes
  for (int i = 1; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  for (int i = 11; i < 20; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  for (int i = 21; i < 30; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  // flush them
  region.flush(true);
  Store store = region.getStore(fam);
  assertEquals(3, store.getStorefilesCount());
  Collection<StoreFile> storefiles = store.getStorefiles();
  Collection<StoreFile> compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  // None of the files should be in compacted state.
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  startScannerThreads();
  // Do compaction
  region.compact(true);
  storefiles = store.getStorefiles();
  int usedReaderCount = 0;
  int unusedReaderCount = 0;
  for (StoreFile file : storefiles) {
    if (file.getRefCount() == 0) {
      unusedReaderCount++;
    }
  }
  compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  for (StoreFile file : compactedfiles) {
    assertEquals("Refcount should be 3", 3, file.getRefCount());
    usedReaderCount++;
  }
  // The newly compacted file will not be used by any scanner
  assertEquals("unused reader count should be 1", 1, unusedReaderCount);
  assertEquals("used reader count should be 3", 3, usedReaderCount);
  // now run the cleaner
  cleaner.chore();
  countDown();
  // Nothing could be cleaned up yet: the scanners still hold references
  // to the three compacted files.
  assertEquals(1, store.getStorefilesCount());
  assertEquals(3,
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles().size());
  while (scanCompletedCounter.get() != 3) {
    Thread.sleep(100);
  }
  // reset
  latch = new CountDownLatch(3);
  scanCompletedCounter.set(0);
  counter.set(0);
  // A new scanner should use only the file created by the compaction
  startScannerThreads();
  storefiles = store.getStorefiles();
  usedReaderCount = 0;
  unusedReaderCount = 0;
  for (StoreFile file : storefiles) {
    if (file.getRefCount() == 3) {
      usedReaderCount++;
    }
  }
  compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  for (StoreFile file : compactedfiles) {
    assertEquals("Refcount should be 0", 0, file.getRefCount());
    unusedReaderCount++;
  }
  // Though the compacted files still exist, no reader is using them
  assertEquals("unused reader count should be 3", 3, unusedReaderCount);
  assertEquals("used reader count should be 1", 1, usedReaderCount);
  countDown();
  while (scanCompletedCounter.get() != 3) {
    Thread.sleep(100);
  }
  // Run the cleaner again; now it can clear the compacted files
  // because there are no active readers
  cleaner.chore();
  assertEquals(1, store.getStorefilesCount());
  storefiles = store.getStorefiles();
  for (StoreFile file : storefiles) {
    // Should not be in compacted state
    assertFalse(file.isCompactedAway());
  }
  compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  assertTrue(compactedfiles.isEmpty());
}
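The startScannerThreads and countDown helpers, along with the latch, counter, and scanCompletedCounter fields they touch, are defined elsewhere in TestCompactedHFilesDischarger; a plausible sketch of their shape (bodies assumed, not copied from this page):
// Assumed helper shapes; the real test defines these elsewhere in the class.
private void startScannerThreads() throws InterruptedException {
  for (int i = 0; i < 3; i++) {
    // Each ScanThread scans the region, then blocks on the latch,
    // keeping store file references open while it waits.
    new Thread(new ScanThread((HRegion) region)).start();
  }
  // wait until all three scanners are up and holding references
  while (counter.get() != 3) {
    Thread.sleep(100);
  }
}

private static void countDown() {
  // release all three waiting scanner threads
  latch.countDown();
  latch.countDown();
  latch.countDown();
}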