Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.
The class HStore, method compact:
@Override
public List<StoreFile> compact(CompactionContext compaction,
    ThroughputController throughputController, User user) throws IOException {
  assert compaction != null;
  List<StoreFile> sfs = null;
  CompactionRequest cr = compaction.getRequest();
  try {
    // Do all sanity checking in here if we have a valid CompactionRequest
    // because we need to clean up after it on the way out in a finally
    // block below
    long compactionStartTime = EnvironmentEdgeManager.currentTime();
    assert compaction.hasSelection();
    Collection<StoreFile> filesToCompact = cr.getFiles();
    assert !filesToCompact.isEmpty();
    synchronized (filesCompacting) {
      // sanity check: we're compacting files that this store knows about
      // TODO: change this to LOG.error() after more debugging
      Preconditions.checkArgument(filesCompacting.containsAll(filesToCompact));
    }
    // Ready to go. Have list of files to compact.
    LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in " + this + " of "
        + this.getRegionInfo().getRegionNameAsString() + " into tmpdir=" + fs.getTempDir()
        + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
    // Commence the compaction.
    List<Path> newFiles = compaction.compact(throughputController, user);
    long outputBytes = 0L;
    // TODO: get rid of this!
    if (!this.conf.getBoolean("hbase.hstore.compaction.complete", true)) {
      LOG.warn("hbase.hstore.compaction.complete is set to false");
      sfs = new ArrayList<>(newFiles.size());
      final boolean evictOnClose = cacheConf != null ? cacheConf.shouldEvictOnClose() : true;
      for (Path newFile : newFiles) {
        // Create storefile around what we wrote with a reader on it.
        StoreFile sf = createStoreFileAndReader(newFile);
        sf.closeReader(evictOnClose);
        sfs.add(sf);
      }
      return sfs;
    }
    // Do the steps necessary to complete the compaction.
    sfs = moveCompactedFilesIntoPlace(cr, newFiles, user);
    writeCompactionWalRecord(filesToCompact, sfs);
    replaceStoreFiles(filesToCompact, sfs);
    if (cr.isMajor()) {
      majorCompactedCellsCount += getCompactionProgress().totalCompactingKVs;
      majorCompactedCellsSize += getCompactionProgress().totalCompactedSize;
    } else {
      compactedCellsCount += getCompactionProgress().totalCompactingKVs;
      compactedCellsSize += getCompactionProgress().totalCompactedSize;
    }
    for (StoreFile sf : sfs) {
      outputBytes += sf.getReader().length();
    }
    // At this point the store will use new files for all new scanners.
    // Update store size.
    completeCompaction(filesToCompact);
    long now = EnvironmentEdgeManager.currentTime();
    if (region.getRegionServerServices() != null
        && region.getRegionServerServices().getMetrics() != null) {
      region.getRegionServerServices().getMetrics().updateCompaction(cr.isMajor(),
          now - compactionStartTime, cr.getFiles().size(), newFiles.size(), cr.getSize(),
          outputBytes);
    }
    logCompactionEndMessage(cr, sfs, now, compactionStartTime);
    return sfs;
  } finally {
    finishCompactionRequest(cr);
  }
}
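For context, a caller such as the region server's compaction scheduler first asks the store to select a compaction and only then invokes the method above. A minimal, hedged sketch of that flow, assuming an HStore named store and a throughputController are in scope (the names here are illustrative, not the exact scheduler code; a null user denotes an internal caller):

  // Hedged sketch: driving HStore.compact through the selection API.
  CompactionContext context = store.requestCompaction(Store.PRIORITY_USER, null);
  if (context != null) {
    // compact() writes the new files, swaps them in, and updates metrics;
    // the returned StoreFiles are what new scanners will read from now on.
    List<StoreFile> newFiles = store.compact(context, throughputController, null);
  }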
Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.
The class TestCompactionPolicy, method compactEquals:
void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
  store.forceMajor = forcemajor;
  // Test the default compaction selection.
  CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
      .getCompactionPolicy()).selectCompaction(candidates, new ArrayList<>(), false, isOffPeak,
          forcemajor);
  List<StoreFile> actual = new ArrayList<>(result.getFiles());
  if (isOffPeak && !forcemajor) {
    Assert.assertTrue(result.isOffPeak());
  }
  Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
  store.forceMajor = false;
}
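A hedged usage example, assuming the test class's sfCreate(long...) helper that builds mock StoreFiles of the given sizes; the trailing varargs are the sizes the policy is expected to select:

  // With default ratio settings, a normal skew should drop the two large
  // files and pick the three similarly sized small ones.
  compactEquals(sfCreate(100, 50, 23, 12, 12), false, false, 23, 12, 12);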
Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.
The class TestMajorCompaction, method testNonUserMajorCompactionRequest:
/**
 * Test for HBASE-5920: a system-requested major compaction should be
 * downgraded when there are too many store files to compact at once.
 */
@Test
public void testNonUserMajorCompactionRequest() throws Exception {
  Store store = r.getStore(COLUMN_FAMILY);
  createStoreFile(r);
  for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
    createStoreFile(r);
  }
  store.triggerMajorCompaction();
  CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null).getRequest();
  assertNotNull("Expected to receive a compaction request", request);
  assertEquals("System-requested major compaction should not occur if there are too many store files",
      false, request.isMajor());
}
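By contrast, a user-requested major compaction (the other half of HBASE-5920) should keep its major status regardless of the file count. A hedged sketch mirroring the sibling test testUserMajorCompactionRequest:

  // Hedged sketch of the user-requested counterpart: with PRIORITY_USER the
  // request should stay major despite the number of store files.
  Store store = r.getStore(COLUMN_FAMILY);
  store.triggerMajorCompaction();
  CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null).getRequest();
  assertEquals("User-requested major compaction should always occur",
      true, request.isMajor());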
Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.
The class TestStripeStoreEngine, method testCompactionContextForceSelect:
@Test
public void testCompactionContextForceSelect() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  int targetCount = 2;
  conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, targetCount);
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 2);
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, TestStoreEngine.class.getName());
  TestStoreEngine se = createEngine(conf);
  StripeCompactor mockCompactor = mock(StripeCompactor.class);
  se.setCompactorOverride(mockCompactor);
  when(mockCompactor.compact(any(CompactionRequest.class), anyInt(), anyLong(),
      any(byte[].class), any(byte[].class), any(byte[].class), any(byte[].class),
      any(ThroughputController.class), any(User.class))).thenReturn(new ArrayList<>());
  // Produce 3 L0 files.
  StoreFile sf = createFile();
  ArrayList<StoreFile> compactUs = al(sf, createFile(), createFile());
  se.getStoreFileManager().loadFiles(compactUs);
  // Create a compaction that would want to split the stripe.
  CompactionContext compaction = se.createCompaction();
  compaction.select(al(), false, false, false);
  assertEquals(3, compaction.getRequest().getFiles().size());
  // Override the file list. Granted, overriding this compaction in this manner will
  // break things in the real world, but we only want to verify the override.
  compactUs.remove(sf);
  CompactionRequest req = new CompactionRequest(compactUs);
  compaction.forceSelect(req);
  assertEquals(2, compaction.getRequest().getFiles().size());
  assertFalse(compaction.getRequest().getFiles().contains(sf));
  // Make sure the correct method is called on the compactor.
  compaction.compact(NoLimitThroughputController.INSTANCE, null);
  verify(mockCompactor, times(1)).compact(compaction.getRequest(), targetCount, 0L,
      StripeStoreFileManager.OPEN_KEY, StripeStoreFileManager.OPEN_KEY, null, null,
      NoLimitThroughputController.INSTANCE, null);
}
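The forceSelect call above replaces whatever the policy chose with the caller-supplied request, which is why the mock compactor is then verified against the overridden two-file list. The test also leans on a small varargs helper al(...); a minimal sketch of an equivalent helper, in case the original is not shown here (the real one in TestStripeStoreEngine is equivalent in spirit):

  // Builds a mutable list of store files from varargs.
  private static ArrayList<StoreFile> al(StoreFile... files) {
    return new ArrayList<>(Arrays.asList(files));
  }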