Usage example of org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy in the Apache HBase project.
The createComponents method of the DateTieredStoreEngine class:
@Override
protected void createComponents(Configuration conf, Store store, CellComparator kvComparator) throws IOException {
  // The policy must exist before the file manager, which consults its configuration.
  DateTieredCompactionPolicy policy = new DateTieredCompactionPolicy(conf, store);
  this.compactionPolicy = policy;
  // Order store files by max sequence id, then max timestamp, as date-tiering requires.
  this.storeFileManager = new DefaultStoreFileManager(kvComparator,
      StoreFile.Comparators.SEQ_ID_MAX_TIMESTAMP, conf, policy.getConf());
  // Flushing is unchanged from the default engine; only compaction is date-tiered.
  this.storeFlusher = new DefaultStoreFlusher(conf, store);
  this.compactor = new DateTieredCompactor(conf, store);
}
Usage example of org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy in the Apache HBase project.
The compactEquals method of the AbstractTestDateTieredCompactionPolicy class:
protected void compactEquals(long now, ArrayList<StoreFile> candidates, long[] expectedFileSizes,
    long[] expectedBoundaries, boolean isMajor, boolean toCompact) throws IOException {
  // Pin "now" via an injected manual clock so the policy's window math is deterministic.
  ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(clock);
  clock.setValue(now);
  DateTieredCompactionRequest request;
  DateTieredCompactionPolicy policy =
      (DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy();
  if (isMajor) {
    // Major path: flag each candidate as already major-compacted before asking the policy.
    for (StoreFile candidate : candidates) {
      ((MockStoreFile) candidate).setIsMajor(true);
    }
    assertEquals(toCompact, policy.shouldPerformMajorCompaction(candidates));
    request = (DateTieredCompactionRequest) policy.selectMajorCompaction(candidates);
  } else {
    assertEquals(toCompact, policy.needsCompaction(candidates, ImmutableList.<StoreFile>of()));
    request = (DateTieredCompactionRequest) policy.selectMinorCompaction(candidates, false, false);
  }
  // Compare through Arrays.toString so a failure prints both arrays in full.
  List<StoreFile> selected = Lists.newArrayList(request.getFiles());
  assertEquals(Arrays.toString(expectedFileSizes), Arrays.toString(getSizes(selected)));
  assertEquals(Arrays.toString(expectedBoundaries),
      Arrays.toString(request.getBoundaries().toArray()));
}
Aggregated usage examples end here.