Use of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider in project hbase by apache.
Class TestStripeCompactionPolicy, method createStripesWithFiles.
/**
 * This method actually does all the work: the other createStripes* helpers delegate here. It
 * mocks a StripeInformationProvider over the given boundaries, per-stripe files, and L0 files.
 */
private static StripeInformationProvider createStripesWithFiles(List<byte[]> boundaries,
    List<List<HStoreFile>> stripeFiles, List<HStoreFile> l0Files) throws Exception {
  ArrayList<ImmutableList<HStoreFile>> stripes = new ArrayList<>();
  ArrayList<byte[]> boundariesList = new ArrayList<>();
  StripeInformationProvider si = mock(StripeInformationProvider.class);
  if (!stripeFiles.isEmpty()) {
    assert stripeFiles.size() == (boundaries.size() + 1);
    boundariesList.add(OPEN_KEY);
    for (int i = 0; i <= boundaries.size(); ++i) {
      byte[] startKey = ((i == 0) ? OPEN_KEY : boundaries.get(i - 1));
      byte[] endKey = ((i == boundaries.size()) ? OPEN_KEY : boundaries.get(i));
      boundariesList.add(endKey);
      for (HStoreFile sf : stripeFiles.get(i)) {
        setFileStripe(sf, startKey, endKey);
      }
      stripes.add(ImmutableList.copyOf(stripeFiles.get(i)));
      when(si.getStartRow(eq(i))).thenReturn(startKey);
      when(si.getEndRow(eq(i))).thenReturn(endKey);
    }
  }
  ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
  sfs.addAllSublists(stripes);
  sfs.addSublist(l0Files);
  when(si.getStorefiles()).thenReturn(sfs);
  when(si.getStripes()).thenReturn(stripes);
  when(si.getStripeBoundaries()).thenReturn(boundariesList);
  when(si.getStripeCount()).thenReturn(stripes.size());
  when(si.getLevel0Files()).thenReturn(l0Files);
  return si;
}
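The setFileStripe helper is not shown in this excerpt. Below is a minimal sketch of what it plausibly does, assuming the mocked HStoreFile exposes stripe boundaries via getMetadataValue and that the STRIPE_START_KEY/STRIPE_END_KEY constants come from StripeStoreFileManager; treat the exact shape as an assumption, not the repository's code.

// Hypothetical sketch: stamps stripe-boundary metadata onto a mocked HStoreFile so the
// policy under test sees it as belonging to [startKey, endKey).
private static void setFileStripe(HStoreFile sf, byte[] startKey, byte[] endKey) {
  when(sf.getMetadataValue(StripeStoreFileManager.STRIPE_START_KEY)).thenReturn(startKey);
  when(sf.getMetadataValue(StripeStoreFileManager.STRIPE_END_KEY)).thenReturn(endKey);
}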
Use of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider in project hbase by apache.
Class TestStripeCompactionPolicy, method testNewStripesFromFlush.
@Test
public void testNewStripesFromFlush() throws Exception {
  StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
  StripeInformationProvider si = createStripesL0Only(0, 0);
  KeyValue[] input = new KeyValue[] { KV_B, KV_C, KV_C, KV_D, KV_E };
  // The flush starts a single stripe covering everything; unlike L0 flush results
  // (see testNoStripesFromFlush), the output must carry stripe boundary metadata.
  KeyValue[][] expected = new KeyValue[][] { input };
  verifyFlush(policy, si, input, expected, new byte[][] { OPEN_KEY, OPEN_KEY });
}
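createStripesL0Only(0, 0) produces a provider with no stripes and nothing in L0, so the flush policy starts from scratch. A plausible sketch, assuming a createStoreFiles(count, size) helper exists elsewhere in the test class to mock files of a given size:

// Hypothetical sketch: an L0-only provider has no boundaries and no per-stripe files.
private static StripeInformationProvider createStripesL0Only(int l0Count, long l0Size)
    throws Exception {
  List<HStoreFile> l0Files = createStoreFiles(l0Count, l0Size); // assumed helper
  return createStripesWithFiles(new ArrayList<>(), new ArrayList<>(), l0Files);
}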
Use of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider in project hbase by apache.
Class TestStripeCompactionPolicy, method testNoStripesFromFlush.
@Test
public void testNoStripesFromFlush() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(StripeStoreConfig.FLUSH_TO_L0_KEY, true);
  StripeCompactionPolicy policy = createPolicy(conf);
  StripeInformationProvider si = createStripesL0Only(0, 0);
  KeyValue[] input = new KeyValue[] { KV_A, KV_B, KV_C, KV_D, KV_E };
  KeyValue[][] expected = new KeyValue[][] { input };
  verifyFlush(policy, si, input, expected, null);
}
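createPolicy is also not shown. Judging by the explicit construction in testSingleStripeCompaction below, a reasonable sketch is:

// Sketch modeled on the construction used in testSingleStripeCompaction below; the real
// helper may additionally stub sci.getRegionInfo(), as that test does explicitly.
private static StripeCompactionPolicy createPolicy(Configuration conf) throws Exception {
  StoreConfigInformation sci = mock(StoreConfigInformation.class);
  StripeStoreConfig ssc = new StripeStoreConfig(conf, sci);
  return new StripeCompactionPolicy(conf, sci, ssc);
}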
Use of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider in project hbase by apache.
Class TestStripeCompactionPolicy, method testSingleStripeCompaction.
@Test
public void testSingleStripeCompaction() throws Exception {
  // Create a special policy that only compacts single stripes, using standard methods.
  Configuration conf = HBaseConfiguration.create();
  // The test depends on this key being unset; the default value breaks it. TODO: Revisit.
  conf.unset("hbase.hstore.compaction.min.size");
  conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 3);
  conf.setInt(StripeStoreConfig.MAX_FILES_KEY, 4);
  // Make sure there are no splits.
  conf.setLong(StripeStoreConfig.SIZE_TO_SPLIT_KEY, 1000);
  StoreConfigInformation sci = mock(StoreConfigInformation.class);
  when(sci.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
  StripeStoreConfig ssc = new StripeStoreConfig(conf, sci);
  StripeCompactionPolicy policy = new StripeCompactionPolicy(conf, sci, ssc) {
    @Override
    public StripeCompactionRequest selectCompaction(StripeInformationProvider si,
        List<HStoreFile> filesCompacting, boolean isOffpeak) throws IOException {
      if (!filesCompacting.isEmpty()) {
        return null;
      }
      return selectSingleStripeCompaction(si, false, false, isOffpeak);
    }

    @Override
    public boolean needsCompactions(StripeInformationProvider si,
        List<HStoreFile> filesCompacting) {
      if (!filesCompacting.isEmpty()) {
        return false;
      }
      return needsSingleStripeCompaction(si);
    }
  };
  // No compaction due to min files or ratio.
  StripeInformationProvider si = createStripesWithSizes(0, 0, new Long[] { 2L },
      new Long[] { 3L, 3L }, new Long[] { 5L, 1L });
  verifyNoCompaction(policy, si);
  // No compaction due to min files or ratio - reports compaction as needed, but selects none.
  si = createStripesWithSizes(0, 0, new Long[] { 2L }, new Long[] { 3L, 3L },
      new Long[] { 5L, 1L, 1L });
  assertNull(policy.selectCompaction(si, al(), false));
  assertTrue(policy.needsCompactions(si, al()));
  // One stripe has a possible compaction.
  si = createStripesWithSizes(0, 0, new Long[] { 2L }, new Long[] { 3L, 3L },
      new Long[] { 5L, 4L, 3L });
  verifySingleStripeCompaction(policy, si, 2, null);
  // Several stripes have possible compactions; choose the best quality one (removes most files).
  si = createStripesWithSizes(0, 0, new Long[] { 3L, 2L, 2L }, new Long[] { 2L, 2L, 1L },
      new Long[] { 3L, 2L, 2L, 1L });
  verifySingleStripeCompaction(policy, si, 2, null);
  si = createStripesWithSizes(0, 0, new Long[] { 5L }, new Long[] { 3L, 2L, 2L, 1L },
      new Long[] { 3L, 2L, 2L });
  verifySingleStripeCompaction(policy, si, 1, null);
  // Or the one with the smallest files, if the removed count is the same.
  si = createStripesWithSizes(0, 0, new Long[] { 3L, 3L, 3L }, new Long[] { 3L, 1L, 2L },
      new Long[] { 3L, 2L, 2L });
  verifySingleStripeCompaction(policy, si, 1, null);
  // Verify max count is respected.
  si = createStripesWithSizes(0, 0, new Long[] { 5L }, new Long[] { 5L, 4L, 4L, 4L, 4L });
  List<HStoreFile> sfs = si.getStripes().get(1).subList(1, 5);
  verifyCompaction(policy, si, sfs, null, 1, null, si.getStartRow(1), si.getEndRow(1), true);
  // Verify the ratio is applied.
  si = createStripesWithSizes(0, 0, new Long[] { 5L }, new Long[] { 50L, 4L, 4L, 4L, 4L });
  sfs = si.getStripes().get(1).subList(1, 5);
  verifyCompaction(policy, si, sfs, null, 1, null, si.getStartRow(1), si.getEndRow(1), true);
}
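The al() calls above pass an empty "files currently compacting" list. A tiny assumed helper along these lines would make the snippet compile:

// Assumed varargs helper: builds a mutable list (empty here, meaning nothing is
// currently being compacted).
@SafeVarargs
private static <T> List<T> al(T... args) {
  return new ArrayList<>(Arrays.asList(args));
}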
Use of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider in project hbase by apache.
Class TestStripeCompactionPolicy, method testSingleStripeDropDeletes.
@Test
public void testSingleStripeDropDeletes() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // The test depends on this key being unset; the default value breaks it. TODO: Revisit.
  conf.unset("hbase.hstore.compaction.min.size");
  StripeCompactionPolicy policy = createPolicy(conf);
  // Verify that deletes can be dropped if there are no L0 files.
  Long[][] stripes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  StripeInformationProvider si = createStripesWithSizes(0, 0, stripes);
  verifySingleStripeCompaction(policy, si, 0, true);
  // But they cannot be dropped if there are L0 files.
  si = createStripesWithSizes(2, 2, stripes);
  verifySingleStripeCompaction(policy, si, 0, false);
  // Unless there are enough L0 files to cause an L0 compaction.
  si = createStripesWithSizes(6, 2, stripes);
  ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
  sfs.addSublist(si.getLevel0Files());
  sfs.addSublist(si.getStripes().get(0));
  verifyCompaction(policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries());
  // If we cannot actually compact all files in some stripe, L0 is chosen.
  si = createStripesWithSizes(6, 2,
      new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
  verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
  // Even if L0 has no files: when not all files of a stripe are selected,
  // deletes must not be dropped.
  stripes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  si = createStripesWithSizes(0, 0, stripes);
  List<HStoreFile> compactFile = new ArrayList<>();
  Iterator<HStoreFile> iter = si.getStripes().get(0).listIterator(1);
  while (iter.hasNext()) {
    compactFile.add(iter.next());
  }
  verifyCompaction(policy, si, compactFile, false, 1, null, si.getStartRow(0), si.getEndRow(0),
      true);
}
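Both this test and createStripesWithFiles glue file lists together with ConcatenatedLists (org.apache.hadoop.hbase.util), which exposes several sublists as a single read-only collection without copying them. A minimal usage sketch, independent of the test:

// ConcatenatedLists iterates its sublists in insertion order without copying them.
ConcatenatedLists<Integer> all = new ConcatenatedLists<>();
all.addSublist(Arrays.asList(1, 2));
all.addAllSublists(Arrays.asList(Arrays.asList(3), Arrays.asList(4, 5)));
assert all.size() == 5; // yields 1, 2, 3, 4, 5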